2795ff2411e030e5f9a1291f5457c1a071b72c04
[linux-flexiantxendom0-natty.git] / drivers / staging / vme / bridges / vme_ca91cx42.c
1 /*
2  * Support for the Tundra Universe I/II VME-PCI Bridge Chips
3  *
4  * Author: Martyn Welch <martyn.welch@ge.com>
5  * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6  *
7  * Based on work by Tom Armistead and Ajit Prem
8  * Copyright 2004 Motorola Inc.
9  *
10  * Derived from ca91c042.c by Michael Wyrick
11  *
12  * This program is free software; you can redistribute  it and/or modify it
13  * under  the terms of  the GNU General  Public License as published by the
14  * Free Software Foundation;  either version 2 of the  License, or (at your
15  * option) any later version.
16  */
17
18 #include <linux/module.h>
19 #include <linux/mm.h>
20 #include <linux/types.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/poll.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <asm/time.h>
29 #include <asm/io.h>
30 #include <asm/uaccess.h>
31
32 #include "../vme.h"
33 #include "../vme_bridge.h"
34 #include "vme_ca91cx42.h"
35
/* Forward declarations for the PCI driver hooks registered below. */
static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);

/* Module parameters */
/* NOTE(review): presumably a geographical-address override (0 = derive
 * from the backplane slot) - confirm against the module_param usage. */
static int geoid;

/* Name used both for the PCI driver and for IRQ registration. */
static char driver_name[] = "vme_ca91cx42";

/* Match table: the Tundra CA91C142 (Universe II) PCI-VME bridge. */
static const struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
57
58 static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
59 {
60         wake_up(&(bridge->dma_queue));
61
62         return CA91CX42_LINT_DMA;
63 }
64
65 static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
66 {
67         int i;
68         u32 serviced = 0;
69
70         for (i = 0; i < 4; i++) {
71                 if (stat & CA91CX42_LINT_LM[i]) {
72                         /* We only enable interrupts if the callback is set */
73                         bridge->lm_callback[i](i);
74                         serviced |= CA91CX42_LINT_LM[i];
75                 }
76         }
77
78         return serviced;
79 }
80
81 /* XXX This needs to be split into 4 queues */
82 static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
83 {
84         wake_up(&(bridge->mbox_queue));
85
86         return CA91CX42_LINT_MBOX;
87 }
88
89 static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
90 {
91         wake_up(&(bridge->iack_queue));
92
93         return CA91CX42_LINT_SW_IACK;
94 }
95
96 static u32 ca91cx42_VERR_irqhandler(struct ca91cx42_driver *bridge)
97 {
98         int val;
99
100         val = ioread32(bridge->base + DGCS);
101
102         if (!(val & 0x00000800)) {
103                 printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
104                         "Error DGCS=%08X\n", val);
105         }
106
107         return CA91CX42_LINT_VERR;
108 }
109
110 static u32 ca91cx42_LERR_irqhandler(struct ca91cx42_driver *bridge)
111 {
112         int val;
113
114         val = ioread32(bridge->base + DGCS);
115
116         if (!(val & 0x00000800)) {
117                 printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
118                         "Error DGCS=%08X\n", val);
119
120         }
121
122         return CA91CX42_LINT_LERR;
123 }
124
125
126 static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
127         int stat)
128 {
129         int vec, i, serviced = 0;
130         struct ca91cx42_driver *bridge;
131
132         bridge = ca91cx42_bridge->driver_priv;
133
134
135         for (i = 7; i > 0; i--) {
136                 if (stat & (1 << i)) {
137                         vec = ioread32(bridge->base +
138                                 CA91CX42_V_STATID[i]) & 0xff;
139
140                         vme_irq_handler(ca91cx42_bridge, i, vec);
141
142                         serviced |= (1 << i);
143                 }
144         }
145
146         return serviced;
147 }
148
/*
 * Top-level PCI interrupt handler.  Reads the pending local interrupt
 * status, masks it against the enabled sources and dispatches to the
 * per-source handlers, then acknowledges what was seen.
 */
static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	/* dev_id registered in ca91cx42_irq_init() is the vme_bridge. */
	ca91cx42_bridge = ptr;

	bridge = ca91cx42_bridge->driver_priv;

	enable = ioread32(bridge->base + LINT_EN);
	stat = ioread32(bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	/* Shared line: not ours if nothing is pending. */
	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler(bridge);
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler(bridge);
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);

	/* Clear serviced interrupts */
	/* NOTE(review): this writes all of 'stat', not just 'serviced' -
	 * confirm whether unserviced-but-pending bits should be cleared. */
	iowrite32(stat, bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}
192
/*
 * Initialise bridge interrupt handling: quiesce all interrupt sources,
 * install the shared PCI IRQ handler and enable the local (non-VIRQ)
 * interrupt sources.
 *
 * Returns 0 on success, or the error from request_irq().
 */
static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
	int result, tmp;
	struct pci_dev *pdev;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Need pdev */
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(ca91cx42_bridge->vme_errors));

	mutex_init(&(ca91cx42_bridge->irq_mtx));

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* Shared line; the dev_id passed here is the vme_bridge pointer. */
	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, ca91cx42_bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
		       pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
239
/*
 * Quiesce the bridge's interrupt sources and release the PCI IRQ.
 */
static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
	struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/*
	 * NOTE(review): ca91cx42_irq_init() registers the handler with the
	 * vme_bridge pointer as dev_id, but pdev is passed here, so this
	 * free_irq() will not match the registered handler on a shared
	 * line.  Fixing it requires plumbing the vme_bridge pointer
	 * through to this function - confirm with the callers.
	 */
	free_irq(pdev->irq, pdev);
}
253
254 /*
255  * Set up an VME interrupt
256  */
257 void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, int state,
258         int sync)
259
260 {
261         struct pci_dev *pdev;
262         u32 tmp;
263         struct ca91cx42_driver *bridge;
264
265         bridge = ca91cx42_bridge->driver_priv;
266
267         /* Enable IRQ level */
268         tmp = ioread32(bridge->base + LINT_EN);
269
270         if (state == 0)
271                 tmp &= ~CA91CX42_LINT_VIRQ[level];
272         else
273                 tmp |= CA91CX42_LINT_VIRQ[level];
274
275         iowrite32(tmp, bridge->base + LINT_EN);
276
277         if ((state == 0) && (sync != 0)) {
278                 pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
279                         dev);
280
281                 synchronize_irq(pdev->irq);
282         }
283 }
284
/*
 * Generate a VME bus interrupt at the given level carrying the given
 * status/ID (vector), then wait for a handler to IACK it before
 * deasserting.
 *
 * Returns 0 on success, -EINVAL if the vector is odd.
 */
int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
	int statid)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	/* Serialise software interrupt generation. */
	mutex_lock(&(bridge->vme_int));

	tmp = ioread32(bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	/* Wait for IACK */
	/*
	 * NOTE(review): the wait condition is the constant 0, so the
	 * wake_up from the IACK handler cannot complete this wait - only
	 * a signal can.  Confirm whether a real "IACK received" condition
	 * should be checked here.
	 */
	wait_event_interruptible(bridge->iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	mutex_unlock(&(bridge->vme_int));

	return 0;
}
320
/*
 * Configure a slave image: a window through which VME masters reach
 * local PCI memory.  The image is disabled while being reprogrammed
 * and only re-enabled if 'enabled' is set.
 *
 * Returns 0 on success, -EINVAL for an unsupported address space or
 * misaligned base/bound/offset.
 */
int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity;
	unsigned int temp_ctl = 0;
	/* NOTE(review): these are 32-bit but derived from 64-bit inputs -
	 * confirm windows above 4 GiB are rejected before truncation. */
	unsigned int vme_bound, pci_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	/* Translate the requested address space into VSI_CTL VAS bits. */
	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		printk(KERN_ERR "Invalid address space\n");
		return -EINVAL;
		break;
	}

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size;
	pci_offset = pci_base - vme_base;

	/* Images 0 and 4 have 4 KiB granularity, the others 64 KiB. */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	if (vme_base & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		printk(KERN_ERR "Invalid PCI Offset alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}
420
421 int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
422         unsigned long long *vme_base, unsigned long long *size,
423         dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
424 {
425         unsigned int i, granularity = 0, ctl = 0;
426         unsigned long long vme_bound, pci_offset;
427         struct ca91cx42_driver *bridge;
428
429         bridge = image->parent->driver_priv;
430
431         i = image->number;
432
433         if ((i == 0) || (i == 4))
434                 granularity = 0x1000;
435         else
436                 granularity = 0x10000;
437
438         /* Read Registers */
439         ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
440
441         *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
442         vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
443         pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
444
445         *pci_base = (dma_addr_t)vme_base + pci_offset;
446         *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
447
448         *enabled = 0;
449         *aspace = 0;
450         *cycle = 0;
451
452         if (ctl & CA91CX42_VSI_CTL_EN)
453                 *enabled = 1;
454
455         if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
456                 *aspace = VME_A16;
457         if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
458                 *aspace = VME_A24;
459         if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
460                 *aspace = VME_A32;
461         if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
462                 *aspace = VME_USER1;
463         if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
464                 *aspace = VME_USER2;
465
466         if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
467                 *cycle |= VME_SUPER;
468         if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
469                 *cycle |= VME_USER;
470         if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
471                 *cycle |= VME_PROG;
472         if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
473                 *cycle |= VME_DATA;
474
475         return 0;
476 }
477
478 /*
479  * Allocate and map PCI Resource
480  */
481 static int ca91cx42_alloc_resource(struct vme_master_resource *image,
482         unsigned long long size)
483 {
484         unsigned long long existing_size;
485         int retval = 0;
486         struct pci_dev *pdev;
487         struct vme_bridge *ca91cx42_bridge;
488
489         ca91cx42_bridge = image->parent;
490
491         /* Find pci_dev container of dev */
492         if (ca91cx42_bridge->parent == NULL) {
493                 printk(KERN_ERR "Dev entry NULL\n");
494                 return -EINVAL;
495         }
496         pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
497
498         existing_size = (unsigned long long)(image->bus_resource.end -
499                 image->bus_resource.start);
500
501         /* If the existing size is OK, return */
502         if (existing_size == (size - 1))
503                 return 0;
504
505         if (existing_size != 0) {
506                 iounmap(image->kern_base);
507                 image->kern_base = NULL;
508                 if (image->bus_resource.name != NULL)
509                         kfree(image->bus_resource.name);
510                 release_resource(&(image->bus_resource));
511                 memset(&(image->bus_resource), 0, sizeof(struct resource));
512         }
513
514         if (image->bus_resource.name == NULL) {
515                 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
516                 if (image->bus_resource.name == NULL) {
517                         printk(KERN_ERR "Unable to allocate memory for resource"
518                                 " name\n");
519                         retval = -ENOMEM;
520                         goto err_name;
521                 }
522         }
523
524         sprintf((char *)image->bus_resource.name, "%s.%d",
525                 ca91cx42_bridge->name, image->number);
526
527         image->bus_resource.start = 0;
528         image->bus_resource.end = (unsigned long)size;
529         image->bus_resource.flags = IORESOURCE_MEM;
530
531         retval = pci_bus_alloc_resource(pdev->bus,
532                 &(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
533                 0, NULL, NULL);
534         if (retval) {
535                 printk(KERN_ERR "Failed to allocate mem resource for "
536                         "window %d size 0x%lx start 0x%lx\n",
537                         image->number, (unsigned long)size,
538                         (unsigned long)image->bus_resource.start);
539                 goto err_resource;
540         }
541
542         image->kern_base = ioremap_nocache(
543                 image->bus_resource.start, size);
544         if (image->kern_base == NULL) {
545                 printk(KERN_ERR "Failed to remap resource\n");
546                 retval = -ENOMEM;
547                 goto err_remap;
548         }
549
550         return 0;
551
552         iounmap(image->kern_base);
553         image->kern_base = NULL;
554 err_remap:
555         release_resource(&(image->bus_resource));
556 err_resource:
557         kfree(image->bus_resource.name);
558         memset(&(image->bus_resource), 0, sizeof(struct resource));
559 err_name:
560         return retval;
561 }
562
563 /*
564  * Free and unmap PCI Resource
565  */
566 static void ca91cx42_free_resource(struct vme_master_resource *image)
567 {
568         iounmap(image->kern_base);
569         image->kern_base = NULL;
570         release_resource(&(image->bus_resource));
571         kfree(image->bus_resource.name);
572         memset(&(image->bus_resource), 0, sizeof(struct resource));
573 }
574
575
/*
 * Configure a master window: a window through which the local CPU
 * accesses the VME bus.  Allocates the backing PCI window, programs
 * the image registers with it disabled, then optionally enables it.
 *
 * Returns 0 on success, -EINVAL for bad alignment, data width or
 * address space, -ENOMEM if the PCI window cannot be allocated.
 */
int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	/* Images 0 and 4 have 4 KiB granularity, the others 64 KiB. */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Verify input data */
	if (vme_base & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&(image->lock));

	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependant stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Unable to allocate memory for resource "
			"name\n");
		/* NOTE(review): this overwrites the error returned by
		 * ca91cx42_alloc_resource() with -ENOMEM - confirm whether
		 * the original code should be propagated instead. */
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->bus_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + size;
	vme_offset = vme_base - pci_base;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
		break;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
		break;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&(image->lock));
	return 0;

err_aspace:
err_dwidth:
	/* Release the PCI window allocated above on configuration failure. */
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}
727
728 int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
729         unsigned long long *vme_base, unsigned long long *size,
730         vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
731 {
732         unsigned int i, ctl;
733         unsigned long long pci_base, pci_bound, vme_offset;
734         struct ca91cx42_driver *bridge;
735
736         bridge = image->parent->driver_priv;
737
738         i = image->number;
739
740         ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
741
742         pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
743         vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
744         pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
745
746         *vme_base = pci_base + vme_offset;
747         *size = (unsigned long long)(pci_bound - pci_base);
748
749         *enabled = 0;
750         *aspace = 0;
751         *cycle = 0;
752         *dwidth = 0;
753
754         if (ctl & CA91CX42_LSI_CTL_EN)
755                 *enabled = 1;
756
757         /* Setup address space */
758         switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
759         case CA91CX42_LSI_CTL_VAS_A16:
760                 *aspace = VME_A16;
761                 break;
762         case CA91CX42_LSI_CTL_VAS_A24:
763                 *aspace = VME_A24;
764                 break;
765         case CA91CX42_LSI_CTL_VAS_A32:
766                 *aspace = VME_A32;
767                 break;
768         case CA91CX42_LSI_CTL_VAS_CRCSR:
769                 *aspace = VME_CRCSR;
770                 break;
771         case CA91CX42_LSI_CTL_VAS_USER1:
772                 *aspace = VME_USER1;
773                 break;
774         case CA91CX42_LSI_CTL_VAS_USER2:
775                 *aspace = VME_USER2;
776                 break;
777         }
778
779         /* XXX Not sure howto check for MBLT */
780         /* Setup cycle types */
781         if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
782                 *cycle |= VME_BLT;
783         else
784                 *cycle |= VME_SCT;
785
786         if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
787                 *cycle |= VME_SUPER;
788         else
789                 *cycle |= VME_USER;
790
791         if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
792                 *cycle = VME_PROG;
793         else
794                 *cycle = VME_DATA;
795
796         /* Setup data width */
797         switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
798         case CA91CX42_LSI_CTL_VDW_D8:
799                 *dwidth = VME_D8;
800                 break;
801         case CA91CX42_LSI_CTL_VDW_D16:
802                 *dwidth = VME_D16;
803                 break;
804         case CA91CX42_LSI_CTL_VDW_D32:
805                 *dwidth = VME_D32;
806                 break;
807         case CA91CX42_LSI_CTL_VDW_D64:
808                 *dwidth = VME_D64;
809                 break;
810         }
811
812         return 0;
813 }
814
815 int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
816         unsigned long long *vme_base, unsigned long long *size,
817         vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
818 {
819         int retval;
820
821         spin_lock(&(image->lock));
822
823         retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
824                 cycle, dwidth);
825
826         spin_unlock(&(image->lock));
827
828         return retval;
829 }
830
831 ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
832         size_t count, loff_t offset)
833 {
834         ssize_t retval;
835
836         spin_lock(&(image->lock));
837
838         memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
839         retval = count;
840
841         spin_unlock(&(image->lock));
842
843         return retval;
844 }
845
846 ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
847         size_t count, loff_t offset)
848 {
849         int retval = 0;
850
851         spin_lock(&(image->lock));
852
853         memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
854         retval = count;
855
856         spin_unlock(&(image->lock));
857
858         return retval;
859 }
860
861 unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
862         unsigned int mask, unsigned int compare, unsigned int swap,
863         loff_t offset)
864 {
865         u32 pci_addr, result;
866         int i;
867         struct ca91cx42_driver *bridge;
868         struct device *dev;
869
870         bridge = image->parent->driver_priv;
871         dev = image->parent->parent;
872
873         /* Find the PCI address that maps to the desired VME address */
874         i = image->number;
875
876         /* Locking as we can only do one of these at a time */
877         mutex_lock(&(bridge->vme_rmw));
878
879         /* Lock image */
880         spin_lock(&(image->lock));
881
882         pci_addr = (u32)image->kern_base + offset;
883
884         /* Address must be 4-byte aligned */
885         if (pci_addr & 0x3) {
886                 dev_err(dev, "RMW Address not 4-byte aligned\n");
887                 return -EINVAL;
888         }
889
890         /* Ensure RMW Disabled whilst configuring */
891         iowrite32(0, bridge->base + SCYC_CTL);
892
893         /* Configure registers */
894         iowrite32(mask, bridge->base + SCYC_EN);
895         iowrite32(compare, bridge->base + SCYC_CMP);
896         iowrite32(swap, bridge->base + SCYC_SWP);
897         iowrite32(pci_addr, bridge->base + SCYC_ADDR);
898
899         /* Enable RMW */
900         iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);
901
902         /* Kick process off with a read to the required address. */
903         result = ioread32(image->kern_base + offset);
904
905         /* Disable RMW */
906         iowrite32(0, bridge->base + SCYC_CTL);
907
908         spin_unlock(&(image->lock));
909
910         mutex_unlock(&(bridge->vme_rmw));
911
912         return result;
913 }
914
915 int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
916         struct vme_dma_attr *dest, size_t count)
917 {
918         struct ca91cx42_dma_entry *entry, *prev;
919         struct vme_dma_pci *pci_attr;
920         struct vme_dma_vme *vme_attr;
921         dma_addr_t desc_ptr;
922         int retval = 0;
923
924         /* XXX descriptor must be aligned on 64-bit boundaries */
925         entry = (struct ca91cx42_dma_entry *)
926                 kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
927         if (entry == NULL) {
928                 printk(KERN_ERR "Failed to allocate memory for dma resource "
929                         "structure\n");
930                 retval = -ENOMEM;
931                 goto err_mem;
932         }
933
934         /* Test descriptor alignment */
935         if ((unsigned long)&(entry->descriptor) & CA91CX42_DCPP_M) {
936                 printk("Descriptor not aligned to 16 byte boundary as "
937                         "required: %p\n", &(entry->descriptor));
938                 retval = -EINVAL;
939                 goto err_align;
940         }
941
942         memset(&(entry->descriptor), 0, sizeof(struct ca91cx42_dma_descriptor));
943
944         if (dest->type == VME_DMA_VME) {
945                 entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
946                 vme_attr = (struct vme_dma_vme *)dest->private;
947                 pci_attr = (struct vme_dma_pci *)src->private;
948         } else {
949                 vme_attr = (struct vme_dma_vme *)src->private;
950                 pci_attr = (struct vme_dma_pci *)dest->private;
951         }
952
953         /* Check we can do fullfill required attributes */
954         if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
955                 VME_USER2)) != 0) {
956
957                 printk(KERN_ERR "Unsupported cycle type\n");
958                 retval = -EINVAL;
959                 goto err_aspace;
960         }
961
962         if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
963                 VME_PROG | VME_DATA)) != 0) {
964
965                 printk(KERN_ERR "Unsupported cycle type\n");
966                 retval = -EINVAL;
967                 goto err_cycle;
968         }
969
970         /* Check to see if we can fullfill source and destination */
971         if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
972                 ((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
973
974                 printk(KERN_ERR "Cannot perform transfer with this "
975                         "source-destination combination\n");
976                 retval = -EINVAL;
977                 goto err_direct;
978         }
979
980         /* Setup cycle types */
981         if (vme_attr->cycle & VME_BLT)
982                 entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
983
984         /* Setup data width */
985         switch (vme_attr->dwidth) {
986         case VME_D8:
987                 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
988                 break;
989         case VME_D16:
990                 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
991                 break;
992         case VME_D32:
993                 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
994                 break;
995         case VME_D64:
996                 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
997                 break;
998         default:
999                 printk(KERN_ERR "Invalid data width\n");
1000                 return -EINVAL;
1001         }
1002
1003         /* Setup address space */
1004         switch (vme_attr->aspace) {
1005         case VME_A16:
1006                 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
1007                 break;
1008         case VME_A24:
1009                 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
1010                 break;
1011         case VME_A32:
1012                 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
1013                 break;
1014         case VME_USER1:
1015                 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
1016                 break;
1017         case VME_USER2:
1018                 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
1019                 break;
1020         default:
1021                 printk(KERN_ERR "Invalid address space\n");
1022                 return -EINVAL;
1023                 break;
1024         }
1025
1026         if (vme_attr->cycle & VME_SUPER)
1027                 entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
1028         if (vme_attr->cycle & VME_PROG)
1029                 entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
1030
1031         entry->descriptor.dtbc = count;
1032         entry->descriptor.dla = pci_attr->address;
1033         entry->descriptor.dva = vme_attr->address;
1034         entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
1035
1036         /* Add to list */
1037         list_add_tail(&(entry->list), &(list->entries));
1038
1039         /* Fill out previous descriptors "Next Address" */
1040         if (entry->list.prev != &(list->entries)) {
1041                 prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
1042                         list);
1043                 /* We need the bus address for the pointer */
1044                 desc_ptr = virt_to_bus(&(entry->descriptor));
1045                 prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
1046         }
1047
1048         return 0;
1049
1050 err_cycle:
1051 err_aspace:
1052 err_direct:
1053 err_align:
1054         kfree(entry);
1055 err_mem:
1056         return retval;
1057 }
1058
1059 static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
1060 {
1061         u32 tmp;
1062         struct ca91cx42_driver *bridge;
1063
1064         bridge = ca91cx42_bridge->driver_priv;
1065
1066         tmp = ioread32(bridge->base + DGCS);
1067
1068         if (tmp & CA91CX42_DGCS_ACT)
1069                 return 0;
1070         else
1071                 return 1;
1072 }
1073
1074 int ca91cx42_dma_list_exec(struct vme_dma_list *list)
1075 {
1076         struct vme_dma_resource *ctrlr;
1077         struct ca91cx42_dma_entry *entry;
1078         int retval = 0;
1079         dma_addr_t bus_addr;
1080         u32 val;
1081
1082         struct ca91cx42_driver *bridge;
1083
1084         ctrlr = list->parent;
1085
1086         bridge = ctrlr->parent->driver_priv;
1087
1088         mutex_lock(&(ctrlr->mtx));
1089
1090         if (!(list_empty(&(ctrlr->running)))) {
1091                 /*
1092                  * XXX We have an active DMA transfer and currently haven't
1093                  *     sorted out the mechanism for "pending" DMA transfers.
1094                  *     Return busy.
1095                  */
1096                 /* Need to add to pending here */
1097                 mutex_unlock(&(ctrlr->mtx));
1098                 return -EBUSY;
1099         } else {
1100                 list_add(&(list->list), &(ctrlr->running));
1101         }
1102
1103         /* Get first bus address and write into registers */
1104         entry = list_first_entry(&(list->entries), struct ca91cx42_dma_entry,
1105                 list);
1106
1107         bus_addr = virt_to_bus(&(entry->descriptor));
1108
1109         mutex_unlock(&(ctrlr->mtx));
1110
1111         iowrite32(0, bridge->base + DTBC);
1112         iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
1113
1114         /* Start the operation */
1115         val = ioread32(bridge->base + DGCS);
1116
1117         /* XXX Could set VMEbus On and Off Counters here */
1118         val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);
1119
1120         val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
1121                 CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
1122                 CA91CX42_DGCS_PERR);
1123
1124         iowrite32(val, bridge->base + DGCS);
1125
1126         val |= CA91CX42_DGCS_GO;
1127
1128         iowrite32(val, bridge->base + DGCS);
1129
1130         wait_event_interruptible(bridge->dma_queue,
1131                 ca91cx42_dma_busy(ctrlr->parent));
1132
1133         /*
1134          * Read status register, this register is valid until we kick off a
1135          * new transfer.
1136          */
1137         val = ioread32(bridge->base + DGCS);
1138
1139         if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
1140                 CA91CX42_DGCS_PERR)) {
1141
1142                 printk(KERN_ERR "ca91c042: DMA Error. DGCS=%08X\n", val);
1143                 val = ioread32(bridge->base + DCTL);
1144         }
1145
1146         /* Remove list from running list */
1147         mutex_lock(&(ctrlr->mtx));
1148         list_del(&(list->list));
1149         mutex_unlock(&(ctrlr->mtx));
1150
1151         return retval;
1152
1153 }
1154
1155 int ca91cx42_dma_list_empty(struct vme_dma_list *list)
1156 {
1157         struct list_head *pos, *temp;
1158         struct ca91cx42_dma_entry *entry;
1159
1160         /* detach and free each entry */
1161         list_for_each_safe(pos, temp, &(list->entries)) {
1162                 list_del(pos);
1163                 entry = list_entry(pos, struct ca91cx42_dma_entry, list);
1164                 kfree(entry);
1165         }
1166
1167         return 0;
1168 }
1169
1170 /*
1171  * All 4 location monitors reside at the same base - this is therefore a
1172  * system wide configuration.
1173  *
1174  * This does not enable the LM monitor - that should be done when the first
1175  * callback is attached and disabled when the last callback is removed.
1176  */
1177 int ca91cx42_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1178         vme_address_t aspace, vme_cycle_t cycle)
1179 {
1180         u32 temp_base, lm_ctl = 0;
1181         int i;
1182         struct ca91cx42_driver *bridge;
1183         struct device *dev;
1184
1185         bridge = lm->parent->driver_priv;
1186         dev = lm->parent->parent;
1187
1188         /* Check the alignment of the location monitor */
1189         temp_base = (u32)lm_base;
1190         if (temp_base & 0xffff) {
1191                 dev_err(dev, "Location monitor must be aligned to 64KB "
1192                         "boundary");
1193                 return -EINVAL;
1194         }
1195
1196         mutex_lock(&(lm->mtx));
1197
1198         /* If we already have a callback attached, we can't move it! */
1199         for (i = 0; i < lm->monitors; i++) {
1200                 if (bridge->lm_callback[i] != NULL) {
1201                         mutex_unlock(&(lm->mtx));
1202                         dev_err(dev, "Location monitor callback attached, "
1203                                 "can't reset\n");
1204                         return -EBUSY;
1205                 }
1206         }
1207
1208         switch (aspace) {
1209         case VME_A16:
1210                 lm_ctl |= CA91CX42_LM_CTL_AS_A16;
1211                 break;
1212         case VME_A24:
1213                 lm_ctl |= CA91CX42_LM_CTL_AS_A24;
1214                 break;
1215         case VME_A32:
1216                 lm_ctl |= CA91CX42_LM_CTL_AS_A32;
1217                 break;
1218         default:
1219                 mutex_unlock(&(lm->mtx));
1220                 dev_err(dev, "Invalid address space\n");
1221                 return -EINVAL;
1222                 break;
1223         }
1224
1225         if (cycle & VME_SUPER)
1226                 lm_ctl |= CA91CX42_LM_CTL_SUPR;
1227         if (cycle & VME_USER)
1228                 lm_ctl |= CA91CX42_LM_CTL_NPRIV;
1229         if (cycle & VME_PROG)
1230                 lm_ctl |= CA91CX42_LM_CTL_PGM;
1231         if (cycle & VME_DATA)
1232                 lm_ctl |= CA91CX42_LM_CTL_DATA;
1233
1234         iowrite32(lm_base, bridge->base + LM_BS);
1235         iowrite32(lm_ctl, bridge->base + LM_CTL);
1236
1237         mutex_unlock(&(lm->mtx));
1238
1239         return 0;
1240 }
1241
1242 /* Get configuration of the callback monitor and return whether it is enabled
1243  * or disabled.
1244  */
1245 int ca91cx42_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
1246         vme_address_t *aspace, vme_cycle_t *cycle)
1247 {
1248         u32 lm_ctl, enabled = 0;
1249         struct ca91cx42_driver *bridge;
1250
1251         bridge = lm->parent->driver_priv;
1252
1253         mutex_lock(&(lm->mtx));
1254
1255         *lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
1256         lm_ctl = ioread32(bridge->base + LM_CTL);
1257
1258         if (lm_ctl & CA91CX42_LM_CTL_EN)
1259                 enabled = 1;
1260
1261         if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
1262                 *aspace = VME_A16;
1263         if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
1264                 *aspace = VME_A24;
1265         if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
1266                 *aspace = VME_A32;
1267
1268         *cycle = 0;
1269         if (lm_ctl & CA91CX42_LM_CTL_SUPR)
1270                 *cycle |= VME_SUPER;
1271         if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
1272                 *cycle |= VME_USER;
1273         if (lm_ctl & CA91CX42_LM_CTL_PGM)
1274                 *cycle |= VME_PROG;
1275         if (lm_ctl & CA91CX42_LM_CTL_DATA)
1276                 *cycle |= VME_DATA;
1277
1278         mutex_unlock(&(lm->mtx));
1279
1280         return enabled;
1281 }
1282
1283 /*
1284  * Attach a callback to a specific location monitor.
1285  *
1286  * Callback will be passed the monitor triggered.
1287  */
1288 int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
1289         void (*callback)(int))
1290 {
1291         u32 lm_ctl, tmp;
1292         struct ca91cx42_driver *bridge;
1293         struct device *dev;
1294
1295         bridge = lm->parent->driver_priv;
1296         dev = lm->parent->parent;
1297
1298         mutex_lock(&(lm->mtx));
1299
1300         /* Ensure that the location monitor is configured - need PGM or DATA */
1301         lm_ctl = ioread32(bridge->base + LM_CTL);
1302         if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
1303                 mutex_unlock(&(lm->mtx));
1304                 dev_err(dev, "Location monitor not properly configured\n");
1305                 return -EINVAL;
1306         }
1307
1308         /* Check that a callback isn't already attached */
1309         if (bridge->lm_callback[monitor] != NULL) {
1310                 mutex_unlock(&(lm->mtx));
1311                 dev_err(dev, "Existing callback attached\n");
1312                 return -EBUSY;
1313         }
1314
1315         /* Attach callback */
1316         bridge->lm_callback[monitor] = callback;
1317
1318         /* Enable Location Monitor interrupt */
1319         tmp = ioread32(bridge->base + LINT_EN);
1320         tmp |= CA91CX42_LINT_LM[monitor];
1321         iowrite32(tmp, bridge->base + LINT_EN);
1322
1323         /* Ensure that global Location Monitor Enable set */
1324         if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
1325                 lm_ctl |= CA91CX42_LM_CTL_EN;
1326                 iowrite32(lm_ctl, bridge->base + LM_CTL);
1327         }
1328
1329         mutex_unlock(&(lm->mtx));
1330
1331         return 0;
1332 }
1333
/*
 * Detach a callback function from a specific location monitor.
 */
int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&(lm->mtx));

	/* Disable Location Monitor and ensure previous interrupts are clear */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp &= ~CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Acknowledge any latched interrupt for this monitor before the
	 * callback goes away */
	iowrite32(CA91CX42_LINT_LM[monitor],
		 bridge->base + LINT_STAT);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3)) == 0) {
		tmp = ioread32(bridge->base + LM_CTL);
		tmp &= ~CA91CX42_LM_CTL_EN;
		iowrite32(tmp, bridge->base + LM_CTL);
	}

	mutex_unlock(&(lm->mtx));

	return 0;
}
1369
1370 int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
1371 {
1372         u32 slot = 0;
1373         struct ca91cx42_driver *bridge;
1374
1375         bridge = ca91cx42_bridge->driver_priv;
1376
1377         if (!geoid) {
1378                 slot = ioread32(bridge->base + VCSR_BS);
1379                 slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
1380         } else
1381                 slot = geoid;
1382
1383         return (int)slot;
1384
1385 }
1386
/* Module entry point: register the PCI driver with the PCI core. */
static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
1391
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	slot = ca91cx42_slot_get(ca91cx42_bridge);

	/* Write CSR Base Address if slot ID is supplied as a module param */
	if (geoid)
		iowrite32(geoid << 27, bridge->base + VCSR_BS);

	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image (DMA-coherent so the bridge can
	 * serve inbound CR/CSR reads/writes from it) */
	bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&(bridge->crcsr_bus));
	if (bridge->crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	/* Each slot owns a 512kB window of CR/CSR space; VCSR_TO holds the
	 * translation offset mapping that window onto the PCI buffer */
	crcsr_addr = slot * (512 * 1024);
	iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);

	/* Enable the CR/CSR window */
	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	return 0;
}
1442
1443 static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
1444         struct pci_dev *pdev)
1445 {
1446         u32 tmp;
1447         struct ca91cx42_driver *bridge;
1448
1449         bridge = ca91cx42_bridge->driver_priv;
1450
1451         /* Turn off CR/CSR space */
1452         tmp = ioread32(bridge->base + VCSR_CTL);
1453         tmp &= ~CA91CX42_VCSR_CTL_EN;
1454         iowrite32(tmp, bridge->base + VCSR_CTL);
1455
1456         /* Free image */
1457         iowrite32(0, bridge->base + VCSR_TO);
1458
1459         pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
1460                 bridge->crcsr_bus);
1461 }
1462
1463 static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1464 {
1465         int retval, i;
1466         u32 data;
1467         struct list_head *pos = NULL;
1468         struct vme_bridge *ca91cx42_bridge;
1469         struct ca91cx42_driver *ca91cx42_device;
1470         struct vme_master_resource *master_image;
1471         struct vme_slave_resource *slave_image;
1472         struct vme_dma_resource *dma_ctrlr;
1473         struct vme_lm_resource *lm;
1474
1475         /* We want to support more than one of each bridge so we need to
1476          * dynamically allocate the bridge structure
1477          */
1478         ca91cx42_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);
1479
1480         if (ca91cx42_bridge == NULL) {
1481                 dev_err(&pdev->dev, "Failed to allocate memory for device "
1482                         "structure\n");
1483                 retval = -ENOMEM;
1484                 goto err_struct;
1485         }
1486
1487         memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));
1488
1489         ca91cx42_device = kmalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
1490
1491         if (ca91cx42_device == NULL) {
1492                 dev_err(&pdev->dev, "Failed to allocate memory for device "
1493                         "structure\n");
1494                 retval = -ENOMEM;
1495                 goto err_driver;
1496         }
1497
1498         memset(ca91cx42_device, 0, sizeof(struct ca91cx42_driver));
1499
1500         ca91cx42_bridge->driver_priv = ca91cx42_device;
1501
1502         /* Enable the device */
1503         retval = pci_enable_device(pdev);
1504         if (retval) {
1505                 dev_err(&pdev->dev, "Unable to enable device\n");
1506                 goto err_enable;
1507         }
1508
1509         /* Map Registers */
1510         retval = pci_request_regions(pdev, driver_name);
1511         if (retval) {
1512                 dev_err(&pdev->dev, "Unable to reserve resources\n");
1513                 goto err_resource;
1514         }
1515
1516         /* map registers in BAR 0 */
1517         ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
1518                 4096);
1519         if (!ca91cx42_device->base) {
1520                 dev_err(&pdev->dev, "Unable to remap CRG region\n");
1521                 retval = -EIO;
1522                 goto err_remap;
1523         }
1524
1525         /* Check to see if the mapping worked out */
1526         data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
1527         if (data != PCI_VENDOR_ID_TUNDRA) {
1528                 dev_err(&pdev->dev, "PCI_ID check failed\n");
1529                 retval = -EIO;
1530                 goto err_test;
1531         }
1532
1533         /* Initialize wait queues & mutual exclusion flags */
1534         init_waitqueue_head(&(ca91cx42_device->dma_queue));
1535         init_waitqueue_head(&(ca91cx42_device->iack_queue));
1536         mutex_init(&(ca91cx42_device->vme_int));
1537         mutex_init(&(ca91cx42_device->vme_rmw));
1538
1539         ca91cx42_bridge->parent = &(pdev->dev);
1540         strcpy(ca91cx42_bridge->name, driver_name);
1541
1542         /* Setup IRQ */
1543         retval = ca91cx42_irq_init(ca91cx42_bridge);
1544         if (retval != 0) {
1545                 dev_err(&pdev->dev, "Chip Initialization failed.\n");
1546                 goto err_irq;
1547         }
1548
1549         /* Add master windows to list */
1550         INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
1551         for (i = 0; i < CA91C142_MAX_MASTER; i++) {
1552                 master_image = kmalloc(sizeof(struct vme_master_resource),
1553                         GFP_KERNEL);
1554                 if (master_image == NULL) {
1555                         dev_err(&pdev->dev, "Failed to allocate memory for "
1556                         "master resource structure\n");
1557                         retval = -ENOMEM;
1558                         goto err_master;
1559                 }
1560                 master_image->parent = ca91cx42_bridge;
1561                 spin_lock_init(&(master_image->lock));
1562                 master_image->locked = 0;
1563                 master_image->number = i;
1564                 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
1565                         VME_CRCSR | VME_USER1 | VME_USER2;
1566                 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1567                         VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1568                 master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
1569                 memset(&(master_image->bus_resource), 0,
1570                         sizeof(struct resource));
1571                 master_image->kern_base  = NULL;
1572                 list_add_tail(&(master_image->list),
1573                         &(ca91cx42_bridge->master_resources));
1574         }
1575
1576         /* Add slave windows to list */
1577         INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
1578         for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
1579                 slave_image = kmalloc(sizeof(struct vme_slave_resource),
1580                         GFP_KERNEL);
1581                 if (slave_image == NULL) {
1582                         dev_err(&pdev->dev, "Failed to allocate memory for "
1583                         "slave resource structure\n");
1584                         retval = -ENOMEM;
1585                         goto err_slave;
1586                 }
1587                 slave_image->parent = ca91cx42_bridge;
1588                 mutex_init(&(slave_image->mtx));
1589                 slave_image->locked = 0;
1590                 slave_image->number = i;
1591                 slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
1592                         VME_USER2;
1593
1594                 /* Only windows 0 and 4 support A16 */
1595                 if (i == 0 || i == 4)
1596                         slave_image->address_attr |= VME_A16;
1597
1598                 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1599                         VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1600                 list_add_tail(&(slave_image->list),
1601                         &(ca91cx42_bridge->slave_resources));
1602         }
1603
1604         /* Add dma engines to list */
1605         INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
1606         for (i = 0; i < CA91C142_MAX_DMA; i++) {
1607                 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
1608                         GFP_KERNEL);
1609                 if (dma_ctrlr == NULL) {
1610                         dev_err(&pdev->dev, "Failed to allocate memory for "
1611                         "dma resource structure\n");
1612                         retval = -ENOMEM;
1613                         goto err_dma;
1614                 }
1615                 dma_ctrlr->parent = ca91cx42_bridge;
1616                 mutex_init(&(dma_ctrlr->mtx));
1617                 dma_ctrlr->locked = 0;
1618                 dma_ctrlr->number = i;
1619                 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
1620                         VME_DMA_MEM_TO_VME;
1621                 INIT_LIST_HEAD(&(dma_ctrlr->pending));
1622                 INIT_LIST_HEAD(&(dma_ctrlr->running));
1623                 list_add_tail(&(dma_ctrlr->list),
1624                         &(ca91cx42_bridge->dma_resources));
1625         }
1626
1627         /* Add location monitor to list */
1628         INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
1629         lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
1630         if (lm == NULL) {
1631                 dev_err(&pdev->dev, "Failed to allocate memory for "
1632                 "location monitor resource structure\n");
1633                 retval = -ENOMEM;
1634                 goto err_lm;
1635         }
1636         lm->parent = ca91cx42_bridge;
1637         mutex_init(&(lm->mtx));
1638         lm->locked = 0;
1639         lm->number = 1;
1640         lm->monitors = 4;
1641         list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));
1642
1643         ca91cx42_bridge->slave_get = ca91cx42_slave_get;
1644         ca91cx42_bridge->slave_set = ca91cx42_slave_set;
1645         ca91cx42_bridge->master_get = ca91cx42_master_get;
1646         ca91cx42_bridge->master_set = ca91cx42_master_set;
1647         ca91cx42_bridge->master_read = ca91cx42_master_read;
1648         ca91cx42_bridge->master_write = ca91cx42_master_write;
1649         ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
1650         ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
1651         ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
1652         ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
1653         ca91cx42_bridge->irq_set = ca91cx42_irq_set;
1654         ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
1655         ca91cx42_bridge->lm_set = ca91cx42_lm_set;
1656         ca91cx42_bridge->lm_get = ca91cx42_lm_get;
1657         ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
1658         ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
1659         ca91cx42_bridge->slot_get = ca91cx42_slot_get;
1660
1661         data = ioread32(ca91cx42_device->base + MISC_CTL);
1662         dev_info(&pdev->dev, "Board is%s the VME system controller\n",
1663                 (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
1664         dev_info(&pdev->dev, "Slot ID is %d\n",
1665                 ca91cx42_slot_get(ca91cx42_bridge));
1666
1667         if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev)) {
1668                 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
1669         }
1670
1671         /* Need to save ca91cx42_bridge pointer locally in link list for use in
1672          * ca91cx42_remove()
1673          */
1674         retval = vme_register_bridge(ca91cx42_bridge);
1675         if (retval != 0) {
1676                 dev_err(&pdev->dev, "Chip Registration failed.\n");
1677                 goto err_reg;
1678         }
1679
1680         pci_set_drvdata(pdev, ca91cx42_bridge);
1681
1682         return 0;
1683
1684         vme_unregister_bridge(ca91cx42_bridge);
1685 err_reg:
1686         ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1687 err_lm:
1688         /* resources are stored in link list */
1689         list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
1690                 lm = list_entry(pos, struct vme_lm_resource, list);
1691                 list_del(pos);
1692                 kfree(lm);
1693         }
1694 err_dma:
1695         /* resources are stored in link list */
1696         list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
1697                 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1698                 list_del(pos);
1699                 kfree(dma_ctrlr);
1700         }
1701 err_slave:
1702         /* resources are stored in link list */
1703         list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
1704                 slave_image = list_entry(pos, struct vme_slave_resource, list);
1705                 list_del(pos);
1706                 kfree(slave_image);
1707         }
1708 err_master:
1709         /* resources are stored in link list */
1710         list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
1711                 master_image = list_entry(pos, struct vme_master_resource,
1712                         list);
1713                 list_del(pos);
1714                 kfree(master_image);
1715         }
1716
1717         ca91cx42_irq_exit(ca91cx42_device, pdev);
1718 err_irq:
1719 err_test:
1720         iounmap(ca91cx42_device->base);
1721 err_remap:
1722         pci_release_regions(pdev);
1723 err_resource:
1724         pci_disable_device(pdev);
1725 err_enable:
1726         kfree(ca91cx42_device);
1727 err_driver:
1728         kfree(ca91cx42_bridge);
1729 err_struct:
1730         return retval;
1731
1732 }
1733
1734 void ca91cx42_remove(struct pci_dev *pdev)
1735 {
1736         struct list_head *pos = NULL;
1737         struct vme_master_resource *master_image;
1738         struct vme_slave_resource *slave_image;
1739         struct vme_dma_resource *dma_ctrlr;
1740         struct vme_lm_resource *lm;
1741         struct ca91cx42_driver *bridge;
1742         struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
1743
1744         bridge = ca91cx42_bridge->driver_priv;
1745
1746
1747         /* Turn off Ints */
1748         iowrite32(0, bridge->base + LINT_EN);
1749
1750         /* Turn off the windows */
1751         iowrite32(0x00800000, bridge->base + LSI0_CTL);
1752         iowrite32(0x00800000, bridge->base + LSI1_CTL);
1753         iowrite32(0x00800000, bridge->base + LSI2_CTL);
1754         iowrite32(0x00800000, bridge->base + LSI3_CTL);
1755         iowrite32(0x00800000, bridge->base + LSI4_CTL);
1756         iowrite32(0x00800000, bridge->base + LSI5_CTL);
1757         iowrite32(0x00800000, bridge->base + LSI6_CTL);
1758         iowrite32(0x00800000, bridge->base + LSI7_CTL);
1759         iowrite32(0x00F00000, bridge->base + VSI0_CTL);
1760         iowrite32(0x00F00000, bridge->base + VSI1_CTL);
1761         iowrite32(0x00F00000, bridge->base + VSI2_CTL);
1762         iowrite32(0x00F00000, bridge->base + VSI3_CTL);
1763         iowrite32(0x00F00000, bridge->base + VSI4_CTL);
1764         iowrite32(0x00F00000, bridge->base + VSI5_CTL);
1765         iowrite32(0x00F00000, bridge->base + VSI6_CTL);
1766         iowrite32(0x00F00000, bridge->base + VSI7_CTL);
1767
1768         vme_unregister_bridge(ca91cx42_bridge);
1769
1770         ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1771
1772         /* resources are stored in link list */
1773         list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
1774                 lm = list_entry(pos, struct vme_lm_resource, list);
1775                 list_del(pos);
1776                 kfree(lm);
1777         }
1778
1779         /* resources are stored in link list */
1780         list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
1781                 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1782                 list_del(pos);
1783                 kfree(dma_ctrlr);
1784         }
1785
1786         /* resources are stored in link list */
1787         list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
1788                 slave_image = list_entry(pos, struct vme_slave_resource, list);
1789                 list_del(pos);
1790                 kfree(slave_image);
1791         }
1792
1793         /* resources are stored in link list */
1794         list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
1795                 master_image = list_entry(pos, struct vme_master_resource,
1796                         list);
1797                 list_del(pos);
1798                 kfree(master_image);
1799         }
1800
1801         ca91cx42_irq_exit(bridge, pdev);
1802
1803         iounmap(bridge->base);
1804
1805         pci_release_regions(pdev);
1806
1807         pci_disable_device(pdev);
1808
1809         kfree(ca91cx42_bridge);
1810 }
1811
/*
 * Module exit: unregister the PCI driver.  The PCI core invokes the
 * remove callback (ca91cx42_remove) for each still-bound device as part
 * of unregistration, so no per-device cleanup is needed here.
 */
static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}
1816
/* "geoid" lets the user force a geographic slot address from the command
 * line instead of relying on backplane geographical addressing. */
MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

/* Module entry/exit points: register/unregister the PCI driver. */
module_init(ca91cx42_init);
module_exit(ca91cx42_exit);