- patches.suse/slab-handle-memoryless-nodes-v2a.patch: Refresh.
[linux-flexiantxendom0-3.2.10.git] / drivers / staging / poch / poch.c
1 /*
2  * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
3  *
4  * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
5  *
6  * Licensed under GPL version 2 only.
7  */
8
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/uio_driver.h>
13 #include <linux/spinlock.h>
14 #include <linux/cdev.h>
15 #include <linux/delay.h>
16 #include <linux/sysfs.h>
17 #include <linux/poll.h>
18 #include <linux/idr.h>
19 #include <linux/interrupt.h>
20 #include <linux/init.h>
21 #include <linux/ioctl.h>
22 #include <linux/io.h>
23 #include <linux/sched.h>
24
25 #include "poch.h"
26
27 #include <asm/cacheflush.h>
28
29 #ifndef PCI_VENDOR_ID_RRAPIDS
30 #define PCI_VENDOR_ID_RRAPIDS 0x17D2
31 #endif
32
33 #ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
34 #define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
35 #endif
36
37 #define POCH_NCHANNELS 2
38
39 #define MAX_POCH_CARDS 8
40 #define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
41
42 #define DRV_NAME "poch"
43 #define PFX      DRV_NAME ": "
44
45 /*
46  * BAR0 Bridge Register Definitions
47  */
48
49 #define BRIDGE_REV_REG                  0x0
50 #define BRIDGE_INT_MASK_REG             0x4
51 #define BRIDGE_INT_STAT_REG             0x8
52
53 #define BRIDGE_INT_ACTIVE               (0x1 << 31)
54 #define BRIDGE_INT_FPGA                 (0x1 << 2)
55 #define BRIDGE_INT_TEMP_FAIL            (0x1 << 1)
56 #define BRIDGE_INT_TEMP_WARN            (0x1 << 0)
57
58 #define BRIDGE_FPGA_RESET_REG           0xC
59
60 #define BRIDGE_CARD_POWER_REG           0x10
61 #define BRIDGE_CARD_POWER_EN            (0x1 << 0)
62 #define BRIDGE_CARD_POWER_PROG_DONE     (0x1 << 31)
63
64 #define BRIDGE_JTAG_REG                 0x14
65 #define BRIDGE_DMA_GO_REG               0x18
66 #define BRIDGE_STAT_0_REG               0x1C
67 #define BRIDGE_STAT_1_REG               0x20
68 #define BRIDGE_STAT_2_REG               0x24
69 #define BRIDGE_STAT_3_REG               0x28
70 #define BRIDGE_TEMP_STAT_REG            0x2C
71 #define BRIDGE_TEMP_THRESH_REG          0x30
72 #define BRIDGE_EEPROM_REVSEL_REG        0x34
73 #define BRIDGE_CIS_STRUCT_REG           0x100
74 #define BRIDGE_BOARDREV_REG             0x124
75
76 /*
77  * BAR1 FPGA Register Definitions
78  */
79
80 #define FPGA_IFACE_REV_REG              0x0
81 #define FPGA_RX_BLOCK_SIZE_REG          0x8
82 #define FPGA_TX_BLOCK_SIZE_REG          0xC
83 #define FPGA_RX_BLOCK_COUNT_REG         0x10
84 #define FPGA_TX_BLOCK_COUNT_REG         0x14
85 #define FPGA_RX_CURR_DMA_BLOCK_REG      0x18
86 #define FPGA_TX_CURR_DMA_BLOCK_REG      0x1C
87 #define FPGA_RX_GROUP_COUNT_REG         0x20
88 #define FPGA_TX_GROUP_COUNT_REG         0x24
89 #define FPGA_RX_CURR_GROUP_REG          0x28
90 #define FPGA_TX_CURR_GROUP_REG          0x2C
91 #define FPGA_RX_CURR_PCI_REG            0x38
92 #define FPGA_TX_CURR_PCI_REG            0x3C
93 #define FPGA_RX_GROUP0_START_REG        0x40
94 #define FPGA_TX_GROUP0_START_REG        0xC0
95 #define FPGA_DMA_DESC_1_REG             0x140
96 #define FPGA_DMA_DESC_2_REG             0x144
97 #define FPGA_DMA_DESC_3_REG             0x148
98 #define FPGA_DMA_DESC_4_REG             0x14C
99
100 #define FPGA_DMA_INT_STAT_REG           0x150
101 #define FPGA_DMA_INT_MASK_REG           0x154
102 #define FPGA_DMA_INT_RX         (1 << 0)
103 #define FPGA_DMA_INT_TX         (1 << 1)
104
105 #define FPGA_RX_GROUPS_PER_INT_REG      0x158
106 #define FPGA_TX_GROUPS_PER_INT_REG      0x15C
107 #define FPGA_DMA_ADR_PAGE_REG           0x160
108 #define FPGA_FPGA_REV_REG               0x200
109
110 #define FPGA_ADC_CLOCK_CTL_REG          0x204
111 #define FPGA_ADC_CLOCK_CTL_OSC_EN       (0x1 << 3)
112 #define FPGA_ADC_CLOCK_LOCAL_CLK        (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
113 #define FPGA_ADC_CLOCK_EXT_SAMP_CLK     0X0
114
115 #define FPGA_ADC_DAC_EN_REG             0x208
116 #define FPGA_ADC_DAC_EN_DAC_OFF         (0x1 << 1)
117 #define FPGA_ADC_DAC_EN_ADC_OFF         (0x1 << 0)
118
119 #define FPGA_INT_STAT_REG               0x20C
120 #define FPGA_INT_MASK_REG               0x210
121 #define FPGA_INT_PLL_UNLOCKED           (0x1 << 9)
122 #define FPGA_INT_DMA_CORE               (0x1 << 8)
123 #define FPGA_INT_TX_FF_EMPTY            (0x1 << 7)
124 #define FPGA_INT_RX_FF_EMPTY            (0x1 << 6)
125 #define FPGA_INT_TX_FF_OVRFLW           (0x1 << 3)
126 #define FPGA_INT_RX_FF_OVRFLW           (0x1 << 2)
127 #define FPGA_INT_TX_ACQ_DONE            (0x1 << 1)
128 #define FPGA_INT_RX_ACQ_DONE            (0x1)
129
130 #define FPGA_RX_CTL_REG                 0x214
131 #define FPGA_RX_CTL_FIFO_FLUSH          (0x1 << 9)
132 #define FPGA_RX_CTL_SYNTH_DATA          (0x1 << 8)
133 #define FPGA_RX_CTL_CONT_CAP            (0x0 << 1)
134 #define FPGA_RX_CTL_SNAP_CAP            (0x1 << 1)
135
136 #define FPGA_RX_ARM_REG                 0x21C
137
138 #define FPGA_DOM_REG                    0x224
139 #define FPGA_DOM_DCM_RESET              (0x1 << 5)
140 #define FPGA_DOM_SOFT_RESET             (0x1 << 4)
141 #define FPGA_DOM_DUAL_M_SG_DMA          (0x0)
142 #define FPGA_DOM_TARGET_ACCESS          (0x1)
143
144 #define FPGA_TX_CTL_REG                 0x228
145 #define FPGA_TX_CTL_FIFO_FLUSH          (0x1 << 9)
146 #define FPGA_TX_CTL_OUTPUT_ZERO         (0x0 << 2)
147 #define FPGA_TX_CTL_OUTPUT_CARDBUS      (0x1 << 2)
148 #define FPGA_TX_CTL_OUTPUT_ADC          (0x2 << 2)
149 #define FPGA_TX_CTL_OUTPUT_SNAPSHOT     (0x3 << 2)
150 #define FPGA_TX_CTL_LOOPBACK            (0x1 << 0)
151
152 #define FPGA_ENDIAN_MODE_REG            0x22C
153 #define FPGA_RX_FIFO_COUNT_REG          0x28C
154 #define FPGA_TX_ENABLE_REG              0x298
155 #define FPGA_TX_TRIGGER_REG             0x29C
156 #define FPGA_TX_DATAMEM_COUNT_REG       0x2A8
157 #define FPGA_CAP_FIFO_REG               0x300
158 #define FPGA_TX_SNAPSHOT_REG            0x8000
159
160 /*
161  * Channel Index Definitions
162  */
163
/* Channel indices within a card; RX is channel 0, TX is channel 1. */
enum {
        CHNO_RX_CHANNEL,
        CHNO_TX_CHANNEL,
};

struct poch_dev;

/* Data direction of a channel, as reported by the 'dir' sysfs attribute. */
enum channel_dir {
        CHANNEL_DIR_RX,
        CHANNEL_DIR_TX,
};
175
/* Descriptor for one DMA group's buffer. */
struct poch_group_info {
        struct page *pg;                /* First page of the group's page block. */
        dma_addr_t dma_addr;            /* Address programmed into the FPGA group register. */
        unsigned long user_offset;      /* Byte offset of this group within the mmap region. */
};
181
/*
 * Per-channel state.  Each card exposes POCH_NCHANNELS channels (RX
 * and TX), each backed by its own char device minor.
 */
struct channel_info {
        unsigned int chno;      /* CHNO_RX_CHANNEL or CHNO_TX_CHANNEL. */

        /* Buffer geometry requested via sysfs; latched into the
         * block_size/group_size/group_count fields below at open time
         * by channel_latch_attr().
         */
        atomic_t sys_block_size;
        atomic_t sys_group_size;
        atomic_t sys_group_count;

        enum channel_dir dir;

        unsigned long block_size;
        unsigned long group_size;
        unsigned long group_count;

        /* Contains the DMA address and VM offset of each group. */
        struct poch_group_info *groups;

        /* Contains the header and circular buffer exported to userspace. */
        spinlock_t group_offsets_lock;

        /* Last group consumed by user space. */
        unsigned int consumed;
        /* Last group indicated as 'complete' to user space. */
        unsigned int transfer;

        /* poll() sleeps on this queue (see poch_poll()). */
        wait_queue_head_t wq;

        union {
                unsigned int data_available;    /* RX naming. */
                unsigned int space_available;   /* TX naming (same storage). */
        };

        void __iomem *bridge_iomem;
        void __iomem *fpga_iomem;
        /* Points at the owning poch_dev's lock; shared between the RX
         * and TX channels because the DMA address page register is
         * shared (see channel_dma_init()).
         */
        spinlock_t *iomem_lock;

        atomic_t free;          /* 1 when the channel is not currently open. */
        atomic_t inited;

        /* Error counters */
        struct poch_counters counters;
        spinlock_t counters_lock;

        struct device *dev;
};
226
/*
 * Per-card state: one UIO device plus up to POCH_NCHANNELS char-device
 * channels sharing the bridge and FPGA register windows.
 */
struct poch_dev {
        struct uio_info uio;
        struct pci_dev *pci_dev;
        unsigned int nchannels;
        struct channel_info channels[POCH_NCHANNELS];
        struct cdev cdev;

        /* Counts the no. of channels that have been opened. On first
         * open, the card is powered on. On last channel close, the
         * card is powered off.
         */
        atomic_t usage;

        void __iomem *bridge_iomem;     /* BAR0: CardBus bridge registers. */
        void __iomem *fpga_iomem;       /* BAR1: FPGA registers. */
        spinlock_t iomem_lock;          /* Serializes shared DMA page-register access. */

        struct device *dev;
};
246
247 static int synth_rx;
248 module_param(synth_rx, bool, 0600);
249 MODULE_PARM_DESC(synth_rx,
250                 "Synthesize received values using a counter. Default: No");
251
252 static int loopback;
253 module_param(loopback, bool, 0600);
254 MODULE_PARM_DESC(loopback,
255                 "Enable hardware loopback of trasnmitted data. Default: No");
256
static dev_t poch_first_dev;            /* Base of the allocated char-dev number range. */
static struct class *poch_cls;          /* Device class for the per-channel sysfs nodes. */
static DEFINE_IDR(poch_ids);            /* Allocates per-card indices. */
260
261 static ssize_t store_block_size(struct device *dev,
262                                 struct device_attribute *attr,
263                                 const char *buf, size_t count)
264 {
265         struct channel_info *channel = dev_get_drvdata(dev);
266         unsigned long block_size;
267
268         sscanf(buf, "%lu", &block_size);
269         atomic_set(&channel->sys_block_size, block_size);
270
271         return count;
272 }
273 static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);
274
275 static ssize_t store_group_size(struct device *dev,
276                                 struct device_attribute *attr,
277                                 const char *buf, size_t count)
278 {
279         struct channel_info *channel = dev_get_drvdata(dev);
280         unsigned long group_size;
281
282         sscanf(buf, "%lu", &group_size);
283         atomic_set(&channel->sys_group_size, group_size);
284
285         return count;
286 }
287 static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);
288
289 static ssize_t store_group_count(struct device *dev,
290                                 struct device_attribute *attr,
291                                  const char *buf, size_t count)
292 {
293         struct channel_info *channel = dev_get_drvdata(dev);
294         unsigned long group_count;
295
296         sscanf(buf, "%lu", &group_count);
297         atomic_set(&channel->sys_group_count, group_count);
298
299         return count;
300 }
301 static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);
302
303 static ssize_t show_direction(struct device *dev,
304                               struct device_attribute *attr, char *buf)
305 {
306         struct channel_info *channel = dev_get_drvdata(dev);
307         int len;
308
309         len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
310         return len;
311 }
312 static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);
313
314 static unsigned long npages(unsigned long bytes)
315 {
316         if (bytes % PAGE_SIZE == 0)
317                 return bytes / PAGE_SIZE;
318         else
319                 return (bytes / PAGE_SIZE) + 1;
320 }
321
322 static ssize_t show_mmap_size(struct device *dev,
323                               struct device_attribute *attr, char *buf)
324 {
325         struct channel_info *channel = dev_get_drvdata(dev);
326         int len;
327         unsigned long mmap_size;
328         unsigned long group_pages;
329         unsigned long total_group_pages;
330
331         group_pages = npages(channel->group_size);
332         total_group_pages = group_pages * channel->group_count;
333
334         mmap_size = total_group_pages * PAGE_SIZE;
335         len = sprintf(buf, "%lu\n", mmap_size);
336         return len;
337 }
338 static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);
339
/* Per-channel sysfs attributes, created on each channel's class device. */
static struct device_attribute *poch_class_attrs[] = {
        &dev_attr_block_size,
        &dev_attr_group_size,
        &dev_attr_group_count,
        &dev_attr_dir,
        &dev_attr_mmap_size,
};
347
348 static void poch_channel_free_groups(struct channel_info *channel)
349 {
350         unsigned long i;
351
352         for (i = 0; i < channel->group_count; i++) {
353                 struct poch_group_info *group;
354                 unsigned int order;
355
356                 group = &channel->groups[i];
357                 order = get_order(channel->group_size);
358                 if (group->pg)
359                         __free_pages(group->pg, order);
360         }
361 }
362
363 static int poch_channel_alloc_groups(struct channel_info *channel)
364 {
365         unsigned long i;
366         unsigned long group_pages;
367
368         group_pages = npages(channel->group_size);
369
370         for (i = 0; i < channel->group_count; i++) {
371                 struct poch_group_info *group;
372                 unsigned int order;
373                 gfp_t gfp_mask;
374
375                 group = &channel->groups[i];
376                 order = get_order(channel->group_size);
377
378                 /*
379                  * __GFP_COMP is required here since we are going to
380                  * perform non-linear mapping to userspace. For more
381                  * information read the vm_insert_page() function
382                  * comments.
383                  */
384
385                 gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO;
386                 group->pg = alloc_pages(gfp_mask, order);
387                 if (!group->pg) {
388                         poch_channel_free_groups(channel);
389                         return -ENOMEM;
390                 }
391
392                 /* FIXME: This is the physical address not the bus
393                  * address!  This won't work in architectures that
394                  * have an IOMMU. Can we use pci_map_single() for
395                  * this?
396                  */
397                 group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
398                 group->user_offset = (i * group_pages) * PAGE_SIZE;
399
400                 printk(KERN_INFO PFX "%ld: user_offset: 0x%lx\n", i,
401                        group->user_offset);
402         }
403
404         return 0;
405 }
406
407 static int channel_latch_attr(struct channel_info *channel)
408 {
409         channel->group_count = atomic_read(&channel->sys_group_count);
410         channel->group_size = atomic_read(&channel->sys_group_size);
411         channel->block_size = atomic_read(&channel->sys_block_size);
412
413         if (channel->group_count == 0) {
414                 printk(KERN_ERR PFX "invalid group count %lu",
415                        channel->group_count);
416                 return -EINVAL;
417         }
418
419         if (channel->group_size == 0 ||
420             channel->group_size < channel->block_size) {
421                 printk(KERN_ERR PFX "invalid group size %lu",
422                        channel->group_size);
423                 return -EINVAL;
424         }
425
426         if (channel->block_size == 0 || (channel->block_size % 8) != 0) {
427                 printk(KERN_ERR PFX "invalid block size %lu",
428                        channel->block_size);
429                 return -EINVAL;
430         }
431
432         if (channel->group_size % channel->block_size != 0) {
433                 printk(KERN_ERR PFX
434                        "group size should be multiple of block size");
435                 return -EINVAL;
436         }
437
438         return 0;
439 }
440
441 /*
442  * Configure DMA group registers
443  */
444 static void channel_dma_init(struct channel_info *channel)
445 {
446         void __iomem *fpga = channel->fpga_iomem;
447         u32 group_regs_base;
448         u32 group_reg;
449         unsigned int page;
450         unsigned int group_in_page;
451         unsigned long i;
452         u32 block_size_reg;
453         u32 block_count_reg;
454         u32 group_count_reg;
455         u32 groups_per_int_reg;
456         u32 curr_pci_reg;
457
458         if (channel->chno == CHNO_RX_CHANNEL) {
459                 group_regs_base = FPGA_RX_GROUP0_START_REG;
460                 block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
461                 block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
462                 group_count_reg = FPGA_RX_GROUP_COUNT_REG;
463                 groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
464                 curr_pci_reg = FPGA_RX_CURR_PCI_REG;
465         } else {
466                 group_regs_base = FPGA_TX_GROUP0_START_REG;
467                 block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
468                 block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
469                 group_count_reg = FPGA_TX_GROUP_COUNT_REG;
470                 groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
471                 curr_pci_reg = FPGA_TX_CURR_PCI_REG;
472         }
473
474         printk(KERN_WARNING "block_size, group_size, group_count\n");
475         /*
476          * Block size is represented in no. of 64 bit transfers.
477          */
478         iowrite32(channel->block_size / 8, fpga + block_size_reg);
479         iowrite32(channel->group_size / channel->block_size,
480                   fpga + block_count_reg);
481         iowrite32(channel->group_count, fpga + group_count_reg);
482         /* FIXME: Hardcoded groups per int. Get it from sysfs? */
483         iowrite32(16, fpga + groups_per_int_reg);
484
485         /* Unlock PCI address? Not defined in the data sheet, but used
486          * in the reference code by Redrapids.
487          */
488         iowrite32(0x1, fpga + curr_pci_reg);
489
490         /* The DMA address page register is shared between the RX and
491          * TX channels, so acquire lock.
492          */
493         for (i = 0; i < channel->group_count; i++) {
494                 page = i / 32;
495                 group_in_page = i % 32;
496
497                 group_reg = group_regs_base + (group_in_page * 4);
498
499                 spin_lock(channel->iomem_lock);
500                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
501                 iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
502                 spin_unlock(channel->iomem_lock);
503         }
504
505         for (i = 0; i < channel->group_count; i++) {
506                 page = i / 32;
507                 group_in_page = i % 32;
508
509                 group_reg = group_regs_base + (group_in_page * 4);
510
511                 spin_lock(channel->iomem_lock);
512                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
513                 printk(KERN_INFO PFX "%ld: read dma_addr: 0x%x\n", i,
514                        ioread32(fpga + group_reg));
515                 spin_unlock(channel->iomem_lock);
516         }
517
518 }
519
520 static void __poch_channel_clear_counters(struct channel_info *channel)
521 {
522         channel->counters.pll_unlock = 0;
523         channel->counters.fifo_empty = 0;
524         channel->counters.fifo_overflow = 0;
525 }
526
527 static int poch_channel_init(struct channel_info *channel,
528                              struct poch_dev *poch_dev)
529 {
530         struct pci_dev *pdev = poch_dev->pci_dev;
531         struct device *dev = &pdev->dev;
532         unsigned long alloc_size;
533         int ret;
534
535         printk(KERN_WARNING "channel_latch_attr\n");
536
537         ret = channel_latch_attr(channel);
538         if (ret != 0)
539                 goto out;
540
541         channel->consumed = 0;
542         channel->transfer = 0;
543
544         /* Allocate memory to hold group information. */
545         alloc_size = channel->group_count * sizeof(struct poch_group_info);
546         channel->groups = kzalloc(alloc_size, GFP_KERNEL);
547         if (!channel->groups) {
548                 dev_err(dev, "error allocating memory for group info\n");
549                 ret = -ENOMEM;
550                 goto out;
551         }
552
553         printk(KERN_WARNING "poch_channel_alloc_groups\n");
554
555         ret = poch_channel_alloc_groups(channel);
556         if (ret) {
557                 dev_err(dev, "error allocating groups of order %d\n",
558                         get_order(channel->group_size));
559                 goto out_free_group_info;
560         }
561
562         channel->fpga_iomem = poch_dev->fpga_iomem;
563         channel->bridge_iomem = poch_dev->bridge_iomem;
564         channel->iomem_lock = &poch_dev->iomem_lock;
565         spin_lock_init(&channel->counters_lock);
566
567         __poch_channel_clear_counters(channel);
568
569         return 0;
570
571  out_free_group_info:
572         kfree(channel->groups);
573  out:
574         return ret;
575 }
576
577 static int poch_wait_fpga_prog(void __iomem *bridge)
578 {
579         unsigned long total_wait;
580         const unsigned long wait_period = 100;
581         /* FIXME: Get the actual timeout */
582         const unsigned long prog_timeo = 10000; /* 10 Seconds */
583         u32 card_power;
584
585         printk(KERN_WARNING "poch_wait_fpg_prog\n");
586
587         printk(KERN_INFO PFX "programming fpga ...\n");
588         total_wait = 0;
589         while (1) {
590                 msleep(wait_period);
591                 total_wait += wait_period;
592
593                 card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
594                 if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
595                         printk(KERN_INFO PFX "programming done\n");
596                         return 0;
597                 }
598                 if (total_wait > prog_timeo) {
599                         printk(KERN_ERR PFX
600                                "timed out while programming FPGA\n");
601                         return -EIO;
602                 }
603         }
604 }
605
606 static void poch_card_power_off(struct poch_dev *poch_dev)
607 {
608         void __iomem *bridge = poch_dev->bridge_iomem;
609         u32 card_power;
610
611         iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
612         iowrite32(0, bridge + BRIDGE_DMA_GO_REG);
613
614         card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
615         iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
616                   bridge + BRIDGE_CARD_POWER_REG);
617 }
618
/* Source of the ADC sample clock (see poch_card_clock_on()). */
enum clk_src {
        CLK_SRC_ON_BOARD,       /* On-board oscillator. */
        CLK_SRC_EXTERNAL        /* Externally supplied sample clock. */
};
623
624 static void poch_card_clock_on(void __iomem *fpga)
625 {
626         /* FIXME: Get this data through sysfs? */
627         enum clk_src clk_src = CLK_SRC_ON_BOARD;
628
629         if (clk_src == CLK_SRC_ON_BOARD) {
630                 iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
631                           fpga + FPGA_ADC_CLOCK_CTL_REG);
632         } else if (clk_src == CLK_SRC_EXTERNAL) {
633                 iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
634                           fpga + FPGA_ADC_CLOCK_CTL_REG);
635         }
636 }
637
/*
 * Power on the card, wait for the FPGA to program itself, start the
 * sample clock and reset the FPGA state machines into dual-master
 * scatter/gather DMA mode.  Returns 0 on success; on FPGA programming
 * timeout the card is powered back off and -EIO is returned.
 */
static int poch_card_power_on(struct poch_dev *poch_dev)
{
        void __iomem *bridge = poch_dev->bridge_iomem;
        void __iomem *fpga = poch_dev->fpga_iomem;

        iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);

        if (poch_wait_fpga_prog(bridge) != 0) {
                poch_card_power_off(poch_dev);
                return -EIO;
        }

        poch_card_clock_on(fpga);

        /* Sync to new clock, reset state machines, set DMA mode. */
        iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
                  | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);

        /* FIXME: The time required for sync. needs to be tuned. */
        msleep(1000);

        return 0;
}
661
662 static void poch_channel_analog_on(struct channel_info *channel)
663 {
664         void __iomem *fpga = channel->fpga_iomem;
665         u32 adc_dac_en;
666
667         spin_lock(channel->iomem_lock);
668         adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
669         switch (channel->chno) {
670         case CHNO_RX_CHANNEL:
671                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
672                           fpga + FPGA_ADC_DAC_EN_REG);
673                 break;
674         case CHNO_TX_CHANNEL:
675                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
676                           fpga + FPGA_ADC_DAC_EN_REG);
677                 break;
678         }
679         spin_unlock(channel->iomem_lock);
680 }
681
/*
 * Open one channel of the card.  Marks the channel busy, powers the
 * card on if this is the first open of either channel, latches the
 * buffer geometry, allocates DMA groups, programs the DMA registers
 * and enables the channel's analog section, interrupts and FIFO
 * control.  Returns 0 or a negative errno.
 */
static int poch_open(struct inode *inode, struct file *filp)
{
        struct poch_dev *poch_dev;
        struct channel_info *channel;
        void __iomem *bridge;
        void __iomem *fpga;
        int chno;
        int usage;
        int ret;

        poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
        bridge = poch_dev->bridge_iomem;
        fpga = poch_dev->fpga_iomem;

        /* Minor number selects the channel within this card. */
        chno = iminor(inode) % poch_dev->nchannels;
        channel = &poch_dev->channels[chno];

        /* Allow only a single opener per channel. */
        if (!atomic_dec_and_test(&channel->free)) {
                atomic_inc(&channel->free);
                ret = -EBUSY;
                goto out;
        }

        usage = atomic_inc_return(&poch_dev->usage);

        printk(KERN_WARNING "poch_card_power_on\n");

        /* First opener of either channel powers the card on; the last
         * release powers it off (see poch_release()).
         */
        if (usage == 1) {
                ret = poch_card_power_on(poch_dev);
                if (ret)
                        goto out_dec_usage;
        }

        printk(KERN_INFO "CardBus Bridge Revision: %x\n",
               ioread32(bridge + BRIDGE_REV_REG));
        printk(KERN_INFO "CardBus Interface Revision: %x\n",
               ioread32(fpga + FPGA_IFACE_REV_REG));

        channel->chno = chno;
        filp->private_data = channel;

        printk(KERN_WARNING "poch_channel_init\n");

        ret = poch_channel_init(channel, poch_dev);
        if (ret)
                goto out_power_off;

        poch_channel_analog_on(channel);

        printk(KERN_WARNING "channel_dma_init\n");

        channel_dma_init(channel);

        printk(KERN_WARNING "poch_channel_analog_on\n");

        if (usage == 1) {
                printk(KERN_WARNING "setting up DMA\n");

                /* Initialize DMA Controller. */
                iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
                iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);

                /* Dummy reads — presumably read-to-clear of pending
                 * interrupt status; TODO confirm against the datasheet.
                 */
                ioread32(fpga + FPGA_DMA_INT_STAT_REG);
                ioread32(fpga + FPGA_INT_STAT_REG);
                ioread32(bridge + BRIDGE_INT_STAT_REG);

                /* Initialize Interrupts. FIXME: Enable temperature
                 * handling We are enabling both Tx and Rx channel
                 * interrupts here. Do we need to enable interrupts
                 * only for the current channel? Anyways we won't get
                 * the interrupt unless the DMA is activated.
                 */
                iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
                iowrite32(FPGA_INT_DMA_CORE
                          | FPGA_INT_PLL_UNLOCKED
                          | FPGA_INT_TX_FF_EMPTY
                          | FPGA_INT_RX_FF_EMPTY
                          | FPGA_INT_TX_FF_OVRFLW
                          | FPGA_INT_RX_FF_OVRFLW,
                          fpga + FPGA_INT_MASK_REG);
                iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
                          fpga + FPGA_DMA_INT_MASK_REG);
        }

        if (channel->dir == CHANNEL_DIR_TX) {
                /* Flush TX FIFO and output data from cardbus. */
                u32 ctl_val = 0;

                ctl_val |= FPGA_TX_CTL_FIFO_FLUSH;
                ctl_val |= FPGA_TX_CTL_OUTPUT_CARDBUS;
                if (loopback)
                        ctl_val |= FPGA_TX_CTL_LOOPBACK;

                iowrite32(ctl_val, fpga + FPGA_TX_CTL_REG);
        } else {
                /* Flush RX FIFO and output data to cardbus. */
                u32 ctl_val = FPGA_RX_CTL_CONT_CAP | FPGA_RX_CTL_FIFO_FLUSH;
                if (synth_rx)
                        ctl_val |= FPGA_RX_CTL_SYNTH_DATA;

                iowrite32(ctl_val, fpga + FPGA_RX_CTL_REG);
        }

        atomic_inc(&channel->inited);

        return 0;

 out_power_off:
        if (usage == 1)
                poch_card_power_off(poch_dev);
 out_dec_usage:
        atomic_dec(&poch_dev->usage);
        atomic_inc(&channel->free);
 out:
        return ret;
}
798
/*
 * Release a channel: drop the card usage count (powering the card off
 * on the last release), free the channel's DMA group buffers and mark
 * the channel free again.
 */
static int poch_release(struct inode *inode, struct file *filp)
{
        struct channel_info *channel = filp->private_data;
        struct poch_dev *poch_dev;
        int usage;

        poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);

        /* Mirror of the power-on logic in poch_open(). */
        usage = atomic_dec_return(&poch_dev->usage);
        if (usage == 0) {
                printk(KERN_WARNING "poch_card_power_off\n");
                poch_card_power_off(poch_dev);
        }

        atomic_dec(&channel->inited);
        poch_channel_free_groups(channel);
        kfree(channel->groups);
        atomic_inc(&channel->free);

        return 0;
}
820
821 /*
822  * Map the the group buffers, to user space.
823  */
824 static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
825 {
826         struct channel_info *channel = filp->private_data;
827
828         unsigned long start;
829         unsigned long size;
830
831         unsigned long group_pages;
832         unsigned long total_group_pages;
833
834         int pg_num;
835         struct page *pg;
836
837         int i;
838         int ret;
839
840         printk(KERN_WARNING "poch_mmap\n");
841
842         if (vma->vm_pgoff) {
843                 printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
844                 return -EINVAL;
845         }
846
847         group_pages = npages(channel->group_size);
848         total_group_pages = group_pages * channel->group_count;
849
850         size = vma->vm_end - vma->vm_start;
851         if (size != total_group_pages * PAGE_SIZE) {
852                 printk(KERN_WARNING PFX "required %lu bytes\n", size);
853                 return -EINVAL;
854         }
855
856         start = vma->vm_start;
857
858         for (i = 0; i < channel->group_count; i++) {
859                 pg = channel->groups[i].pg;
860                 for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
861                         printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
862                                pg_num, i, start);
863                         ret = vm_insert_page(vma, start, pg);
864                         if (ret) {
865                                 printk(KERN_DEBUG PFX
866                                        "vm_insert 2 failed at %d\n", pg_num);
867                                 return ret;
868                         }
869                         start += PAGE_SIZE;
870                 }
871         }
872
873         return 0;
874 }
875
876 /*
877  * Check whether there is some group that the user space has not
878  * consumed yet. When the user space consumes a group, it sets it to
879  * -1. Cosuming could be reading data in case of RX and filling a
880  * buffer in case of TX.
881  */
882 static int poch_channel_available(struct channel_info *channel)
883 {
884         int available = 0;
885
886         spin_lock_irq(&channel->group_offsets_lock);
887
888         if (channel->consumed != channel->transfer)
889                 available = 1;
890
891         spin_unlock_irq(&channel->group_offsets_lock);
892
893         return available;
894 }
895
896 static unsigned int poch_poll(struct file *filp, poll_table *pt)
897 {
898         struct channel_info *channel = filp->private_data;
899         unsigned int ret = 0;
900
901         poll_wait(filp, &channel->wq, pt);
902
903         if (poch_channel_available(channel)) {
904                 if (channel->dir == CHANNEL_DIR_RX)
905                         ret = POLLIN | POLLRDNORM;
906                 else
907                         ret = POLLOUT | POLLWRNORM;
908         }
909
910         return ret;
911 }
912
913 static int poch_ioctl(struct inode *inode, struct file *filp,
914                       unsigned int cmd, unsigned long arg)
915 {
916         struct channel_info *channel = filp->private_data;
917         void __iomem *fpga = channel->fpga_iomem;
918         void __iomem *bridge = channel->bridge_iomem;
919         void __user *argp = (void __user *)arg;
920         struct vm_area_struct *vms;
921         struct poch_counters counters;
922         int ret;
923
924         switch (cmd) {
925         case POCH_IOC_TRANSFER_START:
926                 switch (channel->chno) {
927                 case CHNO_TX_CHANNEL:
928                         printk(KERN_INFO PFX "ioctl: Tx start\n");
929                         iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
930                         iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);
931
932                         /* FIXME: Does it make sense to do a DMA GO
933                          * twice, once in Tx and once in Rx.
934                          */
935                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
936                         break;
937                 case CHNO_RX_CHANNEL:
938                         printk(KERN_INFO PFX "ioctl: Rx start\n");
939                         iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
940                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
941                         break;
942                 }
943                 break;
944         case POCH_IOC_TRANSFER_STOP:
945                 switch (channel->chno) {
946                 case CHNO_TX_CHANNEL:
947                         printk(KERN_INFO PFX "ioctl: Tx stop\n");
948                         iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
949                         iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
950                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
951                         break;
952                 case CHNO_RX_CHANNEL:
953                         printk(KERN_INFO PFX "ioctl: Rx stop\n");
954                         iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
955                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
956                         break;
957                 }
958                 break;
959         case POCH_IOC_CONSUME:
960         {
961                 int available;
962                 int nfetch;
963                 unsigned int from;
964                 unsigned int count;
965                 unsigned int i, j;
966                 struct poch_consume consume;
967                 struct poch_consume *uconsume;
968
969                 uconsume = argp;
970                 ret = copy_from_user(&consume, uconsume, sizeof(consume));
971                 if (ret)
972                         return ret;
973
974                 spin_lock_irq(&channel->group_offsets_lock);
975
976                 channel->consumed += consume.nflush;
977                 channel->consumed %= channel->group_count;
978
979                 available = channel->transfer - channel->consumed;
980                 if (available < 0)
981                         available += channel->group_count;
982
983                 from = channel->consumed;
984
985                 spin_unlock_irq(&channel->group_offsets_lock);
986
987                 nfetch = consume.nfetch;
988                 count = min(available, nfetch);
989
990                 for (i = 0; i < count; i++) {
991                         j = (from + i) % channel->group_count;
992                         ret = put_user(channel->groups[j].user_offset,
993                                        &consume.offsets[i]);
994                         if (ret)
995                                 return -EFAULT;
996                 }
997
998                 ret = put_user(count, &uconsume->nfetch);
999                 if (ret)
1000                         return -EFAULT;
1001
1002                 break;
1003         }
1004         case POCH_IOC_GET_COUNTERS:
1005                 if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
1006                         return -EFAULT;
1007
1008                 spin_lock_irq(&channel->counters_lock);
1009                 counters = channel->counters;
1010                 __poch_channel_clear_counters(channel);
1011                 spin_unlock_irq(&channel->counters_lock);
1012
1013                 ret = copy_to_user(argp, &counters,
1014                                    sizeof(struct poch_counters));
1015                 if (ret)
1016                         return ret;
1017
1018                 break;
1019         case POCH_IOC_SYNC_GROUP_FOR_USER:
1020         case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
1021                 vms = find_vma(current->mm, arg);
1022                 if (!vms)
1023                         /* Address not mapped. */
1024                         return -EINVAL;
1025                 if (vms->vm_file != filp)
1026                         /* Address mapped from different device/file. */
1027                         return -EINVAL;
1028
1029                 flush_cache_range(vms, arg, arg + channel->group_size);
1030                 break;
1031         }
1032         return 0;
1033 }
1034
/* File operations for the per-channel character devices. */
static struct file_operations poch_fops = {
	.owner = THIS_MODULE,
	.open = poch_open,
	.release = poch_release,
	.ioctl = poch_ioctl,
	.poll = poch_poll,
	.mmap = poch_mmap
};
1043
1044 static void poch_irq_dma(struct channel_info *channel)
1045 {
1046         u32 prev_transfer;
1047         u32 curr_transfer;
1048         long groups_done;
1049         unsigned long i, j;
1050         struct poch_group_info *groups;
1051         u32 curr_group_reg;
1052
1053         if (!atomic_read(&channel->inited))
1054                 return;
1055
1056         prev_transfer = channel->transfer;
1057
1058         if (channel->chno == CHNO_RX_CHANNEL)
1059                 curr_group_reg = FPGA_RX_CURR_GROUP_REG;
1060         else
1061                 curr_group_reg = FPGA_TX_CURR_GROUP_REG;
1062
1063         curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);
1064
1065         groups_done = curr_transfer - prev_transfer;
1066         /* Check wrap over, and handle it. */
1067         if (groups_done <= 0)
1068                 groups_done += channel->group_count;
1069
1070         groups = channel->groups;
1071
1072         spin_lock(&channel->group_offsets_lock);
1073
1074         for (i = 0; i < groups_done; i++) {
1075                 j = (prev_transfer + i) % channel->group_count;
1076
1077                 channel->transfer += 1;
1078                 channel->transfer %= channel->group_count;
1079
1080                 if (channel->transfer == channel->consumed) {
1081                         channel->consumed += 1;
1082                         channel->consumed %= channel->group_count;
1083                 }
1084         }
1085
1086         spin_unlock(&channel->group_offsets_lock);
1087
1088         wake_up_interruptible(&channel->wq);
1089 }
1090
/*
 * Shared interrupt handler for the card.
 *
 * Reads the bridge, FPGA and DMA interrupt status registers, then
 * dispatches: DMA completions to poch_irq_dma() per channel, PLL and
 * FIFO events to the per-channel error counters.  Returns IRQ_NONE
 * when the FPGA did not raise the interrupt, since the line is shared.
 */
static irqreturn_t poch_irq_handler(int irq, void *p)
{
	struct poch_dev *poch_dev = p;
	void __iomem *bridge = poch_dev->bridge_iomem;
	void __iomem *fpga = poch_dev->fpga_iomem;
	struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
	struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
	u32 bridge_stat;
	u32 fpga_stat;
	u32 dma_stat;

	bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
	fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
	dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);

	/* NOTE(review): the three status registers are read again, in
	 * reverse order, with the results discarded -- presumably the
	 * registers are clear-on-read and this acknowledges the
	 * interrupt sources.  TODO: confirm against the hardware
	 * documentation. */
	ioread32(fpga + FPGA_DMA_INT_STAT_REG);
	ioread32(fpga + FPGA_INT_STAT_REG);
	ioread32(bridge + BRIDGE_INT_STAT_REG);

	if (bridge_stat & BRIDGE_INT_FPGA) {
		if (fpga_stat & FPGA_INT_DMA_CORE) {
			if (dma_stat & FPGA_DMA_INT_RX)
				poch_irq_dma(channel_rx);
			if (dma_stat & FPGA_DMA_INT_TX)
				poch_irq_dma(channel_tx);
		}
		if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
			channel_tx->counters.pll_unlock++;
			channel_rx->counters.pll_unlock++;
			if (printk_ratelimit())
				printk(KERN_WARNING PFX "PLL unlocked\n");
		}
		if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
			channel_tx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
			channel_tx->counters.fifo_overflow++;
		if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
			channel_rx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
			channel_rx->counters.fifo_overflow++;

		/*
		 * FIXME: These errors should be notified through the
		 * poll interface as POLLERR.
		 */

		/* Re-enable interrupts. */
		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1145
1146 static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
1147 {
1148         int i, j;
1149         int nattrs;
1150         struct channel_info *channel;
1151         dev_t devno;
1152
1153         if (poch_dev->dev == NULL)
1154                 return;
1155
1156         for (i = 0; i < poch_dev->nchannels; i++) {
1157                 channel = &poch_dev->channels[i];
1158                 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1159
1160                 if (!channel->dev)
1161                         continue;
1162
1163                 nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
1164                 for (j = 0; j < nattrs; j++)
1165                         device_remove_file(channel->dev, poch_class_attrs[j]);
1166
1167                 device_unregister(channel->dev);
1168         }
1169
1170         device_unregister(poch_dev->dev);
1171 }
1172
/*
 * Create the sysfs class hierarchy for one card: a parent "poch<id>"
 * device and one "ch<i>" child device per channel, each carrying the
 * attribute files in poch_class_attrs.  On any failure everything
 * registered so far is torn down via poch_class_dev_unregister().
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
					     int id)
{
	struct device *dev = &poch_dev->pci_dev->dev;
	int i, j;
	int nattrs;
	int ret;
	struct channel_info *channel;
	dev_t devno;

	poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
				      MKDEV(0, 0), NULL, "poch%d", id);
	if (IS_ERR(poch_dev->dev)) {
		dev_err(dev, "error creating parent class device");
		ret = PTR_ERR(poch_dev->dev);
		/* NULL marks "not registered" for the unregister path. */
		poch_dev->dev = NULL;
		return ret;
	}

	for (i = 0; i < poch_dev->nchannels; i++) {
		channel = &poch_dev->channels[i];

		/* Channel class devices share the char device numbering. */
		devno = poch_first_dev + (id * poch_dev->nchannels) + i;
		channel->dev = device_create(poch_cls, poch_dev->dev, devno,
					     NULL, "ch%d", i);
		if (IS_ERR(channel->dev)) {
			dev_err(dev, "error creating channel class device");
			ret = PTR_ERR(channel->dev);
			/* NULL so the unregister path skips this channel. */
			channel->dev = NULL;
			poch_class_dev_unregister(poch_dev, id);
			return ret;
		}

		dev_set_drvdata(channel->dev, channel);
		nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
		for (j = 0; j < nattrs; j++) {
			ret = device_create_file(channel->dev,
						 poch_class_attrs[j]);
			if (ret) {
				dev_err(dev, "error creating attribute file");
				poch_class_dev_unregister(poch_dev, id);
				return ret;
			}
		}
	}

	return 0;
}
1221
1222 static int __devinit poch_pci_probe(struct pci_dev *pdev,
1223                                     const struct pci_device_id *pci_id)
1224 {
1225         struct device *dev = &pdev->dev;
1226         struct poch_dev *poch_dev;
1227         struct uio_info *uio;
1228         int ret;
1229         int id;
1230         int i;
1231
1232         poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
1233         if (!poch_dev) {
1234                 dev_err(dev, "error allocating priv. data memory\n");
1235                 return -ENOMEM;
1236         }
1237
1238         poch_dev->pci_dev = pdev;
1239         uio = &poch_dev->uio;
1240
1241         pci_set_drvdata(pdev, poch_dev);
1242
1243         spin_lock_init(&poch_dev->iomem_lock);
1244
1245         poch_dev->nchannels = POCH_NCHANNELS;
1246         poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
1247         poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;
1248
1249         for (i = 0; i < poch_dev->nchannels; i++) {
1250                 init_waitqueue_head(&poch_dev->channels[i].wq);
1251                 atomic_set(&poch_dev->channels[i].free, 1);
1252                 atomic_set(&poch_dev->channels[i].inited, 0);
1253         }
1254
1255         ret = pci_enable_device(pdev);
1256         if (ret) {
1257                 dev_err(dev, "error enabling device\n");
1258                 goto out_free;
1259         }
1260
1261         ret = pci_request_regions(pdev, "poch");
1262         if (ret) {
1263                 dev_err(dev, "error requesting resources\n");
1264                 goto out_disable;
1265         }
1266
1267         uio->mem[0].addr = pci_resource_start(pdev, 1);
1268         if (!uio->mem[0].addr) {
1269                 dev_err(dev, "invalid BAR1\n");
1270                 ret = -ENODEV;
1271                 goto out_release;
1272         }
1273
1274         uio->mem[0].size = pci_resource_len(pdev, 1);
1275         uio->mem[0].memtype = UIO_MEM_PHYS;
1276
1277         uio->name = "poch";
1278         uio->version = "0.0.1";
1279         uio->irq = -1;
1280         ret = uio_register_device(dev, uio);
1281         if (ret) {
1282                 dev_err(dev, "error register UIO device: %d\n", ret);
1283                 goto out_release;
1284         }
1285
1286         poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
1287                                          pci_resource_len(pdev, 0));
1288         if (poch_dev->bridge_iomem == NULL) {
1289                 dev_err(dev, "error mapping bridge (bar0) registers\n");
1290                 ret = -ENOMEM;
1291                 goto out_uio_unreg;
1292         }
1293
1294         poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
1295                                        pci_resource_len(pdev, 1));
1296         if (poch_dev->fpga_iomem == NULL) {
1297                 dev_err(dev, "error mapping fpga (bar1) registers\n");
1298                 ret = -ENOMEM;
1299                 goto out_bar0_unmap;
1300         }
1301
1302         ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
1303                           dev_name(dev), poch_dev);
1304         if (ret) {
1305                 dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
1306                 ret = -ENOMEM;
1307                 goto out_bar1_unmap;
1308         }
1309
1310         if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
1311                 dev_err(dev, "error allocating memory ids\n");
1312                 ret = -ENOMEM;
1313                 goto out_free_irq;
1314         }
1315
1316         idr_get_new(&poch_ids, poch_dev, &id);
1317         if (id >= MAX_POCH_CARDS) {
1318                 dev_err(dev, "minors exhausted\n");
1319                 ret = -EBUSY;
1320                 goto out_free_irq;
1321         }
1322
1323         cdev_init(&poch_dev->cdev, &poch_fops);
1324         poch_dev->cdev.owner = THIS_MODULE;
1325         ret = cdev_add(&poch_dev->cdev,
1326                        poch_first_dev + (id * poch_dev->nchannels),
1327                        poch_dev->nchannels);
1328         if (ret) {
1329                 dev_err(dev, "error register character device\n");
1330                 goto out_idr_remove;
1331         }
1332
1333         ret = poch_class_dev_register(poch_dev, id);
1334         if (ret)
1335                 goto out_cdev_del;
1336
1337         return 0;
1338
1339  out_cdev_del:
1340         cdev_del(&poch_dev->cdev);
1341  out_idr_remove:
1342         idr_remove(&poch_ids, id);
1343  out_free_irq:
1344         free_irq(pdev->irq, poch_dev);
1345  out_bar1_unmap:
1346         iounmap(poch_dev->fpga_iomem);
1347  out_bar0_unmap:
1348         iounmap(poch_dev->bridge_iomem);
1349  out_uio_unreg:
1350         uio_unregister_device(uio);
1351  out_release:
1352         pci_release_regions(pdev);
1353  out_disable:
1354         pci_disable_device(pdev);
1355  out_free:
1356         kfree(poch_dev);
1357         return ret;
1358 }
1359
1360 /*
1361  * FIXME: We are yet to handle the hot unplug case.
1362  */
1363 static void poch_pci_remove(struct pci_dev *pdev)
1364 {
1365         struct poch_dev *poch_dev = pci_get_drvdata(pdev);
1366         struct uio_info *uio = &poch_dev->uio;
1367         unsigned int minor = MINOR(poch_dev->cdev.dev);
1368         unsigned int id = minor / poch_dev->nchannels;
1369
1370         poch_class_dev_unregister(poch_dev, id);
1371         cdev_del(&poch_dev->cdev);
1372         idr_remove(&poch_ids, id);
1373         free_irq(pdev->irq, poch_dev);
1374         iounmap(poch_dev->fpga_iomem);
1375         iounmap(poch_dev->bridge_iomem);
1376         uio_unregister_device(uio);
1377         pci_release_regions(pdev);
1378         pci_disable_device(pdev);
1379         pci_set_drvdata(pdev, NULL);
1380         iounmap(uio->mem[0].internal_addr);
1381
1382         kfree(poch_dev);
1383 }
1384
/* PCI IDs handled by this driver: the Redrapids Pocket Change card. */
static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
		     PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
	{ 0, }
};
1390
/* PCI driver glue: probe/remove entry points and the ID table. */
static struct pci_driver poch_pci_driver = {
	.name = DRV_NAME,
	.id_table = poch_pci_ids,
	.probe = poch_pci_probe,
	.remove = poch_pci_remove,
};
1397
1398 static int __init poch_init_module(void)
1399 {
1400         int ret = 0;
1401
1402         ret = alloc_chrdev_region(&poch_first_dev, 0,
1403                                   MAX_POCH_DEVICES, DRV_NAME);
1404         if (ret) {
1405                 printk(KERN_ERR PFX "error allocating device no.");
1406                 return ret;
1407         }
1408
1409         poch_cls = class_create(THIS_MODULE, "pocketchange");
1410         if (IS_ERR(poch_cls)) {
1411                 ret = PTR_ERR(poch_cls);
1412                 goto out_unreg_chrdev;
1413         }
1414
1415         ret = pci_register_driver(&poch_pci_driver);
1416         if (ret) {
1417                 printk(KERN_ERR PFX "error register PCI device");
1418                 goto out_class_destroy;
1419         }
1420
1421         return 0;
1422
1423  out_class_destroy:
1424         class_destroy(poch_cls);
1425
1426  out_unreg_chrdev:
1427         unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1428
1429         return ret;
1430 }
1431
/* Module exit: undo poch_init_module() in reverse order. */
static void __exit poch_exit_module(void)
{
	pci_unregister_driver(&poch_pci_driver);
	class_destroy(poch_cls);
	unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
}
1438
1439 module_init(poch_init_module);
1440 module_exit(poch_exit_module);
1441
1442 MODULE_LICENSE("GPL v2");