ec50541953bba1ec9f0e6b8b8bb950860341306b
[linux-flexiantxendom0.git] / drivers / spi / spi-topcliff-pch.c
1 /*
2  * SPI bus driver for the Topcliff PCH used by Intel SoCs
3  *
4  * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; version 2 of the License.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
18  */
19
20 #include <linux/delay.h>
21 #include <linux/pci.h>
22 #include <linux/wait.h>
23 #include <linux/spi/spi.h>
24 #include <linux/interrupt.h>
25 #include <linux/sched.h>
26 #include <linux/spi/spidev.h>
27 #include <linux/module.h>
28 #include <linux/device.h>
29 #include <linux/platform_device.h>
30
31 #include <linux/dmaengine.h>
32 #include <linux/pch_dma.h>
33
34 /* Register offsets */
35 #define PCH_SPCR                0x00    /* SPI control register */
36 #define PCH_SPBRR               0x04    /* SPI baud rate register */
37 #define PCH_SPSR                0x08    /* SPI status register */
38 #define PCH_SPDWR               0x0C    /* SPI write data register */
39 #define PCH_SPDRR               0x10    /* SPI read data register */
40 #define PCH_SSNXCR              0x18    /* SSN Expand Control Register */
41 #define PCH_SRST                0x1C    /* SPI reset register */
42 #define PCH_ADDRESS_SIZE        0x20
43
44 #define PCH_SPSR_TFD            0x000007C0
45 #define PCH_SPSR_RFD            0x0000F800
46
47 #define PCH_READABLE(x)         (((x) & PCH_SPSR_RFD)>>11)
48 #define PCH_WRITABLE(x)         (((x) & PCH_SPSR_TFD)>>6)
49
50 #define PCH_RX_THOLD            7
51 #define PCH_RX_THOLD_MAX        15
52
53 #define PCH_TX_THOLD            2
54
55 #define PCH_MAX_BAUDRATE        5000000
56 #define PCH_MAX_FIFO_DEPTH      16
57
58 #define STATUS_RUNNING          1
59 #define STATUS_EXITING          2
60 #define PCH_SLEEP_TIME          10
61
62 #define SSN_LOW                 0x02U
63 #define SSN_HIGH                0x03U
64 #define SSN_NO_CONTROL          0x00U
65 #define PCH_MAX_CS              0xFF
66 #define PCI_DEVICE_ID_GE_SPI    0x8816
67
68 #define SPCR_SPE_BIT            (1 << 0)
69 #define SPCR_MSTR_BIT           (1 << 1)
70 #define SPCR_LSBF_BIT           (1 << 4)
71 #define SPCR_CPHA_BIT           (1 << 5)
72 #define SPCR_CPOL_BIT           (1 << 6)
73 #define SPCR_TFIE_BIT           (1 << 8)
74 #define SPCR_RFIE_BIT           (1 << 9)
75 #define SPCR_FIE_BIT            (1 << 10)
76 #define SPCR_ORIE_BIT           (1 << 11)
77 #define SPCR_MDFIE_BIT          (1 << 12)
78 #define SPCR_FICLR_BIT          (1 << 24)
79 #define SPSR_TFI_BIT            (1 << 0)
80 #define SPSR_RFI_BIT            (1 << 1)
81 #define SPSR_FI_BIT             (1 << 2)
82 #define SPSR_ORF_BIT            (1 << 3)
83 #define SPBRR_SIZE_BIT          (1 << 10)
84
85 #define PCH_ALL                 (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
86                                 SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
87
88 #define SPCR_RFIC_FIELD         20
89 #define SPCR_TFIC_FIELD         16
90
91 #define MASK_SPBRR_SPBR_BITS    ((1 << 10) - 1)
92 #define MASK_RFIC_SPCR_BITS     (0xf << SPCR_RFIC_FIELD)
93 #define MASK_TFIC_SPCR_BITS     (0xf << SPCR_TFIC_FIELD)
94
95 #define PCH_CLOCK_HZ            50000000
96 #define PCH_MAX_SPBR            1023
97
98 /* Definition for ML7213 by OKI SEMICONDUCTOR */
99 #define PCI_VENDOR_ID_ROHM              0x10DB
100 #define PCI_DEVICE_ID_ML7213_SPI        0x802c
101 #define PCI_DEVICE_ID_ML7223_SPI        0x800F
102
103 /*
104  * Set the number of SPI instance max
105  * Intel EG20T PCH :            1ch
106  * OKI SEMICONDUCTOR ML7213 IOH :       2ch
107  * OKI SEMICONDUCTOR ML7223 IOH :       1ch
108 */
109 #define PCH_SPI_MAX_DEV                 2
110
111 #define PCH_BUF_SIZE            4096
112 #define PCH_DMA_TRANS_SIZE      12
113
114 static int use_dma = 1;
115
/**
 * struct pch_spi_dma_ctrl - DMA bookkeeping for one SPI channel
 * @desc_tx:	 In-flight transmit descriptor from the DMA engine
 * @desc_rx:	 In-flight receive descriptor from the DMA engine
 * @param_tx:	 pch_dma slave parameters for the TX channel
 * @param_rx:	 pch_dma slave parameters for the RX channel
 * @chan_tx:	 DMA engine channel used for transmit
 * @chan_rx:	 DMA engine channel used for receive
 * @sg_tx_p:	 Scatterlist array for a multi-segment TX transfer
 * @sg_rx_p:	 Scatterlist array for a multi-segment RX transfer
 * @sg_tx:	 Single-segment TX scatterlist
 * @sg_rx:	 Single-segment RX scatterlist
 * @nent:	 Number of scatterlist entries
 * @tx_buf_virt: CPU address of the TX data buffer
 * @rx_buf_virt: CPU address of the RX data buffer
 * @tx_buf_dma:	 Bus (DMA) address of the TX data buffer
 * @rx_buf_dma:	 Bus (DMA) address of the RX data buffer
 */
struct pch_spi_dma_ctrl {
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx;
	struct pch_dma_slave		param_tx;
	struct pch_dma_slave		param_rx;
	struct dma_chan		*chan_tx;
	struct dma_chan		*chan_rx;
	struct scatterlist		*sg_tx_p;
	struct scatterlist		*sg_rx_p;
	struct scatterlist		sg_tx;
	struct scatterlist		sg_rx;
	int				nent;
	void				*tx_buf_virt;
	void				*rx_buf_virt;
	dma_addr_t			tx_buf_dma;
	dma_addr_t			rx_buf_dma;
};
/**
 * struct pch_spi_data - Holds the SPI channel specific details
 * @io_remap_addr:		The remapped PCI base address
 * @io_base_addr:		Un-remapped base address of the register window
 * @master:			Pointer to the SPI master structure
 * @work:			Reference to work queue handler
 * @wk:				Workqueue for carrying out execution of the
 *				requests
 * @wait:			Wait queue for waking up upon receiving an
 *				interrupt.
 * @transfer_complete:		Status of SPI Transfer
 * @bcurrent_msg_processing:	Status flag for message processing
 * @lock:			Lock for protecting this structure
 * @queue:			SPI Message queue
 * @status:			Status of the SPI driver
 * @bpw_len:			Length of data to be transferred in bits per
 *				word
 * @transfer_active:		Flag showing active transfer
 * @tx_index:			Transmit data count; for bookkeeping during
 *				transfer
 * @rx_index:			Receive data count; for bookkeeping during
 *				transfer
 * @pkt_tx_buff:		Buffer for data to be transmitted
 * @pkt_rx_buff:		Buffer for received data
 * @n_curnt_chip:		The chip number that this SPI driver currently
 *				operates on
 * @current_chip:		Reference to the current chip that this SPI
 *				driver currently operates on
 * @current_msg:		The current message that this SPI driver is
 *				handling
 * @cur_trans:			The current transfer that this SPI driver is
 *				handling
 * @board_dat:			Reference to the SPI device data structure
 * @plat_dev:			platform_device structure
 * @ch:				SPI channel number
 * @dma:			DMA state for this channel
 * @use_dma:			Non-zero when transfers go through DMA
 * @irq_reg_sts:		Status of IRQ registration
 */
struct pch_spi_data {
	void __iomem *io_remap_addr;
	unsigned long io_base_addr;
	struct spi_master *master;
	struct work_struct work;
	struct workqueue_struct *wk;
	wait_queue_head_t wait;
	u8 transfer_complete;
	u8 bcurrent_msg_processing;
	spinlock_t lock;
	struct list_head queue;
	u8 status;
	u32 bpw_len;
	u8 transfer_active;
	u32 tx_index;
	u32 rx_index;
	u16 *pkt_tx_buff;
	u16 *pkt_rx_buff;
	u8 n_curnt_chip;
	struct spi_device *current_chip;
	struct spi_message *current_msg;
	struct spi_transfer *cur_trans;
	struct pch_spi_board_data *board_dat;
	struct platform_device	*plat_dev;
	int ch;
	struct pch_spi_dma_ctrl dma;
	int use_dma;
	u8 irq_reg_sts;
};
198
/**
 * struct pch_spi_board_data - Holds the SPI device specific details
 * @pdev:		Pointer to the PCI device
 * @suspend_sts:	Non-zero while the device is suspended; checked by the
 *			IRQ handler and transfer path to bail out early
 * @num:		The number of SPI channel instances on this device
 */
struct pch_spi_board_data {
	struct pci_dev *pdev;
	u8 suspend_sts;
	int num;
};
210
/**
 * struct pch_pd_dev_save - Saved per-channel platform devices
 * @num:	Number of platform devices created (one per SPI channel)
 * @pd_save:	Platform device pointers, kept so they can be unregistered
 *		later (presumably on driver removal — confirm against the
 *		PCI remove path, which is outside this view)
 * @board_dat:	Board data shared by all channels of one PCI function
 */
struct pch_pd_dev_save {
	int num;
	struct platform_device *pd_save[PCH_SPI_MAX_DEV];
	struct pch_spi_board_data *board_dat;
};
216
217 static struct pci_device_id pch_spi_pcidev_id[] = {
218         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI),    1, },
219         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
220         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
221         { }
222 };
223
224 /**
225  * pch_spi_writereg() - Performs  register writes
226  * @master:     Pointer to struct spi_master.
227  * @idx:        Register offset.
228  * @val:        Value to be written to register.
229  */
230 static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
231 {
232         struct pch_spi_data *data = spi_master_get_devdata(master);
233         iowrite32(val, (data->io_remap_addr + idx));
234 }
235
236 /**
237  * pch_spi_readreg() - Performs register reads
238  * @master:     Pointer to struct spi_master.
239  * @idx:        Register offset.
240  */
241 static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
242 {
243         struct pch_spi_data *data = spi_master_get_devdata(master);
244         return ioread32(data->io_remap_addr + idx);
245 }
246
247 static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
248                                       u32 set, u32 clr)
249 {
250         u32 tmp = pch_spi_readreg(master, idx);
251         tmp = (tmp & ~clr) | set;
252         pch_spi_writereg(master, idx, tmp);
253 }
254
255 static void pch_spi_set_master_mode(struct spi_master *master)
256 {
257         pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
258 }
259
260 /**
261  * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
262  * @master:     Pointer to struct spi_master.
263  */
264 static void pch_spi_clear_fifo(struct spi_master *master)
265 {
266         pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
267         pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
268 }
269
/*
 * pch_spi_handler_sub() - PIO-mode interrupt service for one channel.
 *
 * Acknowledges the raised status bits, drains the RX FIFO while
 * back-filling the TX FIFO, disables the RX-threshold interrupt near the
 * end of the transfer, and on the final-interrupt (FI) completes the
 * transfer and wakes the sleeper in pch_spi_set_ir().  Runs in hard-IRQ
 * context; statement order here is significant.
 */
static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
				void __iomem *io_remap_addr)
{
	u32 n_read, tx_index, rx_index, bpw_len;
	u16 *pkt_rx_buffer, *pkt_tx_buff;
	int read_cnt;
	u32 reg_spcr_val;
	void __iomem *spsr;
	void __iomem *spdrr;
	void __iomem *spdwr;

	/* Acknowledge the interrupt causes by writing the status back. */
	spsr = io_remap_addr + PCH_SPSR;
	iowrite32(reg_spsr_val, spsr);

	if (data->transfer_active) {
		/* Snapshot shared state into locals for the FIFO loop. */
		rx_index = data->rx_index;
		tx_index = data->tx_index;
		bpw_len = data->bpw_len;
		pkt_rx_buffer = data->pkt_rx_buff;
		pkt_tx_buff = data->pkt_tx_buff;

		spdrr = io_remap_addr + PCH_SPDRR;
		spdwr = io_remap_addr + PCH_SPDWR;

		/* Number of words waiting in the RX FIFO (from RFD field). */
		n_read = PCH_READABLE(reg_spsr_val);

		/* Drain RX; keep TX topped up as long as data remains. */
		for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
			pkt_rx_buffer[rx_index++] = ioread32(spdrr);
			if (tx_index < bpw_len)
				iowrite32(pkt_tx_buff[tx_index++], spdwr);
		}

		/* disable RFI if not needed */
		if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
			reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
			reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */

			/* reset rx threshold */
			reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
			reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);

			iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
		}

		/* update counts */
		data->tx_index = tx_index;
		data->rx_index = rx_index;

		/* if transfer complete interrupt */
		if (reg_spsr_val & SPSR_FI_BIT) {
			if ((tx_index == bpw_len) && (rx_index == tx_index)) {
				/* disable interrupts */
				pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
						   PCH_ALL);

				/* transfer is completed;
				   inform pch_spi_process_messages */
				data->transfer_complete = true;
				data->transfer_active = false;
				wake_up(&data->wait);
			} else {
				/* FI fired before all words moved - leave the
				   transfer active and report the anomaly. */
				dev_err(&data->master->dev,
					"%s : Transfer is not completed",
					__func__);
			}
		}
	}
}
338
339 /**
340  * pch_spi_handler() - Interrupt handler
341  * @irq:        The interrupt number.
342  * @dev_id:     Pointer to struct pch_spi_board_data.
343  */
344 static irqreturn_t pch_spi_handler(int irq, void *dev_id)
345 {
346         u32 reg_spsr_val;
347         void __iomem *spsr;
348         void __iomem *io_remap_addr;
349         irqreturn_t ret = IRQ_NONE;
350         struct pch_spi_data *data = dev_id;
351         struct pch_spi_board_data *board_dat = data->board_dat;
352
353         if (board_dat->suspend_sts) {
354                 dev_dbg(&board_dat->pdev->dev,
355                         "%s returning due to suspend\n", __func__);
356                 return IRQ_NONE;
357         }
358
359         io_remap_addr = data->io_remap_addr;
360         spsr = io_remap_addr + PCH_SPSR;
361
362         reg_spsr_val = ioread32(spsr);
363
364         if (reg_spsr_val & SPSR_ORF_BIT) {
365                 dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
366                 if (data->current_msg->complete != 0) {
367                         data->transfer_complete = true;
368                         data->current_msg->status = -EIO;
369                         data->current_msg->complete(data->current_msg->context);
370                         data->bcurrent_msg_processing = false;
371                         data->current_msg = NULL;
372                         data->cur_trans = NULL;
373                 }
374         }
375
376         if (data->use_dma)
377                 return IRQ_NONE;
378
379         /* Check if the interrupt is for SPI device */
380         if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
381                 pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
382                 ret = IRQ_HANDLED;
383         }
384
385         dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
386                 __func__, ret);
387
388         return ret;
389 }
390
391 /**
392  * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
393  * @master:     Pointer to struct spi_master.
394  * @speed_hz:   Baud rate.
395  */
396 static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
397 {
398         u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);
399
400         /* if baud rate is less than we can support limit it */
401         if (n_spbr > PCH_MAX_SPBR)
402                 n_spbr = PCH_MAX_SPBR;
403
404         pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
405 }
406
407 /**
408  * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
409  * @master:             Pointer to struct spi_master.
410  * @bits_per_word:      Bits per word for SPI transfer.
411  */
412 static void pch_spi_set_bits_per_word(struct spi_master *master,
413                                       u8 bits_per_word)
414 {
415         if (bits_per_word == 8)
416                 pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
417         else
418                 pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
419 }
420
421 /**
422  * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer
423  * @spi:        Pointer to struct spi_device.
424  */
425 static void pch_spi_setup_transfer(struct spi_device *spi)
426 {
427         u32 flags = 0;
428
429         dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
430                 __func__, pch_spi_readreg(spi->master, PCH_SPBRR),
431                 spi->max_speed_hz);
432         pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);
433
434         /* set bits per word */
435         pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);
436
437         if (!(spi->mode & SPI_LSB_FIRST))
438                 flags |= SPCR_LSBF_BIT;
439         if (spi->mode & SPI_CPOL)
440                 flags |= SPCR_CPOL_BIT;
441         if (spi->mode & SPI_CPHA)
442                 flags |= SPCR_CPHA_BIT;
443         pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
444                            (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));
445
446         /* Clear the FIFO by toggling  FICLR to 1 and back to 0 */
447         pch_spi_clear_fifo(spi->master);
448 }
449
450 /**
451  * pch_spi_reset() - Clears SPI registers
452  * @master:     Pointer to struct spi_master.
453  */
454 static void pch_spi_reset(struct spi_master *master)
455 {
456         /* write 1 to reset SPI */
457         pch_spi_writereg(master, PCH_SRST, 0x1);
458
459         /* clear reset */
460         pch_spi_writereg(master, PCH_SRST, 0x0);
461 }
462
463 static int pch_spi_setup(struct spi_device *pspi)
464 {
465         /* check bits per word */
466         if (pspi->bits_per_word == 0) {
467                 pspi->bits_per_word = 8;
468                 dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__);
469         }
470
471         if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) {
472                 dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__);
473                 return -EINVAL;
474         }
475
476         /* Check baud rate setting */
477         /* if baud rate of chip is greater than
478            max we can support,return error */
479         if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE)
480                 pspi->max_speed_hz = PCH_MAX_BAUDRATE;
481
482         dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__,
483                 (pspi->mode) & (SPI_CPOL | SPI_CPHA));
484
485         return 0;
486 }
487
/*
 * pch_spi_transfer() - spi_master.transfer callback: validate a message,
 * queue it and kick the worker.
 *
 * Validates the message (non-empty transfer list, non-zero max speed,
 * usable buffers/lengths/word sizes per transfer), refuses work while
 * exiting or suspended, then appends the message to data->queue under
 * data->lock and schedules pch_spi_process_messages via the workqueue.
 *
 * Return: 0 on success; -EINVAL for invalid message/suspend,
 * -ESHUTDOWN while the driver is terminating.
 */
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{

	struct spi_transfer *transfer;
	struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
	int retval;
	unsigned long flags;

	/* validate spi message and baud rate */
	if (unlikely(list_empty(&pmsg->transfers) == 1)) {
		dev_err(&pspi->dev, "%s list empty\n", __func__);
		retval = -EINVAL;
		goto err_out;
	}

	if (unlikely(pspi->max_speed_hz == 0)) {
		dev_err(&pspi->dev, "%s pch_spi_tranfer maxspeed=%d\n",
			__func__, pspi->max_speed_hz);
		retval = -EINVAL;
		goto err_out;
	}

	dev_dbg(&pspi->dev, "%s Transfer List not empty. "
		"Transfer Speed is set.\n", __func__);

	spin_lock_irqsave(&data->lock, flags);
	/* validate Tx/Rx buffers and Transfer length */
	list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
		if (!transfer->tx_buf && !transfer->rx_buf) {
			dev_err(&pspi->dev,
				"%s Tx and Rx buffer NULL\n", __func__);
			retval = -EINVAL;
			goto err_return_spinlock;
		}

		if (!transfer->len) {
			dev_err(&pspi->dev, "%s Transfer length invalid\n",
				__func__);
			retval = -EINVAL;
			goto err_return_spinlock;
		}

		dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length"
			" valid\n", __func__);

		/* Clamp a per-transfer speed override to the hw maximum. */
		if (transfer->speed_hz > PCH_MAX_BAUDRATE)
			transfer->speed_hz = PCH_MAX_BAUDRATE;

		/* A per-transfer word-size override must still be 8 or 16. */
		if (transfer->bits_per_word) {
			if ((transfer->bits_per_word != 8)
			    && (transfer->bits_per_word != 16)) {
				retval = -EINVAL;
				dev_err(&pspi->dev,
					"%s Invalid bits per word\n", __func__);
				goto err_return_spinlock;
			}
		}
	}
	spin_unlock_irqrestore(&data->lock, flags);

	/* We won't process any messages if we have been asked to terminate */
	if (data->status == STATUS_EXITING) {
		dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
		retval = -ESHUTDOWN;
		goto err_out;
	}

	/* If suspended ,return -EINVAL */
	if (data->board_dat->suspend_sts) {
		dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
		retval = -EINVAL;
		goto err_out;
	}

	/* set status of message */
	pmsg->actual_length = 0;
	dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);

	pmsg->status = -EINPROGRESS;
	spin_lock_irqsave(&data->lock, flags);
	/* add message to queue */
	list_add_tail(&pmsg->queue, &data->queue);
	spin_unlock_irqrestore(&data->lock, flags);

	dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);

	/* schedule work queue to run */
	queue_work(data->wk, &data->work);
	dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);

	retval = 0;

err_out:
	dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
	return retval;
err_return_spinlock:
	/* Failure inside the locked validation loop: drop the lock here. */
	dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
	spin_unlock_irqrestore(&data->lock, flags);
	return retval;
}
590
591 static inline void pch_spi_select_chip(struct pch_spi_data *data,
592                                        struct spi_device *pspi)
593 {
594         if (data->current_chip != NULL) {
595                 if (pspi->chip_select != data->n_curnt_chip) {
596                         dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
597                         data->current_chip = NULL;
598                 }
599         }
600
601         data->current_chip = pspi;
602
603         data->n_curnt_chip = data->current_chip->chip_select;
604
605         dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
606         pch_spi_setup_transfer(pspi);
607 }
608
609 static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
610 {
611         int size;
612         u32 n_writes;
613         int j;
614         struct spi_message *pmsg;
615         const u8 *tx_buf;
616         const u16 *tx_sbuf;
617
618         /* set baud rate if needed */
619         if (data->cur_trans->speed_hz) {
620                 dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
621                 pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
622         }
623
624         /* set bits per word if needed */
625         if (data->cur_trans->bits_per_word &&
626             (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
627                 dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
628                 pch_spi_set_bits_per_word(data->master,
629                                           data->cur_trans->bits_per_word);
630                 *bpw = data->cur_trans->bits_per_word;
631         } else {
632                 *bpw = data->current_msg->spi->bits_per_word;
633         }
634
635         /* reset Tx/Rx index */
636         data->tx_index = 0;
637         data->rx_index = 0;
638
639         data->bpw_len = data->cur_trans->len / (*bpw / 8);
640
641         /* find alloc size */
642         size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);
643
644         /* allocate memory for pkt_tx_buff & pkt_rx_buffer */
645         data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
646         if (data->pkt_tx_buff != NULL) {
647                 data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
648                 if (!data->pkt_rx_buff)
649                         kfree(data->pkt_tx_buff);
650         }
651
652         if (!data->pkt_rx_buff) {
653                 /* flush queue and set status of all transfers to -ENOMEM */
654                 dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__);
655                 list_for_each_entry(pmsg, data->queue.next, queue) {
656                         pmsg->status = -ENOMEM;
657
658                         if (pmsg->complete != 0)
659                                 pmsg->complete(pmsg->context);
660
661                         /* delete from queue */
662                         list_del_init(&pmsg->queue);
663                 }
664                 return;
665         }
666
667         /* copy Tx Data */
668         if (data->cur_trans->tx_buf != NULL) {
669                 if (*bpw == 8) {
670                         tx_buf = data->cur_trans->tx_buf;
671                         for (j = 0; j < data->bpw_len; j++)
672                                 data->pkt_tx_buff[j] = *tx_buf++;
673                 } else {
674                         tx_sbuf = data->cur_trans->tx_buf;
675                         for (j = 0; j < data->bpw_len; j++)
676                                 data->pkt_tx_buff[j] = *tx_sbuf++;
677                 }
678         }
679
680         /* if len greater than PCH_MAX_FIFO_DEPTH, write 16,else len bytes */
681         n_writes = data->bpw_len;
682         if (n_writes > PCH_MAX_FIFO_DEPTH)
683                 n_writes = PCH_MAX_FIFO_DEPTH;
684
685         dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
686                 "0x2 to SSNXCR\n", __func__);
687         pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
688
689         for (j = 0; j < n_writes; j++)
690                 pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);
691
692         /* update tx_index */
693         data->tx_index = j;
694
695         /* reset transfer complete flag */
696         data->transfer_complete = false;
697         data->transfer_active = true;
698 }
699
700 static void pch_spi_nomore_transfer(struct pch_spi_data *data)
701 {
702         struct spi_message *pmsg;
703         dev_dbg(&data->master->dev, "%s called\n", __func__);
704         /* Invoke complete callback
705          * [To the spi core..indicating end of transfer] */
706         data->current_msg->status = 0;
707
708         if (data->current_msg->complete != 0) {
709                 dev_dbg(&data->master->dev,
710                         "%s:Invoking callback of SPI core\n", __func__);
711                 data->current_msg->complete(data->current_msg->context);
712         }
713
714         /* update status in global variable */
715         data->bcurrent_msg_processing = false;
716
717         dev_dbg(&data->master->dev,
718                 "%s:data->bcurrent_msg_processing = false\n", __func__);
719
720         data->current_msg = NULL;
721         data->cur_trans = NULL;
722
723         /* check if we have items in list and not suspending
724          * return 1 if list empty */
725         if ((list_empty(&data->queue) == 0) &&
726             (!data->board_dat->suspend_sts) &&
727             (data->status != STATUS_EXITING)) {
728                 /* We have some more work to do (either there is more tranint
729                  * bpw;sfer requests in the current message or there are
730                  *more messages)
731                  */
732                 dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
733                 queue_work(data->wk, &data->work);
734         } else if (data->board_dat->suspend_sts ||
735                    data->status == STATUS_EXITING) {
736                 dev_dbg(&data->master->dev,
737                         "%s suspend/remove initiated, flushing queue\n",
738                         __func__);
739                 list_for_each_entry(pmsg, data->queue.next, queue) {
740                         pmsg->status = -EIO;
741
742                         if (pmsg->complete)
743                                 pmsg->complete(pmsg->context);
744
745                         /* delete from queue */
746                         list_del_init(&pmsg->queue);
747                 }
748         }
749 }
750
/*
 * pch_spi_set_ir() - Enable interrupts and SPE to start the transfer,
 * then sleep until pch_spi_handler_sub() signals completion.
 *
 * For transfers longer than the FIFO the RX-threshold interrupt (RFIE)
 * is also enabled so the ISR can refill/drain mid-transfer; shorter
 * transfers rely on the final interrupt (FIE) alone.  Afterwards all
 * status bits are acknowledged, interrupts and SPE disabled, and the
 * FIFOs cleared.
 */
static void pch_spi_set_ir(struct pch_spi_data *data)
{
	/* enable interrupts, set threshold, enable SPI */
	if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
		/* set receive threshold to PCH_RX_THOLD */
		pch_spi_setclr_reg(data->master, PCH_SPCR,
				   PCH_RX_THOLD << SPCR_RFIC_FIELD |
				   SPCR_FIE_BIT | SPCR_RFIE_BIT |
				   SPCR_ORIE_BIT | SPCR_SPE_BIT,
				   MASK_RFIC_SPCR_BITS | PCH_ALL);
	else
		/* set receive threshold to maximum */
		pch_spi_setclr_reg(data->master, PCH_SPCR,
				   PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
				   SPCR_FIE_BIT | SPCR_ORIE_BIT |
				   SPCR_SPE_BIT,
				   MASK_RFIC_SPCR_BITS | PCH_ALL);

	/* Wait until the transfer completes; go to sleep after
				 initiating the transfer. */
	dev_dbg(&data->master->dev,
		"%s:waiting for transfer to get over\n", __func__);

	wait_event_interruptible(data->wait, data->transfer_complete);

	/* clear all interrupts */
	pch_spi_writereg(data->master, PCH_SPSR,
			 pch_spi_readreg(data->master, PCH_SPSR));
	/* Disable interrupts and SPI transfer */
	pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
	/* clear FIFO */
	pch_spi_clear_fifo(data->master);
}
784
785 static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
786 {
787         int j;
788         u8 *rx_buf;
789         u16 *rx_sbuf;
790
791         /* copy Rx Data */
792         if (!data->cur_trans->rx_buf)
793                 return;
794
795         if (bpw == 8) {
796                 rx_buf = data->cur_trans->rx_buf;
797                 for (j = 0; j < data->bpw_len; j++)
798                         *rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
799         } else {
800                 rx_sbuf = data->cur_trans->rx_buf;
801                 for (j = 0; j < data->bpw_len; j++)
802                         *rx_sbuf++ = data->pkt_rx_buff[j];
803         }
804 }
805
806 static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
807 {
808         int j;
809         u8 *rx_buf;
810         u16 *rx_sbuf;
811         const u8 *rx_dma_buf;
812         const u16 *rx_dma_sbuf;
813
814         /* copy Rx Data */
815         if (!data->cur_trans->rx_buf)
816                 return;
817
818         if (bpw == 8) {
819                 rx_buf = data->cur_trans->rx_buf;
820                 rx_dma_buf = data->dma.rx_buf_virt;
821                 for (j = 0; j < data->bpw_len; j++)
822                         *rx_buf++ = *rx_dma_buf++ & 0xFF;
823         } else {
824                 rx_sbuf = data->cur_trans->rx_buf;
825                 rx_dma_sbuf = data->dma.rx_buf_virt;
826                 for (j = 0; j < data->bpw_len; j++)
827                         *rx_sbuf++ = *rx_dma_sbuf++;
828         }
829 }
830
831 static int pch_spi_start_transfer(struct pch_spi_data *data)
832 {
833         struct pch_spi_dma_ctrl *dma;
834         unsigned long flags;
835         int rtn;
836
837         dma = &data->dma;
838
839         spin_lock_irqsave(&data->lock, flags);
840
841         /* disable interrupts, SPI set enable */
842         pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
843
844         spin_unlock_irqrestore(&data->lock, flags);
845
846         /* Wait until the transfer completes; go to sleep after
847                                  initiating the transfer. */
848         dev_dbg(&data->master->dev,
849                 "%s:waiting for transfer to get over\n", __func__);
850         rtn = wait_event_interruptible_timeout(data->wait,
851                                                data->transfer_complete,
852                                                msecs_to_jiffies(2 * HZ));
853
854         dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
855                             DMA_FROM_DEVICE);
856
857         dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
858                             DMA_FROM_DEVICE);
859         memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
860
861         async_tx_ack(dma->desc_rx);
862         async_tx_ack(dma->desc_tx);
863         kfree(dma->sg_tx_p);
864         kfree(dma->sg_rx_p);
865
866         spin_lock_irqsave(&data->lock, flags);
867
868         /* clear fifo threshold, disable interrupts, disable SPI transfer */
869         pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
870                            MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
871                            SPCR_SPE_BIT);
872         /* clear all interrupts */
873         pch_spi_writereg(data->master, PCH_SPSR,
874                          pch_spi_readreg(data->master, PCH_SPSR));
875         /* clear FIFO */
876         pch_spi_clear_fifo(data->master);
877
878         spin_unlock_irqrestore(&data->lock, flags);
879
880         return rtn;
881 }
882
883 static void pch_dma_rx_complete(void *arg)
884 {
885         struct pch_spi_data *data = arg;
886
887         /* transfer is completed;inform pch_spi_process_messages_dma */
888         data->transfer_complete = true;
889         wake_up_interruptible(&data->wait);
890 }
891
892 static bool pch_spi_filter(struct dma_chan *chan, void *slave)
893 {
894         struct pch_dma_slave *param = slave;
895
896         if ((chan->chan_id == param->chan_id) &&
897             (param->dma_dev == chan->device->dev)) {
898                 chan->private = param;
899                 return true;
900         } else {
901                 return false;
902         }
903 }
904
905 static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
906 {
907         dma_cap_mask_t mask;
908         struct dma_chan *chan;
909         struct pci_dev *dma_dev;
910         struct pch_dma_slave *param;
911         struct pch_spi_dma_ctrl *dma;
912         unsigned int width;
913
914         if (bpw == 8)
915                 width = PCH_DMA_WIDTH_1_BYTE;
916         else
917                 width = PCH_DMA_WIDTH_2_BYTES;
918
919         dma = &data->dma;
920         dma_cap_zero(mask);
921         dma_cap_set(DMA_SLAVE, mask);
922
923         /* Get DMA's dev information */
924         dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(12, 0));
925
926         /* Set Tx DMA */
927         param = &dma->param_tx;
928         param->dma_dev = &dma_dev->dev;
929         param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
930         param->tx_reg = data->io_base_addr + PCH_SPDWR;
931         param->width = width;
932         chan = dma_request_channel(mask, pch_spi_filter, param);
933         if (!chan) {
934                 dev_err(&data->master->dev,
935                         "ERROR: dma_request_channel FAILS(Tx)\n");
936                 data->use_dma = 0;
937                 return;
938         }
939         dma->chan_tx = chan;
940
941         /* Set Rx DMA */
942         param = &dma->param_rx;
943         param->dma_dev = &dma_dev->dev;
944         param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
945         param->rx_reg = data->io_base_addr + PCH_SPDRR;
946         param->width = width;
947         chan = dma_request_channel(mask, pch_spi_filter, param);
948         if (!chan) {
949                 dev_err(&data->master->dev,
950                         "ERROR: dma_request_channel FAILS(Rx)\n");
951                 dma_release_channel(dma->chan_tx);
952                 dma->chan_tx = NULL;
953                 data->use_dma = 0;
954                 return;
955         }
956         dma->chan_rx = chan;
957 }
958
959 static void pch_spi_release_dma(struct pch_spi_data *data)
960 {
961         struct pch_spi_dma_ctrl *dma;
962
963         dma = &data->dma;
964         if (dma->chan_tx) {
965                 dma_release_channel(dma->chan_tx);
966                 dma->chan_tx = NULL;
967         }
968         if (dma->chan_rx) {
969                 dma_release_channel(dma->chan_rx);
970                 dma->chan_rx = NULL;
971         }
972         return;
973 }
974
975 static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
976 {
977         const u8 *tx_buf;
978         const u16 *tx_sbuf;
979         u8 *tx_dma_buf;
980         u16 *tx_dma_sbuf;
981         struct scatterlist *sg;
982         struct dma_async_tx_descriptor *desc_tx;
983         struct dma_async_tx_descriptor *desc_rx;
984         int num;
985         int i;
986         int size;
987         int rem;
988         unsigned long flags;
989         struct pch_spi_dma_ctrl *dma;
990
991         dma = &data->dma;
992
993         /* set baud rate if needed */
994         if (data->cur_trans->speed_hz) {
995                 dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
996                 spin_lock_irqsave(&data->lock, flags);
997                 pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
998                 spin_unlock_irqrestore(&data->lock, flags);
999         }
1000
1001         /* set bits per word if needed */
1002         if (data->cur_trans->bits_per_word &&
1003             (data->current_msg->spi->bits_per_word !=
1004              data->cur_trans->bits_per_word)) {
1005                 dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
1006                 spin_lock_irqsave(&data->lock, flags);
1007                 pch_spi_set_bits_per_word(data->master,
1008                                           data->cur_trans->bits_per_word);
1009                 spin_unlock_irqrestore(&data->lock, flags);
1010                 *bpw = data->cur_trans->bits_per_word;
1011         } else {
1012                 *bpw = data->current_msg->spi->bits_per_word;
1013         }
1014         data->bpw_len = data->cur_trans->len / (*bpw / 8);
1015
1016         /* copy Tx Data */
1017         if (data->cur_trans->tx_buf != NULL) {
1018                 if (*bpw == 8) {
1019                         tx_buf = data->cur_trans->tx_buf;
1020                         tx_dma_buf = dma->tx_buf_virt;
1021                         for (i = 0; i < data->bpw_len; i++)
1022                                 *tx_dma_buf++ = *tx_buf++;
1023                 } else {
1024                         tx_sbuf = data->cur_trans->tx_buf;
1025                         tx_dma_sbuf = dma->tx_buf_virt;
1026                         for (i = 0; i < data->bpw_len; i++)
1027                                 *tx_dma_sbuf++ = *tx_sbuf++;
1028                 }
1029         }
1030         if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
1031                 num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
1032                 size = PCH_DMA_TRANS_SIZE;
1033                 rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
1034         } else {
1035                 num = 1;
1036                 size = data->bpw_len;
1037                 rem = data->bpw_len;
1038         }
1039         dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
1040                 __func__, num, size, rem);
1041         spin_lock_irqsave(&data->lock, flags);
1042
1043         /* set receive fifo threshold and transmit fifo threshold */
1044         pch_spi_setclr_reg(data->master, PCH_SPCR,
1045                            ((size - 1) << SPCR_RFIC_FIELD) |
1046                            (PCH_TX_THOLD << SPCR_TFIC_FIELD),
1047                            MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
1048
1049         spin_unlock_irqrestore(&data->lock, flags);
1050
1051         /* RX */
1052         dma->sg_rx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
1053         sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
1054         /* offset, length setting */
1055         sg = dma->sg_rx_p;
1056         for (i = 0; i < num; i++, sg++) {
1057                 if (i == (num - 2)) {
1058                         sg->offset = size * i;
1059                         sg->offset = sg->offset * (*bpw / 8);
1060                         sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
1061                                     sg->offset);
1062                         sg_dma_len(sg) = rem;
1063                 } else if (i == (num - 1)) {
1064                         sg->offset = size * (i - 1) + rem;
1065                         sg->offset = sg->offset * (*bpw / 8);
1066                         sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
1067                                     sg->offset);
1068                         sg_dma_len(sg) = size;
1069                 } else {
1070                         sg->offset = size * i;
1071                         sg->offset = sg->offset * (*bpw / 8);
1072                         sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
1073                                     sg->offset);
1074                         sg_dma_len(sg) = size;
1075                 }
1076                 sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
1077         }
1078         sg = dma->sg_rx_p;
1079         desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
1080                                         num, DMA_FROM_DEVICE,
1081                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1082         if (!desc_rx) {
1083                 dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
1084                         __func__);
1085                 return;
1086         }
1087         dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
1088         desc_rx->callback = pch_dma_rx_complete;
1089         desc_rx->callback_param = data;
1090         dma->nent = num;
1091         dma->desc_rx = desc_rx;
1092
1093         /* TX */
1094         if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
1095                 num = data->bpw_len / PCH_DMA_TRANS_SIZE;
1096                 size = PCH_DMA_TRANS_SIZE;
1097                 rem = 16;
1098         } else {
1099                 num = 1;
1100                 size = data->bpw_len;
1101                 rem = data->bpw_len;
1102         }
1103
1104         dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
1105         sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
1106         /* offset, length setting */
1107         sg = dma->sg_tx_p;
1108         for (i = 0; i < num; i++, sg++) {
1109                 if (i == 0) {
1110                         sg->offset = 0;
1111                         sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
1112                                     sg->offset);
1113                         sg_dma_len(sg) = rem;
1114                 } else {
1115                         sg->offset = rem + size * (i - 1);
1116                         sg->offset = sg->offset * (*bpw / 8);
1117                         sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
1118                                     sg->offset);
1119                         sg_dma_len(sg) = size;
1120                 }
1121                 sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
1122         }
1123         sg = dma->sg_tx_p;
1124         desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
1125                                         sg, num, DMA_TO_DEVICE,
1126                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1127         if (!desc_tx) {
1128                 dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
1129                         __func__);
1130                 return;
1131         }
1132         dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
1133         desc_tx->callback = NULL;
1134         desc_tx->callback_param = data;
1135         dma->nent = num;
1136         dma->desc_tx = desc_tx;
1137
1138         dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
1139                 "0x2 to SSNXCR\n", __func__);
1140
1141         spin_lock_irqsave(&data->lock, flags);
1142         pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
1143         desc_rx->tx_submit(desc_rx);
1144         desc_tx->tx_submit(desc_tx);
1145         spin_unlock_irqrestore(&data->lock, flags);
1146
1147         /* reset transfer complete flag */
1148         data->transfer_complete = false;
1149 }
1150
1151 static void pch_spi_process_messages(struct work_struct *pwork)
1152 {
1153         struct spi_message *pmsg;
1154         struct pch_spi_data *data;
1155         int bpw;
1156
1157         data = container_of(pwork, struct pch_spi_data, work);
1158         dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
1159
1160         spin_lock(&data->lock);
1161         /* check if suspend has been initiated;if yes flush queue */
1162         if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
1163                 dev_dbg(&data->master->dev, "%s suspend/remove initiated,"
1164                         "flushing queue\n", __func__);
1165                 list_for_each_entry(pmsg, data->queue.next, queue) {
1166                         pmsg->status = -EIO;
1167
1168                         if (pmsg->complete != 0) {
1169                                 spin_unlock(&data->lock);
1170                                 pmsg->complete(pmsg->context);
1171                                 spin_lock(&data->lock);
1172                         }
1173
1174                         /* delete from queue */
1175                         list_del_init(&pmsg->queue);
1176                 }
1177
1178                 spin_unlock(&data->lock);
1179                 return;
1180         }
1181
1182         data->bcurrent_msg_processing = true;
1183         dev_dbg(&data->master->dev,
1184                 "%s Set data->bcurrent_msg_processing= true\n", __func__);
1185
1186         /* Get the message from the queue and delete it from there. */
1187         data->current_msg = list_entry(data->queue.next, struct spi_message,
1188                                         queue);
1189
1190         list_del_init(&data->current_msg->queue);
1191
1192         data->current_msg->status = 0;
1193
1194         pch_spi_select_chip(data, data->current_msg->spi);
1195
1196         spin_unlock(&data->lock);
1197
1198         if (data->use_dma)
1199                 pch_spi_request_dma(data,
1200                                     data->current_msg->spi->bits_per_word);
1201         pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
1202         do {
1203                 /* If we are already processing a message get the next
1204                 transfer structure from the message otherwise retrieve
1205                 the 1st transfer request from the message. */
1206                 spin_lock(&data->lock);
1207                 if (data->cur_trans == NULL) {
1208                         data->cur_trans =
1209                                 list_entry(data->current_msg->transfers.next,
1210                                            struct spi_transfer, transfer_list);
1211                         dev_dbg(&data->master->dev, "%s "
1212                                 ":Getting 1st transfer message\n", __func__);
1213                 } else {
1214                         data->cur_trans =
1215                                 list_entry(data->cur_trans->transfer_list.next,
1216                                            struct spi_transfer, transfer_list);
1217                         dev_dbg(&data->master->dev, "%s "
1218                                 ":Getting next transfer message\n", __func__);
1219                 }
1220                 spin_unlock(&data->lock);
1221
1222                 if (data->use_dma) {
1223                         pch_spi_handle_dma(data, &bpw);
1224                         if (!pch_spi_start_transfer(data))
1225                                 goto out;
1226                         pch_spi_copy_rx_data_for_dma(data, bpw);
1227                 } else {
1228                         pch_spi_set_tx(data, &bpw);
1229                         pch_spi_set_ir(data);
1230                         pch_spi_copy_rx_data(data, bpw);
1231                         kfree(data->pkt_rx_buff);
1232                         data->pkt_rx_buff = NULL;
1233                         kfree(data->pkt_tx_buff);
1234                         data->pkt_tx_buff = NULL;
1235                 }
1236                 /* increment message count */
1237                 data->current_msg->actual_length += data->cur_trans->len;
1238
1239                 dev_dbg(&data->master->dev,
1240                         "%s:data->current_msg->actual_length=%d\n",
1241                         __func__, data->current_msg->actual_length);
1242
1243                 /* check for delay */
1244                 if (data->cur_trans->delay_usecs) {
1245                         dev_dbg(&data->master->dev, "%s:"
1246                                 "delay in usec=%d\n", __func__,
1247                                 data->cur_trans->delay_usecs);
1248                         udelay(data->cur_trans->delay_usecs);
1249                 }
1250
1251                 spin_lock(&data->lock);
1252
1253                 /* No more transfer in this message. */
1254                 if ((data->cur_trans->transfer_list.next) ==
1255                     &(data->current_msg->transfers)) {
1256                         pch_spi_nomore_transfer(data);
1257                 }
1258
1259                 spin_unlock(&data->lock);
1260
1261         } while (data->cur_trans != NULL);
1262
1263 out:
1264         pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
1265         if (data->use_dma)
1266                 pch_spi_release_dma(data);
1267 }
1268
1269 static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
1270                                    struct pch_spi_data *data)
1271 {
1272         dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
1273
1274         /* free workqueue */
1275         if (data->wk != NULL) {
1276                 destroy_workqueue(data->wk);
1277                 data->wk = NULL;
1278                 dev_dbg(&board_dat->pdev->dev,
1279                         "%s destroy_workqueue invoked successfully\n",
1280                         __func__);
1281         }
1282 }
1283
1284 static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
1285                                  struct pch_spi_data *data)
1286 {
1287         int retval = 0;
1288
1289         dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
1290
1291         /* create workqueue */
1292         data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
1293         if (!data->wk) {
1294                 dev_err(&board_dat->pdev->dev,
1295                         "%s create_singlet hread_workqueue failed\n", __func__);
1296                 retval = -EBUSY;
1297                 goto err_return;
1298         }
1299
1300         /* reset PCH SPI h/w */
1301         pch_spi_reset(data->master);
1302         dev_dbg(&board_dat->pdev->dev,
1303                 "%s pch_spi_reset invoked successfully\n", __func__);
1304
1305         dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
1306
1307 err_return:
1308         if (retval != 0) {
1309                 dev_err(&board_dat->pdev->dev,
1310                         "%s FAIL:invoking pch_spi_free_resources\n", __func__);
1311                 pch_spi_free_resources(board_dat, data);
1312         }
1313
1314         dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
1315
1316         return retval;
1317 }
1318
1319 static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
1320                              struct pch_spi_data *data)
1321 {
1322         struct pch_spi_dma_ctrl *dma;
1323
1324         dma = &data->dma;
1325         if (dma->tx_buf_dma)
1326                 dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
1327                                   dma->tx_buf_virt, dma->tx_buf_dma);
1328         if (dma->rx_buf_dma)
1329                 dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
1330                                   dma->rx_buf_virt, dma->rx_buf_dma);
1331         return;
1332 }
1333
1334 static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
1335                               struct pch_spi_data *data)
1336 {
1337         struct pch_spi_dma_ctrl *dma;
1338
1339         dma = &data->dma;
1340         /* Get Consistent memory for Tx DMA */
1341         dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
1342                                 PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
1343         /* Get Consistent memory for Rx DMA */
1344         dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
1345                                 PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
1346 }
1347
1348 static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
1349 {
1350         int ret;
1351         struct spi_master *master;
1352         struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
1353         struct pch_spi_data *data;
1354
1355         dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
1356
1357         master = spi_alloc_master(&board_dat->pdev->dev,
1358                                   sizeof(struct pch_spi_data));
1359         if (!master) {
1360                 dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
1361                         plat_dev->id);
1362                 return -ENOMEM;
1363         }
1364
1365         data = spi_master_get_devdata(master);
1366         data->master = master;
1367
1368         platform_set_drvdata(plat_dev, data);
1369
1370         /* baseaddress + address offset) */
1371         data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
1372                                          PCH_ADDRESS_SIZE * plat_dev->id;
1373         data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0) +
1374                                          PCH_ADDRESS_SIZE * plat_dev->id;
1375         if (!data->io_remap_addr) {
1376                 dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
1377                 ret = -ENOMEM;
1378                 goto err_pci_iomap;
1379         }
1380
1381         dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
1382                 plat_dev->id, data->io_remap_addr);
1383
1384         /* initialize members of SPI master */
1385         master->bus_num = -1;
1386         master->num_chipselect = PCH_MAX_CS;
1387         master->setup = pch_spi_setup;
1388         master->transfer = pch_spi_transfer;
1389
1390         data->board_dat = board_dat;
1391         data->plat_dev = plat_dev;
1392         data->n_curnt_chip = 255;
1393         data->status = STATUS_RUNNING;
1394         data->ch = plat_dev->id;
1395         data->use_dma = use_dma;
1396
1397         INIT_LIST_HEAD(&data->queue);
1398         spin_lock_init(&data->lock);
1399         INIT_WORK(&data->work, pch_spi_process_messages);
1400         init_waitqueue_head(&data->wait);
1401
1402         ret = pch_spi_get_resources(board_dat, data);
1403         if (ret) {
1404                 dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
1405                 goto err_spi_get_resources;
1406         }
1407
1408         ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
1409                           IRQF_SHARED, KBUILD_MODNAME, data);
1410         if (ret) {
1411                 dev_err(&plat_dev->dev,
1412                         "%s request_irq failed\n", __func__);
1413                 goto err_request_irq;
1414         }
1415         data->irq_reg_sts = true;
1416
1417         pch_spi_set_master_mode(master);
1418
1419         ret = spi_register_master(master);
1420         if (ret != 0) {
1421                 dev_err(&plat_dev->dev,
1422                         "%s spi_register_master FAILED\n", __func__);
1423                 goto err_spi_register_master;
1424         }
1425
1426         if (use_dma) {
1427                 dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
1428                 pch_alloc_dma_buf(board_dat, data);
1429         }
1430
1431         return 0;
1432
1433 err_spi_register_master:
1434         free_irq(board_dat->pdev->irq, board_dat);
1435 err_request_irq:
1436         pch_spi_free_resources(board_dat, data);
1437 err_spi_get_resources:
1438         pci_iounmap(board_dat->pdev, data->io_remap_addr);
1439 err_pci_iomap:
1440         spi_master_put(master);
1441
1442         return ret;
1443 }
1444
/*
 * pch_spi_pd_remove() - tear down one SPI channel's platform device.
 *
 * Frees DMA bounce buffers (when DMA was enabled), waits up to 500
 * iterations of PCH_SLEEP_TIME for the message queue to drain, then
 * releases the workqueue, the IRQ, the MMIO mapping and the SPI master.
 * Always returns 0.
 */
static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev)
{
	struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
	struct pch_spi_data *data = platform_get_drvdata(plat_dev);
	int count;
	unsigned long flags;

	dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
		__func__, plat_dev->id, board_dat->pdev->irq);

	if (use_dma)
		pch_free_dma_buf(board_dat, data);

	/* check for any pending messages; no action is taken if the queue
	 * is still full; but at least we tried.  Unload anyway */
	count = 500;
	spin_lock_irqsave(&data->lock, flags);
	/* STATUS_EXITING makes pch_spi_process_messages() flush the queue */
	data->status = STATUS_EXITING;
	while ((list_empty(&data->queue) == 0) && --count) {
		dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
			__func__);
		spin_unlock_irqrestore(&data->lock, flags);
		msleep(PCH_SLEEP_TIME);
		spin_lock_irqsave(&data->lock, flags);
	}
	spin_unlock_irqrestore(&data->lock, flags);

	pch_spi_free_resources(board_dat, data);
	/* disable interrupts & free IRQ */
	if (data->irq_reg_sts) {
		/* disable interrupts */
		pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
		data->irq_reg_sts = false;
		free_irq(board_dat->pdev->irq, data);
	}

	/* NOTE(review): io_remap_addr is the pci_iomap() base plus a
	 * per-channel offset (see pch_spi_pd_probe()), so the cookie
	 * passed to pci_iounmap() looks wrong for channel ids > 0 -
	 * confirm.  Also verify that spi_master_put() after
	 * spi_unregister_master() does not drop one reference too many. */
	pci_iounmap(board_dat->pdev, data->io_remap_addr);
	spi_unregister_master(data->master);
	spi_master_put(data->master);
	platform_set_drvdata(plat_dev, NULL);

	return 0;
}
1488 #ifdef CONFIG_PM
1489 static int pch_spi_pd_suspend(struct platform_device *pd_dev,
1490                               pm_message_t state)
1491 {
1492         u8 count;
1493         struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
1494         struct pch_spi_data *data = platform_get_drvdata(pd_dev);
1495
1496         dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);
1497
1498         if (!board_dat) {
1499                 dev_err(&pd_dev->dev,
1500                         "%s pci_get_drvdata returned NULL\n", __func__);
1501                 return -EFAULT;
1502         }
1503
1504         /* check if the current message is processed:
1505            Only after thats done the transfer will be suspended */
1506         count = 255;
1507         while ((--count) > 0) {
1508                 if (!(data->bcurrent_msg_processing))
1509                         break;
1510                 msleep(PCH_SLEEP_TIME);
1511         }
1512
1513         /* Free IRQ */
1514         if (data->irq_reg_sts) {
1515                 /* disable all interrupts */
1516                 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
1517                 pch_spi_reset(data->master);
1518                 free_irq(board_dat->pdev->irq, data);
1519
1520                 data->irq_reg_sts = false;
1521                 dev_dbg(&pd_dev->dev,
1522                         "%s free_irq invoked successfully.\n", __func__);
1523         }
1524
1525         return 0;
1526 }
1527
1528 static int pch_spi_pd_resume(struct platform_device *pd_dev)
1529 {
1530         struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
1531         struct pch_spi_data *data = platform_get_drvdata(pd_dev);
1532         int retval;
1533
1534         if (!board_dat) {
1535                 dev_err(&pd_dev->dev,
1536                         "%s pci_get_drvdata returned NULL\n", __func__);
1537                 return -EFAULT;
1538         }
1539
1540         if (!data->irq_reg_sts) {
1541                 /* register IRQ */
1542                 retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
1543                                      IRQF_SHARED, KBUILD_MODNAME, data);
1544                 if (retval < 0) {
1545                         dev_err(&pd_dev->dev,
1546                                 "%s request_irq failed\n", __func__);
1547                         return retval;
1548                 }
1549
1550                 /* reset PCH SPI h/w */
1551                 pch_spi_reset(data->master);
1552                 pch_spi_set_master_mode(data->master);
1553                 data->irq_reg_sts = true;
1554         }
1555         return 0;
1556 }
1557 #else
1558 #define pch_spi_pd_suspend NULL
1559 #define pch_spi_pd_resume NULL
1560 #endif
1561
/* Per-channel platform driver: pch_spi_probe() creates one "pch-spi"
 * platform device per SPI channel, each bound to this driver. */
static struct platform_driver pch_spi_pd_driver = {
	.driver = {
		.name = "pch-spi",
		.owner = THIS_MODULE,
	},
	.probe = pch_spi_pd_probe,
	.remove = __devexit_p(pch_spi_pd_remove),
	.suspend = pch_spi_pd_suspend,
	.resume = pch_spi_pd_resume
};
1572
1573 static int __devinit pch_spi_probe(struct pci_dev *pdev,
1574                                    const struct pci_device_id *id)
1575 {
1576         struct pch_spi_board_data *board_dat;
1577         struct platform_device *pd_dev = NULL;
1578         int retval;
1579         int i;
1580         struct pch_pd_dev_save *pd_dev_save;
1581
1582         pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
1583         if (!pd_dev_save) {
1584                 dev_err(&pdev->dev, "%s Can't allocate pd_dev_sav\n", __func__);
1585                 return -ENOMEM;
1586         }
1587
1588         board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
1589         if (!board_dat) {
1590                 dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__);
1591                 retval = -ENOMEM;
1592                 goto err_no_mem;
1593         }
1594
1595         retval = pci_request_regions(pdev, KBUILD_MODNAME);
1596         if (retval) {
1597                 dev_err(&pdev->dev, "%s request_region failed\n", __func__);
1598                 goto pci_request_regions;
1599         }
1600
1601         board_dat->pdev = pdev;
1602         board_dat->num = id->driver_data;
1603         pd_dev_save->num = id->driver_data;
1604         pd_dev_save->board_dat = board_dat;
1605
1606         retval = pci_enable_device(pdev);
1607         if (retval) {
1608                 dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
1609                 goto pci_enable_device;
1610         }
1611
1612         for (i = 0; i < board_dat->num; i++) {
1613                 pd_dev = platform_device_alloc("pch-spi", i);
1614                 if (!pd_dev) {
1615                         dev_err(&pdev->dev, "platform_device_alloc failed\n");
1616                         goto err_platform_device;
1617                 }
1618                 pd_dev_save->pd_save[i] = pd_dev;
1619                 pd_dev->dev.parent = &pdev->dev;
1620
1621                 retval = platform_device_add_data(pd_dev, board_dat,
1622                                                   sizeof(*board_dat));
1623                 if (retval) {
1624                         dev_err(&pdev->dev,
1625                                 "platform_device_add_data failed\n");
1626                         platform_device_put(pd_dev);
1627                         goto err_platform_device;
1628                 }
1629
1630                 retval = platform_device_add(pd_dev);
1631                 if (retval) {
1632                         dev_err(&pdev->dev, "platform_device_add failed\n");
1633                         platform_device_put(pd_dev);
1634                         goto err_platform_device;
1635                 }
1636         }
1637
1638         pci_set_drvdata(pdev, pd_dev_save);
1639
1640         return 0;
1641
1642 err_platform_device:
1643         pci_disable_device(pdev);
1644 pci_enable_device:
1645         pci_release_regions(pdev);
1646 pci_request_regions:
1647         kfree(board_dat);
1648 err_no_mem:
1649         kfree(pd_dev_save);
1650
1651         return retval;
1652 }
1653
1654 static void __devexit pch_spi_remove(struct pci_dev *pdev)
1655 {
1656         int i;
1657         struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
1658
1659         dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);
1660
1661         for (i = 0; i < pd_dev_save->num; i++)
1662                 platform_device_unregister(pd_dev_save->pd_save[i]);
1663
1664         pci_disable_device(pdev);
1665         pci_release_regions(pdev);
1666         kfree(pd_dev_save->board_dat);
1667         kfree(pd_dev_save);
1668 }
1669
1670 #ifdef CONFIG_PM
1671 static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
1672 {
1673         int retval;
1674         struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
1675
1676         dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
1677
1678         pd_dev_save->board_dat->suspend_sts = true;
1679
1680         /* save config space */
1681         retval = pci_save_state(pdev);
1682         if (retval == 0) {
1683                 pci_enable_wake(pdev, PCI_D3hot, 0);
1684                 pci_disable_device(pdev);
1685                 pci_set_power_state(pdev, PCI_D3hot);
1686         } else {
1687                 dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
1688         }
1689
1690         return retval;
1691 }
1692
1693 static int pch_spi_resume(struct pci_dev *pdev)
1694 {
1695         int retval;
1696         struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
1697         dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
1698
1699         pci_set_power_state(pdev, PCI_D0);
1700         pci_restore_state(pdev);
1701
1702         retval = pci_enable_device(pdev);
1703         if (retval < 0) {
1704                 dev_err(&pdev->dev,
1705                         "%s pci_enable_device failed\n", __func__);
1706         } else {
1707                 pci_enable_wake(pdev, PCI_D3hot, 0);
1708
1709                 /* set suspend status to false */
1710                 pd_dev_save->board_dat->suspend_sts = false;
1711         }
1712
1713         return retval;
1714 }
1715 #else
1716 #define pch_spi_suspend NULL
1717 #define pch_spi_resume NULL
1718
1719 #endif
1720
1721 static struct pci_driver pch_spi_pcidev_driver = {
1722         .name = "pch_spi",
1723         .id_table = pch_spi_pcidev_id,
1724         .probe = pch_spi_probe,
1725         .remove = pch_spi_remove,
1726         .suspend = pch_spi_suspend,
1727         .resume = pch_spi_resume,
1728 };
1729
1730 static int __init pch_spi_init(void)
1731 {
1732         int ret;
1733         ret = platform_driver_register(&pch_spi_pd_driver);
1734         if (ret)
1735                 return ret;
1736
1737         ret = pci_register_driver(&pch_spi_pcidev_driver);
1738         if (ret)
1739                 return ret;
1740
1741         return 0;
1742 }
1743 module_init(pch_spi_init);
1744
/* Module unload: unregister the two drivers in the reverse order of
 * pch_spi_init() - PCI first (removes the platform devices via
 * pch_spi_remove), then the platform driver. */
static void __exit pch_spi_exit(void)
{
	pci_unregister_driver(&pch_spi_pcidev_driver);
	platform_driver_unregister(&pch_spi_pd_driver);
}
module_exit(pch_spi_exit);
1751
/* use_dma (presumably declared earlier in this file - not visible in
 * this chunk): 1 (default) selects DMA transfers, 0 forces PIO.
 * Writable at runtime via sysfs (mode 0644). */
module_param(use_dma, int, 0644);
MODULE_PARM_DESC(use_dma,
		 "to use DMA for data transfers pass 1 else 0; default 1");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7xxx IOH SPI Driver");