/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
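
/*
 * Tuning sketch (illustrative, assuming the driver is built as the ioatdma
 * module): because the parameter is declared with 0644 permissions it can be
 * set at load time, e.g.
 *	modprobe ioatdma ioat_pending_level=8
 * or adjusted later through
 *	/sys/module/ioatdma/parameters/ioat_pending_level
 * Larger values batch more descriptors per APPEND doorbell write.
 */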

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat1_cleanup_tasklet(unsigned long data);

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       work_func_t work_fn, void (*tasklet)(unsigned long),
		       unsigned long tasklet_data)
{
	struct dma_device *dma = &device->common;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	INIT_DELAYED_WORK(&chan->work, work_fn);
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	tasklet_init(&chan->cleanup_task, tasklet, tasklet_data);
	tasklet_disable(&chan->cleanup_task);
}
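
/*
 * Note on the reg_base computation above: each channel owns a 0x80-byte
 * register block and block 0 holds the device-global registers (CHANCNT,
 * XFERCAP, INTRCTRL, ...), so channel idx lives at reg_base + 0x80 * (idx + 1).
 * The cleanup tasklet is created disabled here and only enabled once
 * descriptors have been allocated in ioat1_dma_alloc_chan_resources().
 */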

static void ioat1_reset_part2(struct work_struct *work);

/**
 * ioat1_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat1_reset_part2,
				  ioat1_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}
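
/*
 * Usage sketch (illustrative client code, not part of this driver): a
 * dmaengine consumer typically prepares a copy, submits it, and then rings
 * the doorbell via issue_pending, e.g.
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 *
 * Submissions below ioat_pending_level are only chained in software;
 * issue_pending forces the APPEND command regardless of how many descriptors
 * are pending.
 */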

/**
 * ioat1_reset_part2 - reinit the channel after a reset
 */
static void ioat1_reset_part2(struct work_struct *work)
{
	struct ioat_chan_common *chan;
	struct ioat_dma_chan *ioat;
	struct ioat_desc_sw *desc;
	int dmacount;
	bool start_null = false;

	chan = container_of(work, struct ioat_chan_common, work.work);
	ioat = container_of(chan, struct ioat_dma_chan, base);
	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->desc_lock);

	*chan->completion = 0;
	ioat->pending = 0;

	/* count the descriptors waiting */
	dmacount = 0;
	if (ioat->used_desc.prev) {
		desc = to_ioat_desc(ioat->used_desc.prev);
		do {
			dmacount++;
			desc = to_ioat_desc(desc->node.next);
		} while (&desc->node != ioat->used_desc.next);
	}

	if (dmacount) {
		/*
		 * write the new starting descriptor address
		 * this puts channel engine into ARMED state
		 */
		desc = to_ioat_desc(ioat->used_desc.prev);
		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->txd.phys) >> 32,
		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, chan->reg_base
			+ IOAT_CHANCMD_OFFSET(chan->device->version));
	} else
		start_null = true;
	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	dev_err(to_dev(chan),
		"chan%d reset - %d descs waiting, %d total desc\n",
		chan_num(chan), dmacount, ioat->desccount);

	if (start_null)
		ioat1_dma_start_null_desc(ioat);
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	if (!ioat->used_desc.prev)
		return;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	spin_lock_bh(&ioat->desc_lock);
	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->desc_lock);

	/* schedule the 2nd half instead of sleeping a long time */
	schedule_delayed_work(&chan->work, RESET_DELAY);
}

/**
 * ioat1_chan_watchdog - watch for stuck channels
 */
static void ioat1_chan_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);
	struct ioat_dma_chan *ioat;
	struct ioat_chan_common *chan;
	u64 completion;
	u32 completion_low;
	int i;
	unsigned long compl_desc_addr_hw;

	for (i = 0; i < device->common.chancnt; i++) {
		chan = ioat_chan_by_index(device, i);
		ioat = container_of(chan, struct ioat_dma_chan, base);

		if (/* have we started processing anything yet */
		    chan->last_completion
		    /* have we completed any since last watchdog cycle? */
		    && (chan->last_completion == chan->watchdog_completion)
		    /* has TCP stuck on one cookie since last watchdog? */
		    && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie)
		    && (chan->watchdog_tcp_cookie != chan->completed_cookie)
		    /* is there something in the chain to be processed? */
		    /* CB1 chain always has at least the last one processed */
		    && (ioat->used_desc.prev != ioat->used_desc.next)
		    && ioat->pending == 0) {

			/*
			 * check CHANSTS register for completed
			 * descriptor address.
			 * if it is different than completion writeback,
			 * and it has changed since the last watchdog
			 * we can assume that channel
			 * is still working correctly
			 * and the problem is in completion writeback.
			 * update completion writeback
			 * with actual CHANSTS value
			 * else
			 * try resetting the channel
			 */

			/* we need to read the low address first as this
			 * causes the chipset to latch the upper bits
			 * for the subsequent read
			 */
			completion_low = readl(chan->reg_base +
				IOAT_CHANSTS_OFFSET_LOW(chan->device->version));
			completion = readl(chan->reg_base +
				IOAT_CHANSTS_OFFSET_HIGH(chan->device->version));
			completion <<= 32;
			completion |= completion_low;
			compl_desc_addr_hw = completion &
					IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;

			if ((compl_desc_addr_hw != 0)
			    && (compl_desc_addr_hw != chan->watchdog_completion)
			    && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) {
				chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
				*chan->completion = completion;
			} else {
				ioat1_reset_channel(ioat);
				chan->watchdog_completion = 0;
				chan->last_compl_desc_addr_hw = 0;
			}
		} else {
			chan->last_compl_desc_addr_hw = 0;
			chan->watchdog_completion = chan->last_completion;
		}

		chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie;
	}

	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(tx->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	ioat->pending += desc->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}
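
/*
 * Note: tx_submit only assigns the cookie and links the new descriptors onto
 * the in-flight chain; the APPEND doorbell is written either here, once
 * ioat->pending reaches ioat_pending_level, or later when the client calls
 * device_issue_pending.
 */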

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}
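
/*
 * Each software descriptor pairs a kzalloc'd tracking structure with a
 * hardware descriptor carved from the device's pci_pool, so the driver keeps
 * both the CPU pointer (desc_sw->hw) and the bus address (desc_sw->txd.phys)
 * that must be programmed into the hardware chain.
 */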

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		   IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		   IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}
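
/*
 * The completion writeback area allocated above gives the hardware a 64-bit
 * slot in coherent memory where it records the address of the last completed
 * descriptor; the cleanup path reads *chan->completion instead of polling
 * CHANSTS over MMIO. Its bus address is programmed with two 32-bit writes
 * because, as noted above, a single 64-bit MMIO write does not work here.
 */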

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	chan->watchdog_completion = 0;
	chan->last_compl_desc_addr_hw = 0;
	chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));

	return new;
}

static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;
		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->tx_cnt = tx_cnt;
	desc->len = total_len;
	list_splice(&chain, &desc->txd.tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}
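
/*
 * Copies longer than the channel's xfercap are split across several hardware
 * descriptors chained through hw->next; only the final descriptor requests a
 * completion writeback (and an interrupt when DMA_PREP_INTERRUPT is set), so
 * the transaction is reported complete only once the whole chain has run.
 */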

static void ioat1_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;

	ioat1_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
		       int direction, enum dma_ctrl_flags flags, bool dst)
{
	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
		pci_unmap_single(pdev, addr, len, direction);
	else
		pci_unmap_page(pdev, addr, len, direction);
}

void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}
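
/*
 * The hardware descriptor passed in is the last one of a (possibly split)
 * transaction, so its src/dst addresses point at the final chunk; subtracting
 * offset = len - hw->size recovers the start of the original buffers so the
 * full len-byte mappings can be released in one call each.
 */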

unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = completion & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if ((completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
	    IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			readl(chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @chan: ioat channel to be cleaned up
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	struct dma_async_tx_descriptor *tx;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	phys_complete = ioat_get_current_completion(chan);
	if (phys_complete == chan->last_completion) {
		spin_unlock_bh(&chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after 2 seconds
		 */
		if (time_after(jiffies,
			       chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
			ioat1_chan_watchdog(&(chan->device->work.work));
			chan->last_completion_time = jiffies;
		}
		return;
	}
	chan->last_completion_time = jiffies;

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		__func__, phys_complete);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			cookie = tx->cookie;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
			else
				tx->cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it, but don't look at it next
			 * time, either
			 */
			tx->cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat->desc_lock);

	chan->last_completion = phys_complete;
	if (cookie != 0)
		chan->completed_cookie = cookie;

	spin_unlock_bh(&chan->cleanup_lock);
}

static enum dma_status
ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		      dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat1_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->txd.phys) >> 32,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, chan->reg_base
		+ IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->desc_lock);
}
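
/*
 * The NULL descriptor copies no data; it simply gives the channel a valid
 * chain head (programmed into CHAINADDR above) and produces an initial
 * completion writeback, which is why alloc_chan_resources uses it to "give
 * chain to dma device" before any real transactions are submitted.
 */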

/*
 * Perform a IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i, err = 0;
	u8 *src, *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	struct completion cmp;
	unsigned long tmo, flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
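
/*
 * Example (sketch, assuming the driver is loaded as the ioatdma module):
 * forcing plain MSI instead of MSI-X at load time looks like
 *	modprobe ioatdma ioat_interrupt_style=msi
 */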

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		dma->chancnt, device->version, IOAT_DMA_VERSION);

	if (!dma->chancnt) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
			"zero channels detected\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat1_dma_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog);
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);

	return err;
}

void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	if (device->version != IOAT_VER_3_0)
		cancel_delayed_work(&device->work);

	ioat_disable_interrupts(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}