/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);
static DEFINE_SPINLOCK(spu_list_lock);

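/*
 * Invalidate every SLB entry on one SPE. This is only meaningful
 * while the MFC runs with translation enabled (SR1 relocate bit set);
 * otherwise there is nothing to invalidate.
 */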
void spu_invalidate_slbs(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
        struct spu *spu;
        unsigned long flags;

        spin_lock_irqsave(&spu_list_lock, flags);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                if (spu->mm == mm)
                        spu_invalidate_slbs(spu);
        }
        spin_unlock_irqrestore(&spu_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        __cpus_setall(&mm->cpu_vm_mask, nr);
}

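/*
 * Bind an mm to an SPE. Done under spu_list_lock so that
 * spu_flush_all_slbs() always sees a consistent spu->mm, and the mm
 * is flagged as needing global TLB invalidation broadcasts, because
 * the SPE's translations are not covered by CPU-local tlbie.
 */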
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
        unsigned long flags;

        spin_lock_irqsave(&spu_list_lock, flags);
        spu->mm = mm;
        spin_unlock_irqrestore(&spu_list_lock, flags);
        if (mm)
                mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

static int __spu_trap_invalid_dma(struct spu *spu)
{
        pr_debug("%s\n", __func__);
        spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
        return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
        pr_debug("%s\n", __func__);
        spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
        return 0;
}

static int __spu_trap_error(struct spu *spu)
{
        pr_debug("%s\n", __func__);
        spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
        return 0;
}

static void spu_restart_dma(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

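/*
 * SLB miss raised on behalf of the SPE: build the ESID/VSID pair for
 * the faulting effective address, write it into the next of the eight
 * SLB slots (simple round-robin replacement) and restart the MFC DMA.
 */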
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        struct mm_struct *mm = spu->mm;
        u64 esid, vsid, llp;
        int psize;

        pr_debug("%s\n", __func__);

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
                /* SLBs are pre-loaded for context switch, so
                 * we should never get here!
                 */
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }
        esid = (ea & ESID_MASK) | SLB_ESID_V;

        switch(REGION_ID(ea)) {
        case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
                psize = get_slice_psize(mm, ea);
#else
                psize = mm->context.user_psize;
#endif
                vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
                                SLB_VSID_USER;
                break;
        case VMALLOC_REGION_ID:
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
                        SLB_VSID_KERNEL;
                break;
        case KERNEL_REGION_ID:
                psize = mmu_linear_psize;
                vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
                        SLB_VSID_KERNEL;
                break;
        default:
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
                pr_debug("invalid region access at %016lx\n", ea);
                return 1;
        }
        llp = mmu_psize_defs[psize].sllp;

        out_be64(&priv2->slb_index_W, spu->slb_replace);
        out_be64(&priv2->slb_vsid_RW, vsid | llp);
        out_be64(&priv2->slb_esid_RW, esid);

        spu->slb_replace++;
        if (spu->slb_replace >= 8)
                spu->slb_replace = 0;

        spu_restart_dma(spu);
        return 0;
}

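/*
 * Hash-table miss raised by MFC DMA: kernel-side faults are resolved
 * right here via hash_page(); user faults are recorded in dar/dsisr
 * and deferred to the owning context via stop_callback().
 */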
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
        pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

        /* Handle kernel space hash faults immediately.
           User hash faults need to be deferred to process context. */
        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
            && REGION_ID(ea) != USER_REGION_ID
            && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
                spu_restart_dma(spu);
                return 0;
        }

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }

        spu->dar = ea;
        spu->dsisr = dsisr;
        mb();
        spu->stop_callback(spu);
        return 0;
}

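/*
 * Class 0 interrupts report serious error conditions: DMA alignment,
 * invalid DMA and SPU errors. The hard irq handler only latches the
 * status and kicks the context; the exported bottom half dispatches
 * the individual events later.
 */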
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask;

        spu = data;

        mask = spu_int_mask_get(spu, 0);
        stat = spu_int_stat_get(spu, 0);
        stat &= mask;

        spin_lock(&spu->register_lock);
        spu->class_0_pending |= stat;
        spin_unlock(&spu->register_lock);

        spu->stop_callback(spu);

        spu_int_stat_clear(spu, 0, stat);

        return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
        unsigned long flags;
        unsigned long stat;

        spin_lock_irqsave(&spu->register_lock, flags);
        stat = spu->class_0_pending;
        spu->class_0_pending = 0;

        if (stat & 1) /* invalid DMA alignment */
                __spu_trap_dma_align(spu);

        if (stat & 2) /* invalid MFC DMA */
                __spu_trap_invalid_dma(spu);

        if (stat & 4) /* error on SPU */
                __spu_trap_error(spu);

        spu_int_stat_clear(spu, 0, stat);
        spin_unlock_irqrestore(&spu->register_lock, flags);

        return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

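/*
 * Class 1 interrupts are the translation faults raised by MFC DMA:
 * SLB segment misses and hash page-table misses. DAR, DSISR and the
 * status bits are read and cleared atomically under register_lock
 * before the individual faults are handled.
 */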
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;

        spu = data;

        /* atomically read & clear class1 status. */
        spin_lock(&spu->register_lock);
        mask  = spu_int_mask_get(spu, 1);
        stat  = spu_int_stat_get(spu, 1) & mask;
        dar   = spu_mfc_dar_get(spu);
        dsisr = spu_mfc_dsisr_get(spu);
        if (stat & 2) /* mapping fault */
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);
        spin_unlock(&spu->register_lock);
        pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
                        dar, dsisr);

        if (stat & 1) /* segment fault */
                __spu_trap_data_seg(spu, dar);

        if (stat & 2) { /* mapping fault */
                __spu_trap_data_map(spu, dar, dsisr);
        }

        if (stat & 4) /* ls compare & suspend on get */
                ;

        if (stat & 8) /* ls compare & suspend on put */
                ;

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

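/*
 * Class 2 interrupts carry the application-visible events: mailbox
 * traffic, SPU stop-and-signal, SPU halt and MFC tag-group completion.
 * Each event is forwarded to the callback registered by the owning
 * context.
 */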
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;

        spu = data;
        spin_lock(&spu->register_lock);
        stat = spu_int_stat_get(spu, 2);
        mask = spu_int_mask_get(spu, 2);
        /* ignore interrupts we're not waiting for */
        stat &= mask;
        /*
         * mailbox interrupts (0x1 and 0x10) are level triggered.
         * mask them now before acknowledging.
         */
        if (stat & 0x11)
                spu_int_mask_and(spu, 2, ~(stat & 0x11));
        /* acknowledge all interrupts before the callbacks */
        spu_int_stat_clear(spu, 2, stat);
        spin_unlock(&spu->register_lock);

        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

        if (stat & 1) /* PPC core mailbox */
                spu->ibox_callback(spu);

        if (stat & 2) /* SPU stop-and-signal */
                spu->stop_callback(spu);

        if (stat & 4) /* SPU halted */
                spu->stop_callback(spu);

        if (stat & 8) /* DMA tag group complete */
                spu->mfc_callback(spu);

        if (stat & 0x10) /* SPU mailbox threshold */
                spu->wbox_callback(spu);

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
        int ret = 0;

        if (spu->irqs[0] != NO_IRQ) {
                snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
                         spu->number);
                ret = request_irq(spu->irqs[0], spu_irq_class_0,
                                  IRQF_DISABLED, spu->irq_c0, spu);
                if (ret)
                        goto bail0;
        }
        if (spu->irqs[1] != NO_IRQ) {
                snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
                         spu->number);
                ret = request_irq(spu->irqs[1], spu_irq_class_1,
                                  IRQF_DISABLED, spu->irq_c1, spu);
                if (ret)
                        goto bail1;
        }
        if (spu->irqs[2] != NO_IRQ) {
                snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
                         spu->number);
                ret = request_irq(spu->irqs[2], spu_irq_class_2,
                                  IRQF_DISABLED, spu->irq_c2, spu);
                if (ret)
                        goto bail2;
        }
        return 0;

bail2:
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
bail1:
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
bail0:
        return ret;
}

static void spu_free_irqs(struct spu *spu)
{
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
        if (spu->irqs[2] != NO_IRQ)
                free_irq(spu->irqs[2], spu);
}

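/*
 * Bring the SPE channels into a known state: drain stale data from
 * the data-carrying channels, then set every channel count to the
 * value a freshly loaded context expects.
 */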
static void spu_init_channels(struct spu *spu)
{
        static const struct {
                unsigned channel;
                unsigned count;
        } zero_list[] = {
                { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
                { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
        }, count_list[] = {
                { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
                { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
                { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
        };
        struct spu_priv2 __iomem *priv2;
        int i;

        priv2 = spu->priv2;

        /* initialize all channel data to zero */
        for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
                int count;

                out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
                for (count = 0; count < zero_list[i].count; count++)
                        out_be64(&priv2->spu_chnldata_RW, 0);
        }

        /* initialize channel counts to meaningful values */
        for (i = 0; i < ARRAY_SIZE(count_list); i++) {
                out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
                out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
        }
}

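/*
 * Take an idle SPU off the given node's free list, reinitializing its
 * channels before handing it to the caller.
 */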
struct spu *spu_alloc_node(int node)
{
        struct spu *spu = NULL;

        mutex_lock(&spu_mutex);
        if (!list_empty(&spu_list[node])) {
                spu = list_entry(spu_list[node].next, struct spu, list);
                list_del_init(&spu->list);
                pr_debug("Got SPU %d %d\n", spu->number, spu->node);
        }
        mutex_unlock(&spu_mutex);

        if (spu)
                spu_init_channels(spu);
        return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

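/*
 * spu_alloc() is the fallback for callers without a node preference:
 * it scans all nodes for the first free SPU. Typical usage (sketch):
 * a client such as spufs obtains an SPU with spu_alloc() or
 * spu_alloc_node(), wires up its callbacks, and hands the SPU back
 * with spu_free() once the context is destroyed.
 */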
struct spu *spu_alloc(void)
{
        struct spu *spu = NULL;
        int node;

        for (node = 0; node < MAX_NUMNODES; node++) {
                spu = spu_alloc_node(node);
                if (spu)
                        break;
        }

        return spu;
}

void spu_free(struct spu *spu)
{
        mutex_lock(&spu_mutex);
        list_add_tail(&spu->list, &spu_list[spu->node]);
        mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

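/*
 * sysfs plumbing: one sysdev per SPU, plus helpers to add or remove
 * an attribute (or attribute group) on all SPUs in the system at once.
 */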
struct sysdev_class spu_sysdev_class = {
        set_kset_name("spu")
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_create_file(&spu->sysdev, attr);
        mutex_unlock(&spu_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;

        mutex_lock(&spu_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_create_group(&spu->sysdev.kobj, attrs);
        mutex_unlock(&spu_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_remove_file(&spu->sysdev, attr);
        mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;

        mutex_lock(&spu_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_remove_group(&spu->sysdev.kobj, attrs);
        mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
        int ret;

        spu->sysdev.id = spu->number;
        spu->sysdev.cls = &spu_sysdev_class;
        ret = sysdev_register(&spu->sysdev);
        if (ret) {
                printk(KERN_ERR "Can't register SPU %d with sysfs\n",
                                spu->number);
                return ret;
        }

        sysfs_add_device_to_node(&spu->sysdev, spu->node);

        return 0;
}

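/*
 * Probe one SPU: allocate and number it, let the platform management
 * code create it, set up MFC state and interrupts, register it with
 * sysfs and finally add it to the node and global lists. Called once
 * per SPU from spu_enumerate_spus().
 */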
static int __init create_spu(void *data)
{
        struct spu *spu;
        int ret;
        static int number;
        unsigned long flags;

        ret = -ENOMEM;
        spu = kzalloc(sizeof (*spu), GFP_KERNEL);
        if (!spu)
                goto out;

        spin_lock_init(&spu->register_lock);
        mutex_lock(&spu_mutex);
        spu->number = number++;
        mutex_unlock(&spu_mutex);

        ret = spu_create_spu(spu, data);
        if (ret)
                goto out_free;

        spu_mfc_sdr_setup(spu);
        spu_mfc_sr1_set(spu, 0x33);
        ret = spu_request_irqs(spu);
        if (ret)
                goto out_destroy;

        ret = spu_create_sysdev(spu);
        if (ret)
                goto out_free_irqs;

        mutex_lock(&spu_mutex);
        spin_lock_irqsave(&spu_list_lock, flags);
        list_add(&spu->list, &spu_list[spu->node]);
        list_add(&spu->full_list, &spu_full_list);
        spin_unlock_irqrestore(&spu_list_lock, flags);
        mutex_unlock(&spu_mutex);

        goto out;

out_free_irqs:
        spu_free_irqs(spu);
out_destroy:
        spu_destroy_spu(spu);
out_free:
        kfree(spu);
out:
        return ret;
}

static int __init init_spu_base(void)
{
        int i, ret = 0;

        for (i = 0; i < MAX_NUMNODES; i++)
                INIT_LIST_HEAD(&spu_list[i]);

        if (!spu_management_ops)
                goto out;

        /* create sysdev class for spus */
        ret = sysdev_class_register(&spu_sysdev_class);
        if (ret)
                goto out;

        ret = spu_enumerate_spus(create_spu);
        if (ret < 0) {
                printk(KERN_WARNING "%s: Error initializing spus\n",
                        __func__);
                goto out_unregister_sysdev_class;
        }

        xmon_register_spus(&spu_full_list);

        return 0;

out_unregister_sysdev_class:
        sysdev_class_unregister(&spu_sysdev_class);
out:
        return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");