- Update to 2.6.25-rc3.
[linux-flexiantxendom0-3.2.10.git] / arch / powerpc / platforms / cell / spufs / fault.c
index 917eab4..e46d300 100644 (file)
 
 #include "spufs.h"
 
-/*
- * This ought to be kept in sync with the powerpc specific do_page_fault
- * function. Currently, there are a few corner cases that we haven't had
- * to handle fortunately.
+/**
+ * Handle an SPE event, depending on the context's SPU_CREATE_EVENTS_ENABLED flag.
+ *
+ * If the context was created with events, we just set the return event.
+ * Otherwise, send an appropriate signal to the process.
  */
-static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
-               unsigned long dsisr, unsigned *flt)
+static void spufs_handle_event(struct spu_context *ctx,
+                               unsigned long ea, int type)
 {
-       struct vm_area_struct *vma;
-       unsigned long is_write;
-       int ret;
+       siginfo_t info;
 
-#if 0
-       if (!IS_VALID_EA(ea)) {
-               return -EFAULT;
-       }
-#endif /* XXX */
-       if (mm == NULL) {
-               return -EFAULT;
-       }
-       if (mm->pgd == NULL) {
-               return -EFAULT;
+       if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
+               ctx->event_return |= type;
+               wake_up_all(&ctx->stop_wq);
+               return;
        }
 
-       down_read(&mm->mmap_sem);
-       vma = find_vma(mm, ea);
-       if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= ea)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, ea))
-               goto bad_area;
-good_area:
-       is_write = dsisr & MFC_DSISR_ACCESS_PUT;
-       if (is_write) {
-               if (!(vma->vm_flags & VM_WRITE))
-                       goto bad_area;
-       } else {
-               if (dsisr & MFC_DSISR_ACCESS_DENIED)
-                       goto bad_area;
-               if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-                       goto bad_area;
-       }
-       ret = 0;
-       *flt = handle_mm_fault(mm, vma, ea, is_write);
-       if (unlikely(*flt & VM_FAULT_ERROR)) {
-               if (*flt & VM_FAULT_OOM) {
-                       ret = -ENOMEM;
-                       goto bad_area;
-               } else if (*flt & VM_FAULT_SIGBUS) {
-                       ret = -EFAULT;
-                       goto bad_area;
-               }
-               BUG();
+       memset(&info, 0, sizeof(info));
+
+       switch (type) {
+       case SPE_EVENT_INVALID_DMA:
+               info.si_signo = SIGBUS;
+               info.si_code = BUS_OBJERR;
+               break;
+       case SPE_EVENT_SPE_DATA_STORAGE:
+               info.si_signo = SIGSEGV;
+               info.si_addr = (void __user *)ea;
+               info.si_code = SEGV_ACCERR;
+               ctx->ops->restart_dma(ctx);
+               break;
+       case SPE_EVENT_DMA_ALIGNMENT:
+               info.si_signo = SIGBUS;
+               /* DAR isn't set for an alignment fault :( */
+               info.si_code = BUS_ADRALN;
+               break;
+       case SPE_EVENT_SPE_ERROR:
+               info.si_signo = SIGILL;
+               info.si_addr = (void __user *)(unsigned long)
+                       ctx->ops->npc_read(ctx) - 4;
+               info.si_code = ILL_ILLOPC;
+               break;
        }
-       if (*flt & VM_FAULT_MAJOR)
-               current->maj_flt++;
-       else
-               current->min_flt++;
-       up_read(&mm->mmap_sem);
-       return ret;
 
-bad_area:
-       up_read(&mm->mmap_sem);
-       return -EFAULT;
+       if (info.si_signo)
+               force_sig_info(info.si_signo, &info, current);
 }
 
-static void spufs_handle_dma_error(struct spu_context *ctx,
-                               unsigned long ea, int type)
+int spufs_handle_class0(struct spu_context *ctx)
 {
-       if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
-               ctx->event_return |= type;
-               wake_up_all(&ctx->stop_wq);
-       } else {
-               siginfo_t info;
-               memset(&info, 0, sizeof(info));
-
-               switch (type) {
-               case SPE_EVENT_INVALID_DMA:
-                       info.si_signo = SIGBUS;
-                       info.si_code = BUS_OBJERR;
-                       break;
-               case SPE_EVENT_SPE_DATA_STORAGE:
-                       info.si_signo = SIGBUS;
-                       info.si_addr = (void __user *)ea;
-                       info.si_code = BUS_ADRERR;
-                       break;
-               case SPE_EVENT_DMA_ALIGNMENT:
-                       info.si_signo = SIGBUS;
-                       /* DAR isn't set for an alignment fault :( */
-                       info.si_code = BUS_ADRALN;
-                       break;
-               case SPE_EVENT_SPE_ERROR:
-                       info.si_signo = SIGILL;
-                       info.si_addr = (void __user *)(unsigned long)
-                               ctx->ops->npc_read(ctx) - 4;
-                       info.si_code = ILL_ILLOPC;
-                       break;
-               }
-               if (info.si_signo)
-                       force_sig_info(info.si_signo, &info, current);
-       }
-}
+       unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;
 
-void spufs_dma_callback(struct spu *spu, int type)
-{
-       spufs_handle_dma_error(spu->ctx, spu->dar, type);
+       if (likely(!stat))
+               return 0;
+
+       if (stat & CLASS0_DMA_ALIGNMENT_INTR)
+               spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
+
+       if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
+               spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
+
+       if (stat & CLASS0_SPU_ERROR_INTR)
+               spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
+
+       return -EIO;
 }
-EXPORT_SYMBOL_GPL(spufs_dma_callback);
 
 /*
  * bottom half handler for page faults, we can't do this from
@@ -165,16 +119,8 @@ int spufs_handle_class1(struct spu_context *ctx)
         * in time, we can still expect to get the same fault
         * the immediately after the context restore.
         */
-       if (ctx->state == SPU_STATE_RUNNABLE) {
-               ea = ctx->spu->dar;
-               dsisr = ctx->spu->dsisr;
-               ctx->spu->dar= ctx->spu->dsisr = 0;
-       } else {
-               ea = ctx->csa.priv1.mfc_dar_RW;
-               dsisr = ctx->csa.priv1.mfc_dsisr_RW;
-               ctx->csa.priv1.mfc_dar_RW = 0;
-               ctx->csa.priv1.mfc_dsisr_RW = 0;
-       }
+       ea = ctx->csa.dar;
+       dsisr = ctx->csa.dsisr;
 
        if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
                return 0;
@@ -201,7 +147,19 @@ int spufs_handle_class1(struct spu_context *ctx)
        if (ret)
                ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
 
-       spu_acquire(ctx);
+       /*
+        * This is nasty: we need the state_mutex for all the bookkeeping even
+        * if the syscall was interrupted by a signal. ewww.
+        */
+       mutex_lock(&ctx->state_mutex);
+
+       /*
+        * Clear dsisr under ctxt lock after handling the fault, so that
+        * time slicing will not preempt the context while the page fault
+        * handler is running. Context switch code removes mappings.
+        */
+       ctx->csa.dar = ctx->csa.dsisr = 0;
+
        /*
         * If we handled the fault successfully and are in runnable
         * state, restart the DMA.
@@ -222,9 +180,8 @@ int spufs_handle_class1(struct spu_context *ctx)
                if (ctx->spu)
                        ctx->ops->restart_dma(ctx);
        } else
-               spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
+               spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
 
        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
        return ret;
 }
-EXPORT_SYMBOL_GPL(spufs_handle_class1);