drm/i915: fix user irq miss in BSD ring on g4x

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a3cac57..bd087df 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
 #include "i915_drv.h"
 #include "i915_drm.h"
 #include "i915_trace.h"
+#include "intel_drv.h"
 
-static void
-render_ring_flush(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               u32     invalidate_domains,
-               u32     flush_domains)
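+/*
+ * Free space in the ring in bytes, computed from the last-written tail and
+ * the hardware head; 8 bytes are kept in reserve so that the tail never
+ * catches up to the head.
+ */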
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+       int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+       if (space < 0)
+               space += ring->size;
+       return space;
+}
+
+static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 seqno;
+
+       seqno = dev_priv->next_seqno;
+
+       /* reserve 0 for non-seqno */
+       if (++dev_priv->next_seqno == 0)
+               dev_priv->next_seqno = 1;
+
+       return seqno;
+}
+
+static int
+render_ring_flush(struct intel_ring_buffer *ring,
+                 u32   invalidate_domains,
+                 u32   flush_domains)
+{
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 cmd;
+       int ret;
+
 #if WATCH_EXEC
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
 #endif
-       u32 cmd;
-       trace_i915_gem_request_flush(dev, ring->next_seqno,
+
+       trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
                                     invalidate_domains, flush_domains);
 
        if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
@@ -80,7 +107,7 @@ render_ring_flush(struct drm_device *dev,
                if ((invalidate_domains|flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
-               if (!IS_I965G(dev)) {
+               if (INTEL_INFO(dev)->gen < 4) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
@@ -91,460 +118,685 @@ render_ring_flush(struct drm_device *dev,
                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                        cmd |= MI_EXE_FLUSH;
 
+               if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+                   (IS_G4X(dev) || IS_GEN5(dev)))
+                       cmd |= MI_INVALIDATE_ISP;
+
 #if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-               intel_ring_begin(dev, ring, 2);
-               intel_ring_emit(dev, ring, cmd);
-               intel_ring_emit(dev, ring, MI_NOOP);
-               intel_ring_advance(dev, ring);
+               ret = intel_ring_begin(ring, 2);
+               if (ret)
+                       return ret;
+
+               intel_ring_emit(ring, cmd);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_advance(ring);
        }
-}
 
-static unsigned int render_ring_get_head(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+       return 0;
 }
 
-static unsigned int render_ring_get_tail(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static void ring_write_tail(struct intel_ring_buffer *ring,
+                           u32 value)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       I915_WRITE_TAIL(ring, value);
 }
 
-static unsigned int render_ring_get_active_head(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
+                       RING_ACTHD(ring->mmio_base) : ACTHD;
 
        return I915_READ(acthd_reg);
 }
 
-static void render_ring_advance_ring(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       I915_WRITE(PRB0_TAIL, ring->tail);
-}
-
-static int init_ring_common(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
 {
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       struct drm_i915_gem_object *obj = ring->obj;
        u32 head;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv;
-       obj_priv = to_intel_bo(ring->gem_object);
 
        /* Stop the ring if it's running. */
-       I915_WRITE(ring->regs.ctl, 0);
-       I915_WRITE(ring->regs.head, 0);
-       I915_WRITE(ring->regs.tail, 0);
+       I915_WRITE_CTL(ring, 0);
+       I915_WRITE_HEAD(ring, 0);
+       ring->write_tail(ring, 0);
 
        /* Initialize the ring. */
-       I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
-       head = ring->get_head(dev, ring);
+       I915_WRITE_START(ring, obj->gtt_offset);
+       head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
-               DRM_ERROR("%s head not reset to zero "
-                               "ctl %08x head %08x tail %08x start %08x\n",
-                               ring->name,
-                               I915_READ(ring->regs.ctl),
-                               I915_READ(ring->regs.head),
-                               I915_READ(ring->regs.tail),
-                               I915_READ(ring->regs.start));
-
-               I915_WRITE(ring->regs.head, 0);
-
-               DRM_ERROR("%s head forced to zero "
-                               "ctl %08x head %08x tail %08x start %08x\n",
-                               ring->name,
-                               I915_READ(ring->regs.ctl),
-                               I915_READ(ring->regs.head),
-                               I915_READ(ring->regs.tail),
-                               I915_READ(ring->regs.start));
+               DRM_DEBUG_KMS("%s head not reset to zero "
+                             "ctl %08x head %08x tail %08x start %08x\n",
+                             ring->name,
+                             I915_READ_CTL(ring),
+                             I915_READ_HEAD(ring),
+                             I915_READ_TAIL(ring),
+                             I915_READ_START(ring));
+
+               I915_WRITE_HEAD(ring, 0);
+
+               if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+                       DRM_ERROR("failed to set %s head to zero "
+                                 "ctl %08x head %08x tail %08x start %08x\n",
+                                 ring->name,
+                                 I915_READ_CTL(ring),
+                                 I915_READ_HEAD(ring),
+                                 I915_READ_TAIL(ring),
+                                 I915_READ_START(ring));
+               }
        }
 
-       I915_WRITE(ring->regs.ctl,
-                       ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-                       | RING_NO_REPORT | RING_VALID);
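+       /* RING_REPORT_64K asks the hardware to periodically report the ring
+        * head into the status page, which intel_wait_ring_buffer() uses as
+        * a fast-path estimate of free space.
+        */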
+       I915_WRITE_CTL(ring,
+                       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+                       | RING_REPORT_64K | RING_VALID);
 
-       head = I915_READ(ring->regs.head) & HEAD_ADDR;
        /* If the head is still not zero, the ring is dead */
-       if (head != 0) {
+       if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+           I915_READ_START(ring) != obj->gtt_offset ||
+           (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
-                               I915_READ(ring->regs.ctl),
-                               I915_READ(ring->regs.head),
-                               I915_READ(ring->regs.tail),
-                               I915_READ(ring->regs.start));
+                               I915_READ_CTL(ring),
+                               I915_READ_HEAD(ring),
+                               I915_READ_TAIL(ring),
+                               I915_READ_START(ring));
                return -EIO;
        }
 
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               i915_kernel_lost_context(dev);
+       if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+               i915_kernel_lost_context(ring->dev);
        else {
-               ring->head = ring->get_head(dev, ring);
-               ring->tail = ring->get_tail(dev, ring);
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->size;
+               ring->head = I915_READ_HEAD(ring);
+               ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+               ring->space = ring_space(ring);
        }
+
        return 0;
 }
 
-static int init_render_ring(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+       struct drm_i915_gem_object *obj;
+       volatile u32 *cpu_page;
+       u32 gtt_offset;
+};
+
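+/*
+ * Allocate, pin and kmap a single cacheable page that PIPE_CONTROL can
+ * write seqnos and scratch flushes into; used by pc_render_add_request().
+ */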
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret = init_ring_common(dev, ring);
-       if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-               I915_WRITE(MI_MODE,
-                               (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+       struct pipe_control *pc;
+       struct drm_i915_gem_object *obj;
+       int ret;
+
+       if (ring->private)
+               return 0;
+
+       pc = kmalloc(sizeof(*pc), GFP_KERNEL);
+       if (!pc)
+               return -ENOMEM;
+
+       obj = i915_gem_alloc_object(ring->dev, 4096);
+       if (obj == NULL) {
+               DRM_ERROR("Failed to allocate seqno page\n");
+               ret = -ENOMEM;
+               goto err;
        }
+       obj->agp_type = AGP_USER_CACHED_MEMORY;
+
+       ret = i915_gem_object_pin(obj, 4096, true);
+       if (ret)
+               goto err_unref;
+
+       pc->gtt_offset = obj->gtt_offset;
+       pc->cpu_page =  kmap(obj->pages[0]);
+       if (pc->cpu_page == NULL)
+               goto err_unpin;
+
+       pc->obj = obj;
+       ring->private = pc;
+       return 0;
+
+err_unpin:
+       i915_gem_object_unpin(obj);
+err_unref:
+       drm_gem_object_unreference(&obj->base);
+err:
+       kfree(pc);
        return ret;
 }
 
-#define PIPE_CONTROL_FLUSH(addr)                                       \
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+       struct pipe_control *pc = ring->private;
+       struct drm_i915_gem_object *obj;
+
+       if (!ring->private)
+               return;
+
+       obj = pc->obj;
+       kunmap(obj->pages[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(&obj->base);
+
+       kfree(pc);
+       ring->private = NULL;
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret = init_ring_common(ring);
+
+       if (INTEL_INFO(dev)->gen > 3) {
+               int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+               if (IS_GEN6(dev))
+                       mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+               I915_WRITE(MI_MODE, mode);
+       }
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+       } else if (IS_GEN5(dev)) {
+               ret = init_pipe_control(ring);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+       if (!ring->private)
+               return;
+
+       cleanup_pipe_control(ring);
+}
+
+static void
+update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int id;
+
+       /*
+        * cs -> 1 = vcs, 0 = bcs
+        * vcs -> 1 = bcs, 0 = cs,
+        * bcs -> 1 = cs, 0 = vcs.
+        */
+       id = ring - dev_priv->ring;
+       id += 2 - i;
+       id %= 3;
+
+       intel_ring_emit(ring,
+                       MI_SEMAPHORE_MBOX |
+                       MI_SEMAPHORE_REGISTER |
+                       MI_SEMAPHORE_UPDATE);
+       intel_ring_emit(ring, seqno);
+       intel_ring_emit(ring,
+                       RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+}
+
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+                u32 *result)
+{
+       u32 seqno;
+       int ret;
+
+       ret = intel_ring_begin(ring, 10);
+       if (ret)
+               return ret;
+
+       seqno = i915_gem_get_seqno(ring->dev);
+       update_semaphore(ring, 0, seqno);
+       update_semaphore(ring, 1, seqno);
+
+       intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+       intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_ring_emit(ring, seqno);
+       intel_ring_emit(ring, MI_USER_INTERRUPT);
+       intel_ring_advance(ring);
+
+       *result = seqno;
+       return 0;
+}
+
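+/*
+ * Emit a semaphore wait on @ring so that it stalls until @to has signalled
+ * @seqno through its mailbox update.
+ */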
+int
+intel_ring_sync(struct intel_ring_buffer *ring,
+               struct intel_ring_buffer *to,
+               u32 seqno)
+{
+       int ret;
+
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring,
+                       MI_SEMAPHORE_MBOX |
+                       MI_SEMAPHORE_REGISTER |
+                       intel_ring_sync_index(ring, to) << 17 |
+                       MI_SEMAPHORE_COMPARE);
+       intel_ring_emit(ring, seqno);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+
+       return 0;
+}
+
+#define PIPE_CONTROL_FLUSH(ring__, addr__)                                     \
 do {                                                                   \
-       OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |          \
+       intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |           \
                 PIPE_CONTROL_DEPTH_STALL | 2);                         \
-       OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
-       OUT_RING(0);                                                    \
-       OUT_RING(0);                                                    \
+       intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
+       intel_ring_emit(ring__, 0);                                                     \
+       intel_ring_emit(ring__, 0);                                                     \
 } while (0)
 
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-static u32
-render_ring_add_request(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               struct drm_file *file_priv,
-               u32 flush_domains)
+static int
+pc_render_add_request(struct intel_ring_buffer *ring,
+                     u32 *result)
 {
-       u32 seqno;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       seqno = intel_ring_get_seqno(dev, ring);
-
-       if (IS_GEN6(dev)) {
-               BEGIN_LP_RING(6);
-               OUT_RING(GFX_OP_PIPE_CONTROL | 3);
-               OUT_RING(PIPE_CONTROL_QW_WRITE |
-                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
-                        PIPE_CONTROL_NOTIFY);
-               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-               OUT_RING(seqno);
-               OUT_RING(0);
-               OUT_RING(0);
-               ADVANCE_LP_RING();
-       } else if (HAS_PIPE_CONTROL(dev)) {
-               u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+       struct drm_device *dev = ring->dev;
+       u32 seqno = i915_gem_get_seqno(dev);
+       struct pipe_control *pc = ring->private;
+       u32 scratch_addr = pc->gtt_offset + 128;
+       int ret;
 
-               /*
-                * Workaround qword write incoherence by flushing the
-                * PIPE_NOTIFY buffers out to memory before requesting
-                * an interrupt.
-                */
-               BEGIN_LP_RING(32);
-               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-               OUT_RING(seqno);
-               OUT_RING(0);
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               scratch_addr += 128; /* write to separate cachelines */
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               scratch_addr += 128;
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               scratch_addr += 128;
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               scratch_addr += 128;
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               scratch_addr += 128;
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
-                        PIPE_CONTROL_NOTIFY);
-               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-               OUT_RING(seqno);
-               OUT_RING(0);
-               ADVANCE_LP_RING();
-       } else {
-               BEGIN_LP_RING(4);
-               OUT_RING(MI_STORE_DWORD_INDEX);
-               OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-               OUT_RING(seqno);
+       /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+        * incoherent with writes to memory, i.e. completely fubar,
+        * so we need to use PIPE_NOTIFY instead.
+        *
+        * However, we also need to workaround the qword write
+        * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+        * memory before requesting an interrupt.
+        */
+       ret = intel_ring_begin(ring, 32);
+       if (ret)
+               return ret;
 
-               OUT_RING(MI_USER_INTERRUPT);
-               ADVANCE_LP_RING();
-       }
-       return seqno;
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                       PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+       intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(ring, seqno);
+       intel_ring_emit(ring, 0);
+       PIPE_CONTROL_FLUSH(ring, scratch_addr);
+       scratch_addr += 128; /* write to separate cachelines */
+       PIPE_CONTROL_FLUSH(ring, scratch_addr);
+       scratch_addr += 128;
+       PIPE_CONTROL_FLUSH(ring, scratch_addr);
+       scratch_addr += 128;
+       PIPE_CONTROL_FLUSH(ring, scratch_addr);
+       scratch_addr += 128;
+       PIPE_CONTROL_FLUSH(ring, scratch_addr);
+       scratch_addr += 128;
+       PIPE_CONTROL_FLUSH(ring, scratch_addr);
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                       PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+                       PIPE_CONTROL_NOTIFY);
+       intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(ring, seqno);
+       intel_ring_emit(ring, 0);
+       intel_ring_advance(ring);
+
+       *result = seqno;
+       return 0;
+}
+
+static int
+render_ring_add_request(struct intel_ring_buffer *ring,
+                       u32 *result)
+{
+       struct drm_device *dev = ring->dev;
+       u32 seqno = i915_gem_get_seqno(dev);
+       int ret;
+
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+       intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_ring_emit(ring, seqno);
+       intel_ring_emit(ring, MI_USER_INTERRUPT);
+       intel_ring_advance(ring);
+
+       *result = seqno;
+       return 0;
 }
 
 static u32
-render_ring_get_gem_seqno(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       if (HAS_PIPE_CONTROL(dev))
-               return ((volatile u32 *)(dev_priv->seqno_page))[0];
-       else
-               return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring)
+{
+       struct pipe_control *pc = ring->private;
+       return pc->cpu_page[0];
 }
 
 static void
-render_ring_get_user_irq(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
+       dev_priv->gt_irq_mask &= ~mask;
+       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+       POSTING_READ(GTIMR);
+}
 
-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+static void
+ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+       dev_priv->gt_irq_mask |= mask;
+       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+       POSTING_READ(GTIMR);
+}
+
+static void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+       dev_priv->irq_mask &= ~mask;
+       I915_WRITE(IMR, dev_priv->irq_mask);
+       POSTING_READ(IMR);
+}
+
+static void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+       dev_priv->irq_mask |= mask;
+       I915_WRITE(IMR, dev_priv->irq_mask);
+       POSTING_READ(IMR);
+}
+
+static bool
+render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       if (!dev->irq_enabled)
+               return false;
+
+       spin_lock(&ring->irq_lock);
+       if (ring->irq_refcount++ == 0) {
                if (HAS_PCH_SPLIT(dev))
-                       ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+                       ironlake_enable_irq(dev_priv,
+                                           GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+       spin_unlock(&ring->irq_lock);
+
+       return true;
 }
 
 static void
-render_ring_put_user_irq(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+render_ring_put_irq(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
-       if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+       spin_lock(&ring->irq_lock);
+       if (--ring->irq_refcount == 0) {
                if (HAS_PCH_SPLIT(dev))
-                       ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+                       ironlake_disable_irq(dev_priv,
+                                            GT_USER_INTERRUPT |
+                                            GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+       spin_unlock(&ring->irq_lock);
 }
 
-static void render_setup_status_page(struct drm_device *dev,
-       struct  intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       if (IS_GEN6(dev)) {
-               I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
-               I915_READ(HWS_PGA_GEN6); /* posting read */
-       } else {
-               I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
-               I915_READ(HWS_PGA); /* posting read */
-       }
-
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       u32 mmio = IS_GEN6(ring->dev) ?
+               RING_HWS_PGA_GEN6(ring->mmio_base) :
+               RING_HWS_PGA(ring->mmio_base);
+       I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+       POSTING_READ(mmio);
 }
 
-void
-bsd_ring_flush(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               u32     invalidate_domains,
-               u32     flush_domains)
+static int
+bsd_ring_flush(struct intel_ring_buffer *ring,
+              u32     invalidate_domains,
+              u32     flush_domains)
 {
-       intel_ring_begin(dev, ring, 2);
-       intel_ring_emit(dev, ring, MI_FLUSH);
-       intel_ring_emit(dev, ring, MI_NOOP);
-       intel_ring_advance(dev, ring);
-}
+       int ret;
 
-static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
+       if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+               return 0;
+
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, MI_FLUSH);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+       return 0;
 }
 
-static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static int
+ring_add_request(struct intel_ring_buffer *ring,
+                u32 *result)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
+       u32 seqno;
+       int ret;
+
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               return ret;
+
+       seqno = i915_gem_get_seqno(ring->dev);
+
+       intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+       intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_ring_emit(ring, seqno);
+       intel_ring_emit(ring, MI_USER_INTERRUPT);
+       intel_ring_advance(ring);
+
+       DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+       *result = seqno;
+       return 0;
 }
 
-static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static bool
+ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
+       struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       return I915_READ(BSD_RING_ACTHD);
+
+       if (!dev->irq_enabled)
+              return false;
+
+       spin_lock(&ring->irq_lock);
+       if (ring->irq_refcount++ == 0)
+               ironlake_enable_irq(dev_priv, flag);
+       spin_unlock(&ring->irq_lock);
+
+       return true;
 }
 
-static inline void bsd_ring_advance_ring(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static void
+ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
+       struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       I915_WRITE(BSD_RING_TAIL, ring->tail);
-}
 
-static int init_bsd_ring(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       return init_ring_common(dev, ring);
+       spin_lock(&ring->irq_lock);
+       if (--ring->irq_refcount == 0)
+               ironlake_disable_irq(dev_priv, flag);
+       spin_unlock(&ring->irq_lock);
 }
 
-static u32
-bsd_ring_add_request(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               struct drm_file *file_priv,
-               u32 flush_domains)
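+/*
+ * Gen6 rings have a per-ring IMR in addition to the shared GTIMR mask;
+ * @rflag is the bit in the ring's IMR, @gflag the corresponding GTIMR bit.
+ */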
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
-       u32 seqno;
-       seqno = intel_ring_get_seqno(dev, ring);
-       intel_ring_begin(dev, ring, 4);
-       intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
-       intel_ring_emit(dev, ring,
-                       I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(dev, ring, seqno);
-       intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
-       intel_ring_advance(dev, ring);
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
-       DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+       if (!dev->irq_enabled)
+              return false;
 
-       return seqno;
+       spin_lock(&ring->irq_lock);
+       if (ring->irq_refcount++ == 0) {
+               ring->irq_mask &= ~rflag;
+               I915_WRITE_IMR(ring, ring->irq_mask);
+               ironlake_enable_irq(dev_priv, gflag);
+       }
+       spin_unlock(&ring->irq_lock);
+
+       return true;
 }
 
-static void bsd_setup_status_page(struct drm_device *dev,
-               struct  intel_ring_buffer *ring)
+static void
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
+       struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
-       I915_READ(BSD_HWS_PGA);
+
+       spin_lock(&ring->irq_lock);
+       if (--ring->irq_refcount == 0) {
+               ring->irq_mask |= rflag;
+               I915_WRITE_IMR(ring, ring->irq_mask);
+               ironlake_disable_irq(dev_priv, gflag);
+       }
+       spin_unlock(&ring->irq_lock);
 }
 
-static void
-bsd_ring_get_user_irq(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static bool
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-       /* do nothing */
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       if (!dev->irq_enabled)
+               return false;
+
+       spin_lock(&ring->irq_lock);
+       if (ring->irq_refcount++ == 0) {
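+               /* On G4X the BSD ring's user interrupt lives in the main
+                * IIR/IMR registers, not in the GT interrupt registers used
+                * on Ironlake and later.
+                */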
+               if (IS_G4X(dev))
+                       i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+               else
+                       ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
+       }
+       spin_unlock(&ring->irq_lock);
+
+       return true;
 }
 static void
-bsd_ring_put_user_irq(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
-       /* do nothing */
-}
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
-static u32
-bsd_ring_get_gem_seqno(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+       spin_lock(&ring->irq_lock);
+       if (--ring->irq_refcount == 0) {
+               if (IS_G4X(dev))
+                       i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+               else
+                       ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
+       }
+       spin_unlock(&ring->irq_lock);
 }
 
 static int
-bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               struct drm_i915_gem_execbuffer2 *exec,
-               struct drm_clip_rect *cliprects,
-               uint64_t exec_offset)
-{
-       uint32_t exec_start;
-       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-       intel_ring_begin(dev, ring, 2);
-       intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
-                       (2 << 6) | MI_BATCH_NON_SECURE_I965);
-       intel_ring_emit(dev, ring, exec_start);
-       intel_ring_advance(dev, ring);
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+{
+       int ret;
+
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring,
+                       MI_BATCH_BUFFER_START | (2 << 6) |
+                       MI_BATCH_NON_SECURE_I965);
+       intel_ring_emit(ring, offset);
+       intel_ring_advance(ring);
+
        return 0;
 }
 
-
 static int
-render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               struct drm_i915_gem_execbuffer2 *exec,
-               struct drm_clip_rect *cliprects,
-               uint64_t exec_offset)
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                               u32 offset, u32 len)
 {
+       struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int nbox = exec->num_cliprects;
-       int i = 0, count;
-       uint32_t exec_start, exec_len;
-       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-       exec_len = (uint32_t) exec->batch_len;
-
-       trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
-
-       count = nbox ? nbox : 1;
-
-       for (i = 0; i < count; i++) {
-               if (i < nbox) {
-                       int ret = i915_emit_box(dev, cliprects, i,
-                                               exec->DR1, exec->DR4);
-                       if (ret)
-                               return ret;
-               }
+       int ret;
+
+       trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
 
-               if (IS_I830(dev) || IS_845G(dev)) {
-                       intel_ring_begin(dev, ring, 4);
-                       intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
-                       intel_ring_emit(dev, ring,
-                                       exec_start | MI_BATCH_NON_SECURE);
-                       intel_ring_emit(dev, ring, exec_start + exec_len - 4);
-                       intel_ring_emit(dev, ring, 0);
+       if (IS_I830(dev) || IS_845G(dev)) {
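+               /* 830/845 use the older MI_BATCH_BUFFER command, which takes
+                * explicit start and end addresses rather than just a start
+                * pointer.
+                */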
+               ret = intel_ring_begin(ring, 4);
+               if (ret)
+                       return ret;
+
+               intel_ring_emit(ring, MI_BATCH_BUFFER);
+               intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+               intel_ring_emit(ring, offset + len - 8);
+               intel_ring_emit(ring, 0);
+       } else {
+               ret = intel_ring_begin(ring, 2);
+               if (ret)
+                       return ret;
+
+               if (INTEL_INFO(dev)->gen >= 4) {
+                       intel_ring_emit(ring,
+                                       MI_BATCH_BUFFER_START | (2 << 6) |
+                                       MI_BATCH_NON_SECURE_I965);
+                       intel_ring_emit(ring, offset);
                } else {
-                       intel_ring_begin(dev, ring, 4);
-                       if (IS_I965G(dev)) {
-                               intel_ring_emit(dev, ring,
-                                               MI_BATCH_BUFFER_START | (2 << 6)
-                                               | MI_BATCH_NON_SECURE_I965);
-                               intel_ring_emit(dev, ring, exec_start);
-                       } else {
-                               intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
-                                               | (2 << 6));
-                               intel_ring_emit(dev, ring, exec_start |
-                                               MI_BATCH_NON_SECURE);
-                       }
+                       intel_ring_emit(ring,
+                                       MI_BATCH_BUFFER_START | (2 << 6));
+                       intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
                }
-               intel_ring_advance(dev, ring);
        }
+       intel_ring_advance(ring);
 
-       /* XXX breadcrumb */
        return 0;
 }
 
-static void cleanup_status_page(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       struct drm_i915_gem_object *obj;
 
        obj = ring->status_page.obj;
        if (obj == NULL)
                return;
-       obj_priv = to_intel_bo(obj);
 
-       kunmap(obj_priv->pages[0]);
+       kunmap(obj->pages[0]);
        i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;
 
        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
-static int init_status_page(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_ring_buffer *ring)
 {
+       struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        obj = i915_gem_alloc_object(dev, 4096);
@@ -553,16 +805,15 @@ static int init_status_page(struct drm_device *dev,
                ret = -ENOMEM;
                goto err;
        }
-       obj_priv = to_intel_bo(obj);
-       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+       obj->agp_type = AGP_USER_CACHED_MEMORY;
 
-       ret = i915_gem_object_pin(obj, 4096);
+       ret = i915_gem_object_pin(obj, 4096, true);
        if (ret != 0) {
                goto err_unref;
        }
 
-       ring->status_page.gfx_addr = obj_priv->gtt_offset;
-       ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+       ring->status_page.gfx_addr = obj->gtt_offset;
+       ring->status_page.page_addr = kmap(obj->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                goto err_unpin;
@@ -570,7 +821,7 @@ static int init_status_page(struct drm_device *dev,
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-       ring->setup_status_page(dev, ring);
+       intel_ring_setup_status_page(ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                        ring->name, ring->status_page.gfx_addr);
 
@@ -579,22 +830,27 @@ static int init_status_page(struct drm_device *dev,
 err_unpin:
        i915_gem_object_unpin(obj);
 err_unref:
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 err:
        return ret;
 }
 
-
 int intel_init_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                          struct intel_ring_buffer *ring)
 {
+       struct drm_i915_gem_object *obj;
        int ret;
-       struct drm_i915_gem_object *obj_priv;
-       struct drm_gem_object *obj;
+
        ring->dev = dev;
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&ring->gpu_write_list);
+
+       spin_lock_init(&ring->irq_lock);
+       ring->irq_mask = ~0;
 
        if (I915_NEED_GFX_HWS(dev)) {
-               ret = init_status_page(dev, ring);
+               ret = init_status_page(ring);
                if (ret)
                        return ret;
        }
@@ -603,20 +859,17 @@ int intel_init_ring_buffer(struct drm_device *dev,
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
-               goto cleanup;
+               goto err_hws;
        }
 
-       ring->gem_object = obj;
+       ring->obj = obj;
 
-       ret = i915_gem_object_pin(obj, ring->alignment);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               goto cleanup;
-       }
+       ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
+       if (ret)
+               goto err_unref;
 
-       obj_priv = to_intel_bo(obj);
        ring->map.size = ring->size;
-       ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+       ring->map.offset = dev->agp->base + obj->gtt_offset;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;
@@ -624,87 +877,111 @@ int intel_init_ring_buffer(struct drm_device *dev,
        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(obj);
                ret = -EINVAL;
-               goto cleanup;
+               goto err_unpin;
        }
 
        ring->virtual_start = ring->map.handle;
-       ret = ring->init(dev, ring);
-       if (ret != 0) {
-               intel_cleanup_ring_buffer(dev, ring);
-               return ret;
-       }
+       ret = ring->init(ring);
+       if (ret)
+               goto err_unmap;
+
+       /* Workaround an erratum on the i830 which causes a hang if
+        * the TAIL pointer points to within the last 2 cachelines
+        * of the buffer.
+        */
+       ring->effective_size = ring->size;
+       if (IS_I830(ring->dev))
+               ring->effective_size -= 128;
 
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               i915_kernel_lost_context(dev);
-       else {
-               ring->head = ring->get_head(dev, ring);
-               ring->tail = ring->get_tail(dev, ring);
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->size;
-       }
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
-       return ret;
-cleanup:
-       cleanup_status_page(dev, ring);
+       return 0;
+
+err_unmap:
+       drm_core_ioremapfree(&ring->map, dev);
+err_unpin:
+       i915_gem_object_unpin(obj);
+err_unref:
+       drm_gem_object_unreference(&obj->base);
+       ring->obj = NULL;
+err_hws:
+       cleanup_status_page(ring);
        return ret;
 }
 
-void intel_cleanup_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 {
-       if (ring->gem_object == NULL)
+       struct drm_i915_private *dev_priv;
+       int ret;
+
+       if (ring->obj == NULL)
                return;
 
-       drm_core_ioremapfree(&ring->map, dev);
+       /* Disable the ring buffer. The ring must be idle at this point */
+       dev_priv = ring->dev->dev_private;
+       ret = intel_wait_ring_buffer(ring, ring->size - 8);
+       I915_WRITE_CTL(ring, 0);
+
+       drm_core_ioremapfree(&ring->map, ring->dev);
+
+       i915_gem_object_unpin(ring->obj);
+       drm_gem_object_unreference(&ring->obj->base);
+       ring->obj = NULL;
 
-       i915_gem_object_unpin(ring->gem_object);
-       drm_gem_object_unreference(ring->gem_object);
-       ring->gem_object = NULL;
-       cleanup_status_page(dev, ring);
+       if (ring->cleanup)
+               ring->cleanup(ring);
+
+       cleanup_status_page(ring);
 }
 
-int intel_wrap_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
        unsigned int *virt;
-       int rem;
-       rem = ring->size - ring->tail;
+       int rem = ring->size - ring->tail;
 
        if (ring->space < rem) {
-               int ret = intel_wait_ring_buffer(dev, ring, rem);
+               int ret = intel_wait_ring_buffer(ring, rem);
                if (ret)
                        return ret;
        }
 
        virt = (unsigned int *)(ring->virtual_start + ring->tail);
-       rem /= 4;
-       while (rem--)
+       rem /= 8;
+       while (rem--) {
+               *virt++ = MI_NOOP;
                *virt++ = MI_NOOP;
+       }
 
        ring->tail = 0;
+       ring->space = ring_space(ring);
 
        return 0;
 }
 
-int intel_wait_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring, int n)
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long end;
+       u32 head;
+
+       /* If the reported head position has wrapped or hasn't advanced,
+        * fallback to the slow and accurate path.
+        */
+       head = intel_read_status_page(ring, 4);
+       if (head > ring->head) {
+               ring->head = head;
+               ring->space = ring_space(ring);
+               if (ring->space >= n)
+                       return 0;
+       }
 
        trace_i915_ring_wait_begin (dev);
        end = jiffies + 3 * HZ;
        do {
-               ring->head = ring->get_head(dev, ring);
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->size;
+               ring->head = I915_READ_HEAD(ring);
+               ring->space = ring_space(ring);
                if (ring->space >= n) {
-                       trace_i915_ring_wait_end (dev);
+                       trace_i915_ring_wait_end(dev);
                        return 0;
                }
 
@@ -714,137 +991,406 @@ int intel_wait_ring_buffer(struct drm_device *dev,
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }
 
-               yield();
+               msleep(1);
+               if (atomic_read(&dev_priv->mm.wedged))
+                       return -EAGAIN;
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end (dev);
        return -EBUSY;
 }
 
-void intel_ring_begin(struct drm_device *dev,
-               struct intel_ring_buffer *ring, int num_dwords)
+int intel_ring_begin(struct intel_ring_buffer *ring,
+                    int num_dwords)
 {
        int n = 4*num_dwords;
-       if (unlikely(ring->tail + n > ring->size))
-               intel_wrap_ring_buffer(dev, ring);
-       if (unlikely(ring->space < n))
-               intel_wait_ring_buffer(dev, ring, n);
-}
+       int ret;
 
-void intel_ring_emit(struct drm_device *dev,
-               struct intel_ring_buffer *ring, unsigned int data)
-{
-       unsigned int *virt = ring->virtual_start + ring->tail;
-       *virt = data;
-       ring->tail += 4;
-       ring->tail &= ring->size - 1;
-       ring->space -= 4;
-}
+       if (unlikely(ring->tail + n > ring->effective_size)) {
+               ret = intel_wrap_ring_buffer(ring);
+               if (unlikely(ret))
+                       return ret;
+       }
 
-void intel_ring_advance(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       ring->advance_ring(dev, ring);
-}
+       if (unlikely(ring->space < n)) {
+               ret = intel_wait_ring_buffer(ring, n);
+               if (unlikely(ret))
+                       return ret;
+       }
 
-void intel_fill_struct(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               void *data,
-               unsigned int len)
-{
-       unsigned int *virt = ring->virtual_start + ring->tail;
-       BUG_ON((len&~(4-1)) != 0);
-       intel_ring_begin(dev, ring, len/4);
-       memcpy(virt, data, len);
-       ring->tail += len;
-       ring->tail &= ring->size - 1;
-       ring->space -= len;
-       intel_ring_advance(dev, ring);
+       ring->space -= n;
+       return 0;
 }
 
-u32 intel_ring_get_seqno(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+void intel_ring_advance(struct intel_ring_buffer *ring)
 {
-       u32 seqno;
-       seqno = ring->next_seqno;
-
-       /* reserve 0 for non-seqno */
-       if (++ring->next_seqno == 0)
-               ring->next_seqno = 1;
-       return seqno;
+       ring->tail &= ring->size - 1;
+       ring->write_tail(ring, ring->tail);
 }
 
-struct intel_ring_buffer render_ring = {
+static const struct intel_ring_buffer render_ring = {
        .name                   = "render ring",
-       .regs                   = {
-               .ctl = PRB0_CTL,
-               .head = PRB0_HEAD,
-               .tail = PRB0_TAIL,
-               .start = PRB0_START
-       },
-       .ring_flag              = I915_EXEC_RENDER,
+       .id                     = RING_RENDER,
+       .mmio_base              = RENDER_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
-       .alignment              = PAGE_SIZE,
-       .virtual_start          = NULL,
-       .dev                    = NULL,
-       .gem_object             = NULL,
-       .head                   = 0,
-       .tail                   = 0,
-       .space                  = 0,
-       .next_seqno             = 1,
-       .user_irq_refcount      = 0,
-       .irq_gem_seqno          = 0,
-       .waiting_gem_seqno      = 0,
-       .setup_status_page      = render_setup_status_page,
        .init                   = init_render_ring,
-       .get_head               = render_ring_get_head,
-       .get_tail               = render_ring_get_tail,
-       .get_active_head        = render_ring_get_active_head,
-       .advance_ring           = render_ring_advance_ring,
+       .write_tail             = ring_write_tail,
        .flush                  = render_ring_flush,
        .add_request            = render_ring_add_request,
-       .get_gem_seqno          = render_ring_get_gem_seqno,
-       .user_irq_get           = render_ring_get_user_irq,
-       .user_irq_put           = render_ring_put_user_irq,
-       .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
-       .status_page            = {NULL, 0, NULL},
-       .map                    = {0,}
+       .get_seqno              = ring_get_seqno,
+       .irq_get                = render_ring_get_irq,
+       .irq_put                = render_ring_put_irq,
+       .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
+       .cleanup                        = render_ring_cleanup,
 };
 
 /* ring buffer for bit-stream decoder */
 
-struct intel_ring_buffer bsd_ring = {
+static const struct intel_ring_buffer bsd_ring = {
        .name                   = "bsd ring",
-       .regs                   = {
-               .ctl = BSD_RING_CTL,
-               .head = BSD_RING_HEAD,
-               .tail = BSD_RING_TAIL,
-               .start = BSD_RING_START
-       },
-       .ring_flag              = I915_EXEC_BSD,
+       .id                     = RING_BSD,
+       .mmio_base              = BSD_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
-       .alignment              = PAGE_SIZE,
-       .virtual_start          = NULL,
-       .dev                    = NULL,
-       .gem_object             = NULL,
-       .head                   = 0,
-       .tail                   = 0,
-       .space                  = 0,
-       .next_seqno             = 1,
-       .user_irq_refcount      = 0,
-       .irq_gem_seqno          = 0,
-       .waiting_gem_seqno      = 0,
-       .setup_status_page      = bsd_setup_status_page,
-       .init                   = init_bsd_ring,
-       .get_head               = bsd_ring_get_head,
-       .get_tail               = bsd_ring_get_tail,
-       .get_active_head        = bsd_ring_get_active_head,
-       .advance_ring           = bsd_ring_advance_ring,
+       .init                   = init_ring_common,
+       .write_tail             = ring_write_tail,
        .flush                  = bsd_ring_flush,
-       .add_request            = bsd_ring_add_request,
-       .get_gem_seqno          = bsd_ring_get_gem_seqno,
-       .user_irq_get           = bsd_ring_get_user_irq,
-       .user_irq_put           = bsd_ring_put_user_irq,
-       .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
-       .status_page            = {NULL, 0, NULL},
-       .map                    = {0,}
+       .add_request            = ring_add_request,
+       .get_seqno              = ring_get_seqno,
+       .irq_get                = bsd_ring_get_irq,
+       .irq_put                = bsd_ring_put_irq,
+       .dispatch_execbuffer    = ring_dispatch_execbuffer,
+};
+
+
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
+                                    u32 value)
+{
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+
+       /* Every tail move must follow the sequence below */
+       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+              GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+              GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+       I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+                               GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+                       50))
+               DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+       I915_WRITE_TAIL(ring, value);
+       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+              GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+              GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+}
+
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
+                          u32 invalidate, u32 flush)
+{
+       uint32_t cmd;
+       int ret;
+
+       if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
+               return 0;
+
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               return ret;
+
+       cmd = MI_FLUSH_DW;
+       if (invalidate & I915_GEM_GPU_DOMAINS)
+               cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+       intel_ring_emit(ring, cmd);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+       return 0;
+}
+
+static int
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                             u32 offset, u32 len)
+{
+       int ret;
+
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+              return ret;
+
+       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+       /* bit0-7 is the length on GEN6+ */
+       intel_ring_emit(ring, offset);
+       intel_ring_advance(ring);
+
+       return 0;
+}
+
+static bool
+gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+       return gen6_ring_get_irq(ring,
+                                GT_USER_INTERRUPT,
+                                GEN6_RENDER_USER_INTERRUPT);
+}
+
+static void
+gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+       return gen6_ring_put_irq(ring,
+                                GT_USER_INTERRUPT,
+                                GEN6_RENDER_USER_INTERRUPT);
+}
+
+static bool
+gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+       return gen6_ring_get_irq(ring,
+                                GT_GEN6_BSD_USER_INTERRUPT,
+                                GEN6_BSD_USER_INTERRUPT);
+}
+
+static void
+gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+       return gen6_ring_put_irq(ring,
+                                GT_GEN6_BSD_USER_INTERRUPT,
+                                GEN6_BSD_USER_INTERRUPT);
+}
+
+/* ring buffer for Video Codec for Gen6+ */
+static const struct intel_ring_buffer gen6_bsd_ring = {
+       .name                   = "gen6 bsd ring",
+       .id                     = RING_BSD,
+       .mmio_base              = GEN6_BSD_RING_BASE,
+       .size                   = 32 * PAGE_SIZE,
+       .init                   = init_ring_common,
+       .write_tail             = gen6_bsd_ring_write_tail,
+       .flush                  = gen6_ring_flush,
+       .add_request            = gen6_add_request,
+       .get_seqno              = ring_get_seqno,
+       .irq_get                = gen6_bsd_ring_get_irq,
+       .irq_put                = gen6_bsd_ring_put_irq,
+       .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
 };
+
+/* Blitter support (SandyBridge+) */
+
+static bool
+blt_ring_get_irq(struct intel_ring_buffer *ring)
+{
+       return gen6_ring_get_irq(ring,
+                                GT_BLT_USER_INTERRUPT,
+                                GEN6_BLITTER_USER_INTERRUPT);
+}
+
+static void
+blt_ring_put_irq(struct intel_ring_buffer *ring)
+{
+       gen6_ring_put_irq(ring,
+                         GT_BLT_USER_INTERRUPT,
+                         GEN6_BLITTER_USER_INTERRUPT);
+}
+
+
+/* Workaround for some steppings of SNB: each time the BLT ring's tail is
+ * moved, the first command parsed from the ring must be a
+ * MI_BATCH_BUFFER_START.
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+       (IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+       return ring->private;
+}
+
+static int blt_ring_init(struct intel_ring_buffer *ring)
+{
+       if (NEED_BLT_WORKAROUND(ring->dev)) {
+               struct drm_i915_gem_object *obj;
+               u32 *ptr;
+               int ret;
+
+               obj = i915_gem_alloc_object(ring->dev, 4096);
+               if (obj == NULL)
+                       return -ENOMEM;
+
+               ret = i915_gem_object_pin(obj, 4096, true);
+               if (ret) {
+                       drm_gem_object_unreference(&obj->base);
+                       return ret;
+               }
+
+               ptr = kmap(obj->pages[0]);
+               *ptr++ = MI_BATCH_BUFFER_END;
+               *ptr++ = MI_NOOP;
+               kunmap(obj->pages[0]);
+
+               ret = i915_gem_object_set_to_gtt_domain(obj, false);
+               if (ret) {
+                       i915_gem_object_unpin(obj);
+                       drm_gem_object_unreference(&obj->base);
+                       return ret;
+               }
+
+               ring->private = obj;
+       }
+
+       return init_ring_common(ring);
+}
+
+static int blt_ring_begin(struct intel_ring_buffer *ring,
+                         int num_dwords)
+{
+       if (ring->private) {
+               int ret = intel_ring_begin(ring, num_dwords+2);
+               if (ret)
+                       return ret;
+
+               intel_ring_emit(ring, MI_BATCH_BUFFER_START);
+               intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+
+               return 0;
+       } else
+               return intel_ring_begin(ring, 4);
+}
+
+static int blt_ring_flush(struct intel_ring_buffer *ring,
+                         u32 invalidate, u32 flush)
+{
+       uint32_t cmd;
+       int ret;
+
+       if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
+               return 0;
+
+       ret = blt_ring_begin(ring, 4);
+       if (ret)
+               return ret;
+
+       cmd = MI_FLUSH_DW;
+       if (invalidate & I915_GEM_DOMAIN_RENDER)
+               cmd |= MI_INVALIDATE_TLB;
+       intel_ring_emit(ring, cmd);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+       return 0;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+       if (!ring->private)
+               return;
+
+       i915_gem_object_unpin(ring->private);
+       drm_gem_object_unreference(ring->private);
+       ring->private = NULL;
+}
+
+static const struct intel_ring_buffer gen6_blt_ring = {
+       .name                   = "blt ring",
+       .id                     = RING_BLT,
+       .mmio_base              = BLT_RING_BASE,
+       .size                   = 32 * PAGE_SIZE,
+       .init                   = blt_ring_init,
+       .write_tail             = ring_write_tail,
+       .flush                  = blt_ring_flush,
+       .add_request            = gen6_add_request,
+       .get_seqno              = ring_get_seqno,
+       .irq_get                        = blt_ring_get_irq,
+       .irq_put                        = blt_ring_put_irq,
+       .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
+       .cleanup                        = blt_ring_cleanup,
+};
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+       *ring = render_ring;
+       if (INTEL_INFO(dev)->gen >= 6) {
+               ring->add_request = gen6_add_request;
+               ring->irq_get = gen6_render_ring_get_irq;
+               ring->irq_put = gen6_render_ring_put_irq;
+       } else if (IS_GEN5(dev)) {
+               ring->add_request = pc_render_add_request;
+               ring->get_seqno = pc_render_get_seqno;
+       }
+
+       if (!I915_NEED_GFX_HWS(dev)) {
+               ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+               memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+       }
+
+       return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+       *ring = render_ring;
+       if (INTEL_INFO(dev)->gen >= 6) {
+               ring->add_request = gen6_add_request;
+               ring->irq_get = gen6_render_ring_get_irq;
+               ring->irq_put = gen6_render_ring_put_irq;
+       } else if (IS_GEN5(dev)) {
+               ring->add_request = pc_render_add_request;
+               ring->get_seqno = pc_render_get_seqno;
+       }
+
+       ring->dev = dev;
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&ring->gpu_write_list);
+
+       ring->size = size;
+       ring->effective_size = ring->size;
+       if (IS_I830(ring->dev))
+               ring->effective_size -= 128;
+
+       ring->map.offset = start;
+       ring->map.size = size;
+       ring->map.type = 0;
+       ring->map.flags = 0;
+       ring->map.mtrr = 0;
+
+       drm_core_ioremap_wc(&ring->map, dev);
+       if (ring->map.handle == NULL) {
+               DRM_ERROR("can not ioremap virtual address for"
+                         " ring buffer\n");
+               return -ENOMEM;
+       }
+
+       ring->virtual_start = (void __force __iomem *)ring->map.handle;
+       return 0;
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
+
+       if (IS_GEN6(dev))
+               *ring = gen6_bsd_ring;
+       else
+               *ring = bsd_ring;
+
+       return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+
+       *ring = gen6_blt_ring;
+
+       return intel_init_ring_buffer(dev, ring);
+}