drm/i915: fix user irq miss in BSD ring on g4x
drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Haihao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drv.h"
33 #include "i915_drm.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
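/*
 * Bytes that can still be written between the software tail and the
 * hardware head.  Eight bytes are held back so the tail never advances
 * right up to the head, since head == tail reads back as an empty ring.
 */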
37 static inline int ring_space(struct intel_ring_buffer *ring)
38 {
39         int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
40         if (space < 0)
41                 space += ring->size;
42         return space;
43 }
44
45 static u32 i915_gem_get_seqno(struct drm_device *dev)
46 {
47         drm_i915_private_t *dev_priv = dev->dev_private;
48         u32 seqno;
49
50         seqno = dev_priv->next_seqno;
51
52         /* reserve 0 for non-seqno */
53         if (++dev_priv->next_seqno == 0)
54                 dev_priv->next_seqno = 1;
55
56         return seqno;
57 }
58
59 static int
60 render_ring_flush(struct intel_ring_buffer *ring,
61                   u32   invalidate_domains,
62                   u32   flush_domains)
63 {
64         struct drm_device *dev = ring->dev;
65         drm_i915_private_t *dev_priv = dev->dev_private;
66         u32 cmd;
67         int ret;
68
69 #if WATCH_EXEC
70         DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
71                   invalidate_domains, flush_domains);
72 #endif
73
74         trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
75                                      invalidate_domains, flush_domains);
76
77         if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
78                 /*
79                  * read/write caches:
80                  *
81                  * I915_GEM_DOMAIN_RENDER is always invalidated, but is
82                  * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
83                  * also flushed at 2d versus 3d pipeline switches.
84                  *
85                  * read-only caches:
86                  *
87                  * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
88                  * MI_READ_FLUSH is set, and is always flushed on 965.
89                  *
90                  * I915_GEM_DOMAIN_COMMAND may not exist?
91                  *
92                  * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
93                  * invalidated when MI_EXE_FLUSH is set.
94                  *
95                  * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
96                  * invalidated with every MI_FLUSH.
97                  *
98                  * TLBs:
99                  *
100                  * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
101                  * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
102                  * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
103                  * are flushed at any MI_FLUSH.
104                  */
105
106                 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
107                 if ((invalidate_domains|flush_domains) &
108                     I915_GEM_DOMAIN_RENDER)
109                         cmd &= ~MI_NO_WRITE_FLUSH;
110                 if (INTEL_INFO(dev)->gen < 4) {
111                         /*
112                          * On the 965, the sampler cache always gets flushed
113                          * and this bit is reserved.
114                          */
115                         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
116                                 cmd |= MI_READ_FLUSH;
117                 }
118                 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
119                         cmd |= MI_EXE_FLUSH;
120
121                 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
122                     (IS_G4X(dev) || IS_GEN5(dev)))
123                         cmd |= MI_INVALIDATE_ISP;
124
125 #if WATCH_EXEC
126                 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
127 #endif
128                 ret = intel_ring_begin(ring, 2);
129                 if (ret)
130                         return ret;
131
132                 intel_ring_emit(ring, cmd);
133                 intel_ring_emit(ring, MI_NOOP);
134                 intel_ring_advance(ring);
135         }
136
137         return 0;
138 }
139
140 static void ring_write_tail(struct intel_ring_buffer *ring,
141                             u32 value)
142 {
143         drm_i915_private_t *dev_priv = ring->dev->dev_private;
144         I915_WRITE_TAIL(ring, value);
145 }
146
147 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
148 {
149         drm_i915_private_t *dev_priv = ring->dev->dev_private;
150         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
151                         RING_ACTHD(ring->mmio_base) : ACTHD;
152
153         return I915_READ(acthd_reg);
154 }
155
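/*
 * Common ring bring-up: stop the ring, program the start address of the
 * backing object, check that HEAD really resets to zero (G45 parts are
 * known not to do this reliably), then enable the ring through the CTL
 * register and verify the hardware accepted the configuration.
 */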
156 static int init_ring_common(struct intel_ring_buffer *ring)
157 {
158         drm_i915_private_t *dev_priv = ring->dev->dev_private;
159         struct drm_i915_gem_object *obj = ring->obj;
160         u32 head;
161
162         /* Stop the ring if it's running. */
163         I915_WRITE_CTL(ring, 0);
164         I915_WRITE_HEAD(ring, 0);
165         ring->write_tail(ring, 0);
166
167         /* Initialize the ring. */
168         I915_WRITE_START(ring, obj->gtt_offset);
169         head = I915_READ_HEAD(ring) & HEAD_ADDR;
170
171         /* G45 ring initialization fails to reset head to zero */
172         if (head != 0) {
173                 DRM_DEBUG_KMS("%s head not reset to zero "
174                               "ctl %08x head %08x tail %08x start %08x\n",
175                               ring->name,
176                               I915_READ_CTL(ring),
177                               I915_READ_HEAD(ring),
178                               I915_READ_TAIL(ring),
179                               I915_READ_START(ring));
180
181                 I915_WRITE_HEAD(ring, 0);
182
183                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
184                         DRM_ERROR("failed to set %s head to zero "
185                                   "ctl %08x head %08x tail %08x start %08x\n",
186                                   ring->name,
187                                   I915_READ_CTL(ring),
188                                   I915_READ_HEAD(ring),
189                                   I915_READ_TAIL(ring),
190                                   I915_READ_START(ring));
191                 }
192         }
193
194         I915_WRITE_CTL(ring,
195                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
196                         | RING_REPORT_64K | RING_VALID);
197
198         /* If the head is still not zero, the ring is dead */
199         if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
200             I915_READ_START(ring) != obj->gtt_offset ||
201             (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
202                 DRM_ERROR("%s initialization failed "
203                                 "ctl %08x head %08x tail %08x start %08x\n",
204                                 ring->name,
205                                 I915_READ_CTL(ring),
206                                 I915_READ_HEAD(ring),
207                                 I915_READ_TAIL(ring),
208                                 I915_READ_START(ring));
209                 return -EIO;
210         }
211
212         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
213                 i915_kernel_lost_context(ring->dev);
214         else {
215                 ring->head = I915_READ_HEAD(ring);
216                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
217                 ring->space = ring_space(ring);
218         }
219
220         return 0;
221 }
222
223 /*
224  * 965+ support PIPE_CONTROL commands, which provide finer grained control
225  * over cache flushing.
226  */
227 struct pipe_control {
228         struct drm_i915_gem_object *obj;
229         volatile u32 *cpu_page;
230         u32 gtt_offset;
231 };
232
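/*
 * init_pipe_control() pins a 4K cached scratch object into the GTT and
 * keeps a CPU mapping of its first page.  pc_render_add_request() has
 * the GPU write the request seqno into it via PIPE_CONTROL, and
 * pc_render_get_seqno() reads it back from cpu_page[0].
 */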
233 static int
234 init_pipe_control(struct intel_ring_buffer *ring)
235 {
236         struct pipe_control *pc;
237         struct drm_i915_gem_object *obj;
238         int ret;
239
240         if (ring->private)
241                 return 0;
242
243         pc = kmalloc(sizeof(*pc), GFP_KERNEL);
244         if (!pc)
245                 return -ENOMEM;
246
247         obj = i915_gem_alloc_object(ring->dev, 4096);
248         if (obj == NULL) {
249                 DRM_ERROR("Failed to allocate seqno page\n");
250                 ret = -ENOMEM;
251                 goto err;
252         }
253         obj->agp_type = AGP_USER_CACHED_MEMORY;
254
255         ret = i915_gem_object_pin(obj, 4096, true);
256         if (ret)
257                 goto err_unref;
258
259         pc->gtt_offset = obj->gtt_offset;
260         pc->cpu_page = kmap(obj->pages[0]);
261         if (pc->cpu_page == NULL)
262                 goto err_unpin;
263
264         pc->obj = obj;
265         ring->private = pc;
266         return 0;
267
268 err_unpin:
269         i915_gem_object_unpin(obj);
270 err_unref:
271         drm_gem_object_unreference(&obj->base);
272 err:
273         kfree(pc);
274         return ret;
275 }
276
277 static void
278 cleanup_pipe_control(struct intel_ring_buffer *ring)
279 {
280         struct pipe_control *pc = ring->private;
281         struct drm_i915_gem_object *obj;
282
283         if (!ring->private)
284                 return;
285
286         obj = pc->obj;
287         kunmap(obj->pages[0]);
288         i915_gem_object_unpin(obj);
289         drm_gem_object_unreference(&obj->base);
290
291         kfree(pc);
292         ring->private = NULL;
293 }
294
295 static int init_render_ring(struct intel_ring_buffer *ring)
296 {
297         struct drm_device *dev = ring->dev;
298         struct drm_i915_private *dev_priv = dev->dev_private;
299         int ret = init_ring_common(ring);
300
301         if (INTEL_INFO(dev)->gen > 3) {
302                 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
303                 if (IS_GEN6(dev))
304                         mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
305                 I915_WRITE(MI_MODE, mode);
306         }
307
308         if (INTEL_INFO(dev)->gen >= 6) {
309         } else if (IS_GEN5(dev)) {
310                 ret = init_pipe_control(ring);
311                 if (ret)
312                         return ret;
313         }
314
315         return ret;
316 }
317
318 static void render_ring_cleanup(struct intel_ring_buffer *ring)
319 {
320         if (!ring->private)
321                 return;
322
323         cleanup_pipe_control(ring);
324 }
325
326 static void
327 update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
328 {
329         struct drm_device *dev = ring->dev;
330         struct drm_i915_private *dev_priv = dev->dev_private;
331         int id;
332
333         /*
334          * cs -> 1 = vcs, 0 = bcs
335          * vcs -> 1 = bcs, 0 = cs,
336          * bcs -> 1 = cs, 0 = vcs.
337          */
338         id = ring - dev_priv->ring;
339         id += 2 - i;
340         id %= 3;
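        /*
         * Worked example: for the video ring (index 1), slot i == 1
         * yields (1 + 2 - 1) % 3 = 2 (blt) and slot i == 0 yields
         * (1 + 2 - 0) % 3 = 0 (render), matching the table above.
         */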
341
342         intel_ring_emit(ring,
343                         MI_SEMAPHORE_MBOX |
344                         MI_SEMAPHORE_REGISTER |
345                         MI_SEMAPHORE_UPDATE);
346         intel_ring_emit(ring, seqno);
347         intel_ring_emit(ring,
348                         RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
349 }
350
351 static int
352 gen6_add_request(struct intel_ring_buffer *ring,
353                  u32 *result)
354 {
355         u32 seqno;
356         int ret;
357
358         ret = intel_ring_begin(ring, 10);
359         if (ret)
360                 return ret;
361
362         seqno = i915_gem_get_seqno(ring->dev);
363         update_semaphore(ring, 0, seqno);
364         update_semaphore(ring, 1, seqno);
365
366         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
367         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
368         intel_ring_emit(ring, seqno);
369         intel_ring_emit(ring, MI_USER_INTERRUPT);
370         intel_ring_advance(ring);
371
372         *result = seqno;
373         return 0;
374 }
375
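/*
 * Make @ring wait on @to: the MI_SEMAPHORE_MBOX compare command emitted
 * here stalls @ring until the seqno that @to signals through its
 * semaphore mailbox (see update_semaphore() above) reaches @seqno.
 */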
376 int
377 intel_ring_sync(struct intel_ring_buffer *ring,
378                 struct intel_ring_buffer *to,
379                 u32 seqno)
380 {
381         int ret;
382
383         ret = intel_ring_begin(ring, 4);
384         if (ret)
385                 return ret;
386
387         intel_ring_emit(ring,
388                         MI_SEMAPHORE_MBOX |
389                         MI_SEMAPHORE_REGISTER |
390                         intel_ring_sync_index(ring, to) << 17 |
391                         MI_SEMAPHORE_COMPARE);
392         intel_ring_emit(ring, seqno);
393         intel_ring_emit(ring, 0);
394         intel_ring_emit(ring, MI_NOOP);
395         intel_ring_advance(ring);
396
397         return 0;
398 }
399
400 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
401 do {                                                                    \
402         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |           \
403                  PIPE_CONTROL_DEPTH_STALL | 2);                         \
404         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
405         intel_ring_emit(ring__, 0);                                                     \
406         intel_ring_emit(ring__, 0);                                                     \
407 } while (0)
408
409 static int
410 pc_render_add_request(struct intel_ring_buffer *ring,
411                       u32 *result)
412 {
413         struct drm_device *dev = ring->dev;
414         u32 seqno = i915_gem_get_seqno(dev);
415         struct pipe_control *pc = ring->private;
416         u32 scratch_addr = pc->gtt_offset + 128;
417         int ret;
418
419         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
420          * incoherent with writes to memory, i.e. completely fubar,
421          * so we need to use PIPE_NOTIFY instead.
422          *
423          * However, we also need to work around the qword write
424          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
425          * memory before requesting an interrupt.
426          */
427         ret = intel_ring_begin(ring, 32);
428         if (ret)
429                 return ret;
430
431         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
432                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
433         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
434         intel_ring_emit(ring, seqno);
435         intel_ring_emit(ring, 0);
436         PIPE_CONTROL_FLUSH(ring, scratch_addr);
437         scratch_addr += 128; /* write to separate cachelines */
438         PIPE_CONTROL_FLUSH(ring, scratch_addr);
439         scratch_addr += 128;
440         PIPE_CONTROL_FLUSH(ring, scratch_addr);
441         scratch_addr += 128;
442         PIPE_CONTROL_FLUSH(ring, scratch_addr);
443         scratch_addr += 128;
444         PIPE_CONTROL_FLUSH(ring, scratch_addr);
445         scratch_addr += 128;
446         PIPE_CONTROL_FLUSH(ring, scratch_addr);
447         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
448                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
449                         PIPE_CONTROL_NOTIFY);
450         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
451         intel_ring_emit(ring, seqno);
452         intel_ring_emit(ring, 0);
453         intel_ring_advance(ring);
454
455         *result = seqno;
456         return 0;
457 }
458
459 static int
460 render_ring_add_request(struct intel_ring_buffer *ring,
461                         u32 *result)
462 {
463         struct drm_device *dev = ring->dev;
464         u32 seqno = i915_gem_get_seqno(dev);
465         int ret;
466
467         ret = intel_ring_begin(ring, 4);
468         if (ret)
469                 return ret;
470
471         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
472         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
473         intel_ring_emit(ring, seqno);
474         intel_ring_emit(ring, MI_USER_INTERRUPT);
475         intel_ring_advance(ring);
476
477         *result = seqno;
478         return 0;
479 }
480
481 static u32
482 ring_get_seqno(struct intel_ring_buffer *ring)
483 {
484         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
485 }
486
487 static u32
488 pc_render_get_seqno(struct intel_ring_buffer *ring)
489 {
490         struct pipe_control *pc = ring->private;
491         return pc->cpu_page[0];
492 }
493
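/*
 * Interrupt mask helpers: Ironlake and later route ring interrupts
 * through the GT interrupt mask register (GTIMR), while older parts use
 * the single IMR register.  The cached masks mirror the hardware state.
 */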
494 static void
495 ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
496 {
497         dev_priv->gt_irq_mask &= ~mask;
498         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
499         POSTING_READ(GTIMR);
500 }
501
502 static void
503 ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
504 {
505         dev_priv->gt_irq_mask |= mask;
506         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
507         POSTING_READ(GTIMR);
508 }
509
510 static void
511 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
512 {
513         dev_priv->irq_mask &= ~mask;
514         I915_WRITE(IMR, dev_priv->irq_mask);
515         POSTING_READ(IMR);
516 }
517
518 static void
519 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
520 {
521         dev_priv->irq_mask |= mask;
522         I915_WRITE(IMR, dev_priv->irq_mask);
523         POSTING_READ(IMR);
524 }
525
526 static bool
527 render_ring_get_irq(struct intel_ring_buffer *ring)
528 {
529         struct drm_device *dev = ring->dev;
530         drm_i915_private_t *dev_priv = dev->dev_private;
531
532         if (!dev->irq_enabled)
533                 return false;
534
535         spin_lock(&ring->irq_lock);
536         if (ring->irq_refcount++ == 0) {
537                 if (HAS_PCH_SPLIT(dev))
538                         ironlake_enable_irq(dev_priv,
539                                             GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
540                 else
541                         i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
542         }
543         spin_unlock(&ring->irq_lock);
544
545         return true;
546 }
547
548 static void
549 render_ring_put_irq(struct intel_ring_buffer *ring)
550 {
551         struct drm_device *dev = ring->dev;
552         drm_i915_private_t *dev_priv = dev->dev_private;
553
554         spin_lock(&ring->irq_lock);
555         if (--ring->irq_refcount == 0) {
556                 if (HAS_PCH_SPLIT(dev))
557                         ironlake_disable_irq(dev_priv,
558                                              GT_USER_INTERRUPT |
559                                              GT_PIPE_NOTIFY);
560                 else
561                         i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
562         }
563         spin_unlock(&ring->irq_lock);
564 }
565
566 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
567 {
568         drm_i915_private_t *dev_priv = ring->dev->dev_private;
569         u32 mmio = IS_GEN6(ring->dev) ?
570                 RING_HWS_PGA_GEN6(ring->mmio_base) :
571                 RING_HWS_PGA(ring->mmio_base);
572         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
573         POSTING_READ(mmio);
574 }
575
576 static int
577 bsd_ring_flush(struct intel_ring_buffer *ring,
578                u32     invalidate_domains,
579                u32     flush_domains)
580 {
581         int ret;
582
583         if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
584                 return 0;
585
586         ret = intel_ring_begin(ring, 2);
587         if (ret)
588                 return ret;
589
590         intel_ring_emit(ring, MI_FLUSH);
591         intel_ring_emit(ring, MI_NOOP);
592         intel_ring_advance(ring);
593         return 0;
594 }
595
596 static int
597 ring_add_request(struct intel_ring_buffer *ring,
598                  u32 *result)
599 {
600         u32 seqno;
601         int ret;
602
603         ret = intel_ring_begin(ring, 4);
604         if (ret)
605                 return ret;
606
607         seqno = i915_gem_get_seqno(ring->dev);
608
609         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
610         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
611         intel_ring_emit(ring, seqno);
612         intel_ring_emit(ring, MI_USER_INTERRUPT);
613         intel_ring_advance(ring);
614
615         DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
616         *result = seqno;
617         return 0;
618 }
619
620 static bool
621 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
622 {
623         struct drm_device *dev = ring->dev;
624         drm_i915_private_t *dev_priv = dev->dev_private;
625
626         if (!dev->irq_enabled)
627                 return false;
628
629         spin_lock(&ring->irq_lock);
630         if (ring->irq_refcount++ == 0)
631                 ironlake_enable_irq(dev_priv, flag);
632         spin_unlock(&ring->irq_lock);
633
634         return true;
635 }
636
637 static void
638 ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
639 {
640         struct drm_device *dev = ring->dev;
641         drm_i915_private_t *dev_priv = dev->dev_private;
642
643         spin_lock(&ring->irq_lock);
644         if (--ring->irq_refcount == 0)
645                 ironlake_disable_irq(dev_priv, flag);
646         spin_unlock(&ring->irq_lock);
647 }
648
649 static bool
650 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
651 {
652         struct drm_device *dev = ring->dev;
653         drm_i915_private_t *dev_priv = dev->dev_private;
654
655         if (!dev->irq_enabled)
656                 return false;
657
658         spin_lock(&ring->irq_lock);
659         if (ring->irq_refcount++ == 0) {
660                 ring->irq_mask &= ~rflag;
661                 I915_WRITE_IMR(ring, ring->irq_mask);
662                 ironlake_enable_irq(dev_priv, gflag);
663         }
664         spin_unlock(&ring->irq_lock);
665
666         return true;
667 }
668
669 static void
670 gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
671 {
672         struct drm_device *dev = ring->dev;
673         drm_i915_private_t *dev_priv = dev->dev_private;
674
675         spin_lock(&ring->irq_lock);
676         if (--ring->irq_refcount == 0) {
677                 ring->irq_mask |= rflag;
678                 I915_WRITE_IMR(ring, ring->irq_mask);
679                 ironlake_disable_irq(dev_priv, gflag);
680         }
681         spin_unlock(&ring->irq_lock);
682 }
683
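/*
 * The pre-gen6 BSD ring reports its user interrupt through different
 * registers depending on the generation: on G4X it arrives via the main
 * IMR/IIR as I915_BSD_USER_INTERRUPT, on Ironlake via the GT registers
 * as GT_BSD_USER_INTERRUPT.  Enabling the wrong one means BSD user
 * interrupts are silently missed.
 */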
684 static bool
685 bsd_ring_get_irq(struct intel_ring_buffer *ring)
686 {
687         struct drm_device *dev = ring->dev;
688         drm_i915_private_t *dev_priv = dev->dev_private;
689
690         if (!dev->irq_enabled)
691                 return false;
692
693         spin_lock(&ring->irq_lock);
694         if (ring->irq_refcount++ == 0) {
695                 if (IS_G4X(dev))
696                         i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
697                 else
698                         ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
699         }
700         spin_unlock(&ring->irq_lock);
701
702         return true;
703 }
704 static void
705 bsd_ring_put_irq(struct intel_ring_buffer *ring)
706 {
707         struct drm_device *dev = ring->dev;
708         drm_i915_private_t *dev_priv = dev->dev_private;
709
710         spin_lock(&ring->irq_lock);
711         if (--ring->irq_refcount == 0) {
712                 if (IS_G4X(dev))
713                         i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
714                 else
715                         ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
716         }
717         spin_unlock(&ring->irq_lock);
718 }
719
720 static int
721 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
722 {
723         int ret;
724
725         ret = intel_ring_begin(ring, 2);
726         if (ret)
727                 return ret;
728
729         intel_ring_emit(ring,
730                         MI_BATCH_BUFFER_START | (2 << 6) |
731                         MI_BATCH_NON_SECURE_I965);
732         intel_ring_emit(ring, offset);
733         intel_ring_advance(ring);
734
735         return 0;
736 }
737
738 static int
739 render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
740                                 u32 offset, u32 len)
741 {
742         struct drm_device *dev = ring->dev;
743         drm_i915_private_t *dev_priv = dev->dev_private;
744         int ret;
745
746         trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
747
748         if (IS_I830(dev) || IS_845G(dev)) {
749                 ret = intel_ring_begin(ring, 4);
750                 if (ret)
751                         return ret;
752
753                 intel_ring_emit(ring, MI_BATCH_BUFFER);
754                 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
755                 intel_ring_emit(ring, offset + len - 8);
756                 intel_ring_emit(ring, 0);
757         } else {
758                 ret = intel_ring_begin(ring, 2);
759                 if (ret)
760                         return ret;
761
762                 if (INTEL_INFO(dev)->gen >= 4) {
763                         intel_ring_emit(ring,
764                                         MI_BATCH_BUFFER_START | (2 << 6) |
765                                         MI_BATCH_NON_SECURE_I965);
766                         intel_ring_emit(ring, offset);
767                 } else {
768                         intel_ring_emit(ring,
769                                         MI_BATCH_BUFFER_START | (2 << 6));
770                         intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
771                 }
772         }
773         intel_ring_advance(ring);
774
775         return 0;
776 }
777
778 static void cleanup_status_page(struct intel_ring_buffer *ring)
779 {
780         drm_i915_private_t *dev_priv = ring->dev->dev_private;
781         struct drm_i915_gem_object *obj;
782
783         obj = ring->status_page.obj;
784         if (obj == NULL)
785                 return;
786
787         kunmap(obj->pages[0]);
788         i915_gem_object_unpin(obj);
789         drm_gem_object_unreference(&obj->base);
790         ring->status_page.obj = NULL;
791
792         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
793 }
794
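/*
 * The hardware status page is a single cached page pinned into the GTT.
 * Its address is programmed by intel_ring_setup_status_page(), the ring
 * writes seqnos into it with MI_STORE_DWORD_INDEX, and ring_get_seqno()
 * reads them back through intel_read_status_page().
 */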
795 static int init_status_page(struct intel_ring_buffer *ring)
796 {
797         struct drm_device *dev = ring->dev;
798         drm_i915_private_t *dev_priv = dev->dev_private;
799         struct drm_i915_gem_object *obj;
800         int ret;
801
802         obj = i915_gem_alloc_object(dev, 4096);
803         if (obj == NULL) {
804                 DRM_ERROR("Failed to allocate status page\n");
805                 ret = -ENOMEM;
806                 goto err;
807         }
808         obj->agp_type = AGP_USER_CACHED_MEMORY;
809
810         ret = i915_gem_object_pin(obj, 4096, true);
811         if (ret != 0) {
812                 goto err_unref;
813         }
814
815         ring->status_page.gfx_addr = obj->gtt_offset;
816         ring->status_page.page_addr = kmap(obj->pages[0]);
817         if (ring->status_page.page_addr == NULL) {
818                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
819                 goto err_unpin;
820         }
821         ring->status_page.obj = obj;
822         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
823
824         intel_ring_setup_status_page(ring);
825         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
826                         ring->name, ring->status_page.gfx_addr);
827
828         return 0;
829
830 err_unpin:
831         i915_gem_object_unpin(obj);
832 err_unref:
833         drm_gem_object_unreference(&obj->base);
834 err:
835         return ret;
836 }
837
838 int intel_init_ring_buffer(struct drm_device *dev,
839                            struct intel_ring_buffer *ring)
840 {
841         struct drm_i915_gem_object *obj;
842         int ret;
843
844         ring->dev = dev;
845         INIT_LIST_HEAD(&ring->active_list);
846         INIT_LIST_HEAD(&ring->request_list);
847         INIT_LIST_HEAD(&ring->gpu_write_list);
848
849         spin_lock_init(&ring->irq_lock);
850         ring->irq_mask = ~0;
851
852         if (I915_NEED_GFX_HWS(dev)) {
853                 ret = init_status_page(ring);
854                 if (ret)
855                         return ret;
856         }
857
858         obj = i915_gem_alloc_object(dev, ring->size);
859         if (obj == NULL) {
860                 DRM_ERROR("Failed to allocate ringbuffer\n");
861                 ret = -ENOMEM;
862                 goto err_hws;
863         }
864
865         ring->obj = obj;
866
867         ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
868         if (ret)
869                 goto err_unref;
870
871         ring->map.size = ring->size;
872         ring->map.offset = dev->agp->base + obj->gtt_offset;
873         ring->map.type = 0;
874         ring->map.flags = 0;
875         ring->map.mtrr = 0;
876
877         drm_core_ioremap_wc(&ring->map, dev);
878         if (ring->map.handle == NULL) {
879                 DRM_ERROR("Failed to map ringbuffer.\n");
880                 ret = -EINVAL;
881                 goto err_unpin;
882         }
883
884         ring->virtual_start = ring->map.handle;
885         ret = ring->init(ring);
886         if (ret)
887                 goto err_unmap;
888
889         /* Workaround an erratum on the i830 which causes a hang if
890          * the TAIL pointer points to within the last 2 cachelines
891          * of the buffer.
892          */
893         ring->effective_size = ring->size;
894         if (IS_I830(ring->dev))
895                 ring->effective_size -= 128;
896
897         return 0;
898
899 err_unmap:
900         drm_core_ioremapfree(&ring->map, dev);
901 err_unpin:
902         i915_gem_object_unpin(obj);
903 err_unref:
904         drm_gem_object_unreference(&obj->base);
905         ring->obj = NULL;
906 err_hws:
907         cleanup_status_page(ring);
908         return ret;
909 }
910
911 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
912 {
913         struct drm_i915_private *dev_priv;
914         int ret;
915
916         if (ring->obj == NULL)
917                 return;
918
919         /* Disable the ring buffer. The ring must be idle at this point */
920         dev_priv = ring->dev->dev_private;
921         ret = intel_wait_ring_buffer(ring, ring->size - 8);
922         I915_WRITE_CTL(ring, 0);
923
924         drm_core_ioremapfree(&ring->map, ring->dev);
925
926         i915_gem_object_unpin(ring->obj);
927         drm_gem_object_unreference(&ring->obj->base);
928         ring->obj = NULL;
929
930         if (ring->cleanup)
931                 ring->cleanup(ring);
932
933         cleanup_status_page(ring);
934 }
935
936 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
937 {
938         unsigned int *virt;
939         int rem = ring->size - ring->tail;
940
941         if (ring->space < rem) {
942                 int ret = intel_wait_ring_buffer(ring, rem);
943                 if (ret)
944                         return ret;
945         }
946
947         virt = (unsigned int *)(ring->virtual_start + ring->tail);
948         rem /= 8;
949         while (rem--) {
950                 *virt++ = MI_NOOP;
951                 *virt++ = MI_NOOP;
952         }
953
954         ring->tail = 0;
955         ring->space = ring_space(ring);
956
957         return 0;
958 }
959
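/*
 * Wait for at least @n bytes of free space.  The head value mirrored
 * into dword 4 of the status page is checked first; if that is not
 * enough, fall back to polling the HEAD register for up to three
 * seconds, giving up early if the GPU has been declared wedged.
 */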
960 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
961 {
962         struct drm_device *dev = ring->dev;
963         struct drm_i915_private *dev_priv = dev->dev_private;
964         unsigned long end;
965         u32 head;
966
967         /* If the reported head position has wrapped or hasn't advanced,
968          * fall back to the slow and accurate path.
969          */
970         head = intel_read_status_page(ring, 4);
971         if (head > ring->head) {
972                 ring->head = head;
973                 ring->space = ring_space(ring);
974                 if (ring->space >= n)
975                         return 0;
976         }
977
978         trace_i915_ring_wait_begin(dev);
979         end = jiffies + 3 * HZ;
980         do {
981                 ring->head = I915_READ_HEAD(ring);
982                 ring->space = ring_space(ring);
983                 if (ring->space >= n) {
984                         trace_i915_ring_wait_end(dev);
985                         return 0;
986                 }
987
988                 if (dev->primary->master) {
989                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
990                         if (master_priv->sarea_priv)
991                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
992                 }
993
994                 msleep(1);
995                 if (atomic_read(&dev_priv->mm.wedged))
996                         return -EAGAIN;
997         } while (!time_after(jiffies, end));
998         trace_i915_ring_wait_end(dev);
999         return -EBUSY;
1000 }
1001
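/*
 * Reserve room for num_dwords command dwords, wrapping the tail back to
 * the start of the buffer with MI_NOOPs if the request would run past
 * effective_size.  A minimal usage sketch:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */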
1002 int intel_ring_begin(struct intel_ring_buffer *ring,
1003                      int num_dwords)
1004 {
1005         int n = 4*num_dwords;
1006         int ret;
1007
1008         if (unlikely(ring->tail + n > ring->effective_size)) {
1009                 ret = intel_wrap_ring_buffer(ring);
1010                 if (unlikely(ret))
1011                         return ret;
1012         }
1013
1014         if (unlikely(ring->space < n)) {
1015                 ret = intel_wait_ring_buffer(ring, n);
1016                 if (unlikely(ret))
1017                         return ret;
1018         }
1019
1020         ring->space -= n;
1021         return 0;
1022 }
1023
1024 void intel_ring_advance(struct intel_ring_buffer *ring)
1025 {
1026         ring->tail &= ring->size - 1;
1027         ring->write_tail(ring, ring->tail);
1028 }
1029
1030 static const struct intel_ring_buffer render_ring = {
1031         .name                   = "render ring",
1032         .id                     = RING_RENDER,
1033         .mmio_base              = RENDER_RING_BASE,
1034         .size                   = 32 * PAGE_SIZE,
1035         .init                   = init_render_ring,
1036         .write_tail             = ring_write_tail,
1037         .flush                  = render_ring_flush,
1038         .add_request            = render_ring_add_request,
1039         .get_seqno              = ring_get_seqno,
1040         .irq_get                = render_ring_get_irq,
1041         .irq_put                = render_ring_put_irq,
1042         .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
1043         .cleanup                = render_ring_cleanup,
1044 };
1045
1046 /* ring buffer for bit-stream decoder */
1047
1048 static const struct intel_ring_buffer bsd_ring = {
1049         .name                   = "bsd ring",
1050         .id                     = RING_BSD,
1051         .mmio_base              = BSD_RING_BASE,
1052         .size                   = 32 * PAGE_SIZE,
1053         .init                   = init_ring_common,
1054         .write_tail             = ring_write_tail,
1055         .flush                  = bsd_ring_flush,
1056         .add_request            = ring_add_request,
1057         .get_seqno              = ring_get_seqno,
1058         .irq_get                = bsd_ring_get_irq,
1059         .irq_put                = bsd_ring_put_irq,
1060         .dispatch_execbuffer    = ring_dispatch_execbuffer,
1061 };
1062
1063
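/*
 * On Gen6 the BSD ring's tail update is bracketed by PSMI writes:
 * disable the RC idle message, wait for the idle indicator, write the
 * new tail, then re-enable the idle message.  This appears intended to
 * keep the ring from sleeping across the tail move.
 */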
1064 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1065                                      u32 value)
1066 {
1067        drm_i915_private_t *dev_priv = ring->dev->dev_private;
1068
1069        /* Every tail move must follow the sequence below */
1070        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1071                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1072                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1073        I915_WRITE(GEN6_BSD_RNCID, 0x0);
1074
1075        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1076                                GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
1077                        50))
1078                DRM_ERROR("timed out waiting for IDLE Indicator\n");
1079
1080        I915_WRITE_TAIL(ring, value);
1081        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1082                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1083                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1084 }
1085
1086 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1087                            u32 invalidate, u32 flush)
1088 {
1089         uint32_t cmd;
1090         int ret;
1091
1092         if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
1093                 return 0;
1094
1095         ret = intel_ring_begin(ring, 4);
1096         if (ret)
1097                 return ret;
1098
1099         cmd = MI_FLUSH_DW;
1100         if (invalidate & I915_GEM_GPU_DOMAINS)
1101                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1102         intel_ring_emit(ring, cmd);
1103         intel_ring_emit(ring, 0);
1104         intel_ring_emit(ring, 0);
1105         intel_ring_emit(ring, MI_NOOP);
1106         intel_ring_advance(ring);
1107         return 0;
1108 }
1109
1110 static int
1111 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1112                               u32 offset, u32 len)
1113 {
1114        int ret;
1115
1116        ret = intel_ring_begin(ring, 2);
1117        if (ret)
1118                return ret;
1119
1120        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1121        /* bit0-7 is the length on GEN6+ */
1122        intel_ring_emit(ring, offset);
1123        intel_ring_advance(ring);
1124
1125        return 0;
1126 }
1127
1128 static bool
1129 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1130 {
1131         return gen6_ring_get_irq(ring,
1132                                  GT_USER_INTERRUPT,
1133                                  GEN6_RENDER_USER_INTERRUPT);
1134 }
1135
1136 static void
1137 gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1138 {
1139         return gen6_ring_put_irq(ring,
1140                                  GT_USER_INTERRUPT,
1141                                  GEN6_RENDER_USER_INTERRUPT);
1142 }
1143
1144 static bool
1145 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1146 {
1147         return gen6_ring_get_irq(ring,
1148                                  GT_GEN6_BSD_USER_INTERRUPT,
1149                                  GEN6_BSD_USER_INTERRUPT);
1150 }
1151
1152 static void
1153 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1154 {
1155         return gen6_ring_put_irq(ring,
1156                                  GT_GEN6_BSD_USER_INTERRUPT,
1157                                  GEN6_BSD_USER_INTERRUPT);
1158 }
1159
1160 /* ring buffer for Video Codec for Gen6+ */
1161 static const struct intel_ring_buffer gen6_bsd_ring = {
1162         .name                   = "gen6 bsd ring",
1163         .id                     = RING_BSD,
1164         .mmio_base              = GEN6_BSD_RING_BASE,
1165         .size                   = 32 * PAGE_SIZE,
1166         .init                   = init_ring_common,
1167         .write_tail             = gen6_bsd_ring_write_tail,
1168         .flush                  = gen6_ring_flush,
1169         .add_request            = gen6_add_request,
1170         .get_seqno              = ring_get_seqno,
1171         .irq_get                = gen6_bsd_ring_get_irq,
1172         .irq_put                = gen6_bsd_ring_put_irq,
1173         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1174 };
1175
1176 /* Blitter support (SandyBridge+) */
1177
1178 static bool
1179 blt_ring_get_irq(struct intel_ring_buffer *ring)
1180 {
1181         return gen6_ring_get_irq(ring,
1182                                  GT_BLT_USER_INTERRUPT,
1183                                  GEN6_BLITTER_USER_INTERRUPT);
1184 }
1185
1186 static void
1187 blt_ring_put_irq(struct intel_ring_buffer *ring)
1188 {
1189         gen6_ring_put_irq(ring,
1190                           GT_BLT_USER_INTERRUPT,
1191                           GEN6_BLITTER_USER_INTERRUPT);
1192 }
1193
1194
1195 /* Workaround for some stepping of SNB,
1196  * each time when BLT engine ring tail moved,
1197  * the first command in the ring to be parsed
1198  * should be MI_BATCH_BUFFER_START
1199  */
1200 #define NEED_BLT_WORKAROUND(dev) \
1201         (IS_GEN6(dev) && (dev->pdev->revision < 8))
1202
1203 static inline struct drm_i915_gem_object *
1204 to_blt_workaround(struct intel_ring_buffer *ring)
1205 {
1206         return ring->private;
1207 }
1208
1209 static int blt_ring_init(struct intel_ring_buffer *ring)
1210 {
1211         if (NEED_BLT_WORKAROUND(ring->dev)) {
1212                 struct drm_i915_gem_object *obj;
1213                 u32 *ptr;
1214                 int ret;
1215
1216                 obj = i915_gem_alloc_object(ring->dev, 4096);
1217                 if (obj == NULL)
1218                         return -ENOMEM;
1219
1220                 ret = i915_gem_object_pin(obj, 4096, true);
1221                 if (ret) {
1222                         drm_gem_object_unreference(&obj->base);
1223                         return ret;
1224                 }
1225
1226                 ptr = kmap(obj->pages[0]);
1227                 *ptr++ = MI_BATCH_BUFFER_END;
1228                 *ptr++ = MI_NOOP;
1229                 kunmap(obj->pages[0]);
1230
1231                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1232                 if (ret) {
1233                         i915_gem_object_unpin(obj);
1234                         drm_gem_object_unreference(&obj->base);
1235                         return ret;
1236                 }
1237
1238                 ring->private = obj;
1239         }
1240
1241         return init_ring_common(ring);
1242 }
1243
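/*
 * With the workaround object in place, blt_ring_begin() prepends an
 * MI_BATCH_BUFFER_START pointing at the tiny MI_BATCH_BUFFER_END batch
 * set up in blt_ring_init(), so the first command parsed after a tail
 * move is always MI_BATCH_BUFFER_START as the erratum requires.
 */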
1244 static int blt_ring_begin(struct intel_ring_buffer *ring,
1245                           int num_dwords)
1246 {
1247         if (ring->private) {
1248                 int ret = intel_ring_begin(ring, num_dwords+2);
1249                 if (ret)
1250                         return ret;
1251
1252                 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1253                 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1254
1255                 return 0;
1256         } else
1257                 return intel_ring_begin(ring, 4);
1258 }
1259
1260 static int blt_ring_flush(struct intel_ring_buffer *ring,
1261                           u32 invalidate, u32 flush)
1262 {
1263         uint32_t cmd;
1264         int ret;
1265
1266         if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
1267                 return 0;
1268
1269         ret = blt_ring_begin(ring, 4);
1270         if (ret)
1271                 return ret;
1272
1273         cmd = MI_FLUSH_DW;
1274         if (invalidate & I915_GEM_DOMAIN_RENDER)
1275                 cmd |= MI_INVALIDATE_TLB;
1276         intel_ring_emit(ring, cmd);
1277         intel_ring_emit(ring, 0);
1278         intel_ring_emit(ring, 0);
1279         intel_ring_emit(ring, MI_NOOP);
1280         intel_ring_advance(ring);
1281         return 0;
1282 }
1283
1284 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1285 {
1286         if (!ring->private)
1287                 return;
1288
1289         i915_gem_object_unpin(ring->private);
1290         drm_gem_object_unreference(ring->private);
1291         ring->private = NULL;
1292 }
1293
1294 static const struct intel_ring_buffer gen6_blt_ring = {
1295        .name                    = "blt ring",
1296        .id                      = RING_BLT,
1297        .mmio_base               = BLT_RING_BASE,
1298        .size                    = 32 * PAGE_SIZE,
1299        .init                    = blt_ring_init,
1300        .write_tail              = ring_write_tail,
1301        .flush                   = blt_ring_flush,
1302        .add_request             = gen6_add_request,
1303        .get_seqno               = ring_get_seqno,
1304        .irq_get                 = blt_ring_get_irq,
1305        .irq_put                 = blt_ring_put_irq,
1306        .dispatch_execbuffer     = gen6_ring_dispatch_execbuffer,
1307        .cleanup                 = blt_ring_cleanup,
1308 };
1309
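/*
 * The setup entry points below start from the static ring templates
 * above and patch in generation-specific hooks (Gen6 request and
 * interrupt handling, Ironlake's PIPE_CONTROL-based seqno tracking)
 * before handing off to intel_init_ring_buffer().
 */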
1310 int intel_init_render_ring_buffer(struct drm_device *dev)
1311 {
1312         drm_i915_private_t *dev_priv = dev->dev_private;
1313         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1314
1315         *ring = render_ring;
1316         if (INTEL_INFO(dev)->gen >= 6) {
1317                 ring->add_request = gen6_add_request;
1318                 ring->irq_get = gen6_render_ring_get_irq;
1319                 ring->irq_put = gen6_render_ring_put_irq;
1320         } else if (IS_GEN5(dev)) {
1321                 ring->add_request = pc_render_add_request;
1322                 ring->get_seqno = pc_render_get_seqno;
1323         }
1324
1325         if (!I915_NEED_GFX_HWS(dev)) {
1326                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1327                 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1328         }
1329
1330         return intel_init_ring_buffer(dev, ring);
1331 }
1332
1333 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1334 {
1335         drm_i915_private_t *dev_priv = dev->dev_private;
1336         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1337
1338         *ring = render_ring;
1339         if (INTEL_INFO(dev)->gen >= 6) {
1340                 ring->add_request = gen6_add_request;
1341                 ring->irq_get = gen6_render_ring_get_irq;
1342                 ring->irq_put = gen6_render_ring_put_irq;
1343         } else if (IS_GEN5(dev)) {
1344                 ring->add_request = pc_render_add_request;
1345                 ring->get_seqno = pc_render_get_seqno;
1346         }
1347
1348         ring->dev = dev;
1349         INIT_LIST_HEAD(&ring->active_list);
1350         INIT_LIST_HEAD(&ring->request_list);
1351         INIT_LIST_HEAD(&ring->gpu_write_list);
1352
1353         ring->size = size;
1354         ring->effective_size = ring->size;
1355         if (IS_I830(ring->dev))
1356                 ring->effective_size -= 128;
1357
1358         ring->map.offset = start;
1359         ring->map.size = size;
1360         ring->map.type = 0;
1361         ring->map.flags = 0;
1362         ring->map.mtrr = 0;
1363
1364         drm_core_ioremap_wc(&ring->map, dev);
1365         if (ring->map.handle == NULL) {
1366                 DRM_ERROR("can not ioremap virtual address for"
1367                           " ring buffer\n");
1368                 return -ENOMEM;
1369         }
1370
1371         ring->virtual_start = (void __force __iomem *)ring->map.handle;
1372         return 0;
1373 }
1374
1375 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1376 {
1377         drm_i915_private_t *dev_priv = dev->dev_private;
1378         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1379
1380         if (IS_GEN6(dev))
1381                 *ring = gen6_bsd_ring;
1382         else
1383                 *ring = bsd_ring;
1384
1385         return intel_init_ring_buffer(dev, ring);
1386 }
1387
1388 int intel_init_blt_ring_buffer(struct drm_device *dev)
1389 {
1390         drm_i915_private_t *dev_priv = dev->dev_private;
1391         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1392
1393         *ring = gen6_blt_ring;
1394
1395         return intel_init_ring_buffer(dev, ring);
1396 }