Merge branch 'drm-intel-fixes' into drm-intel-next
[linux-flexiantxendom0.git] drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drv.h"
33 #include "i915_drm.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
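/*
 * Bytes of free space between the last-read HEAD and our software TAIL.
 * Eight bytes are kept in reserve so that TAIL can never catch up with
 * HEAD (head == tail means "empty" to the hardware).
 */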
37 static inline int ring_space(struct intel_ring_buffer *ring)
38 {
39         int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
40         if (space < 0)
41                 space += ring->size;
42         return space;
43 }
44
45 static u32 i915_gem_get_seqno(struct drm_device *dev)
46 {
47         drm_i915_private_t *dev_priv = dev->dev_private;
48         u32 seqno;
49
50         seqno = dev_priv->next_seqno;
51
52         /* reserve 0 for non-seqno */
53         if (++dev_priv->next_seqno == 0)
54                 dev_priv->next_seqno = 1;
55
56         return seqno;
57 }
58
59 static int
60 render_ring_flush(struct intel_ring_buffer *ring,
61                   u32   invalidate_domains,
62                   u32   flush_domains)
63 {
64         struct drm_device *dev = ring->dev;
65         drm_i915_private_t *dev_priv = dev->dev_private;
66         u32 cmd;
67         int ret;
68
69 #if WATCH_EXEC
70         DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
71                   invalidate_domains, flush_domains);
72 #endif
73
74         trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
75                                      invalidate_domains, flush_domains);
76
77         if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
78                 /*
79                  * read/write caches:
80                  *
81                  * I915_GEM_DOMAIN_RENDER is always invalidated, but is
82                  * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
83                  * also flushed at 2d versus 3d pipeline switches.
84                  *
85                  * read-only caches:
86                  *
87                  * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
88                  * MI_READ_FLUSH is set, and is always flushed on 965.
89                  *
90                  * I915_GEM_DOMAIN_COMMAND may not exist?
91                  *
92                  * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
93                  * invalidated when MI_EXE_FLUSH is set.
94                  *
95                  * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
96                  * invalidated with every MI_FLUSH.
97                  *
98                  * TLBs:
99                  *
100                  * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
101                  * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
102                  * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
103                  * are flushed at any MI_FLUSH.
104                  */
105
106                 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
107                 if ((invalidate_domains|flush_domains) &
108                     I915_GEM_DOMAIN_RENDER)
109                         cmd &= ~MI_NO_WRITE_FLUSH;
110                 if (INTEL_INFO(dev)->gen < 4) {
111                         /*
112                          * On the 965, the sampler cache always gets flushed
113                          * and this bit is reserved.
114                          */
115                         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
116                                 cmd |= MI_READ_FLUSH;
117                 }
118                 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
119                         cmd |= MI_EXE_FLUSH;
120
121                 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
122                     (IS_G4X(dev) || IS_GEN5(dev)))
123                         cmd |= MI_INVALIDATE_ISP;
124
125 #if WATCH_EXEC
126                 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
127 #endif
128                 ret = intel_ring_begin(ring, 2);
129                 if (ret)
130                         return ret;
131
132                 intel_ring_emit(ring, cmd);
133                 intel_ring_emit(ring, MI_NOOP);
134                 intel_ring_advance(ring);
135         }
136
137         return 0;
138 }
139
140 static void ring_write_tail(struct intel_ring_buffer *ring,
141                             u32 value)
142 {
143         drm_i915_private_t *dev_priv = ring->dev->dev_private;
144         I915_WRITE_TAIL(ring, value);
145 }
146
147 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
148 {
149         drm_i915_private_t *dev_priv = ring->dev->dev_private;
150         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
151                         RING_ACTHD(ring->mmio_base) : ACTHD;
152
153         return I915_READ(acthd_reg);
154 }
155
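/*
 * Stop the ring, program its start address and size, and check that the
 * hardware actually reset HEAD to zero before declaring the ring valid.
 */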
156 static int init_ring_common(struct intel_ring_buffer *ring)
157 {
158         drm_i915_private_t *dev_priv = ring->dev->dev_private;
159         struct drm_i915_gem_object *obj = ring->obj;
160         u32 head;
161
162         /* Stop the ring if it's running. */
163         I915_WRITE_CTL(ring, 0);
164         I915_WRITE_HEAD(ring, 0);
165         ring->write_tail(ring, 0);
166
167         /* Initialize the ring. */
168         I915_WRITE_START(ring, obj->gtt_offset);
169         head = I915_READ_HEAD(ring) & HEAD_ADDR;
170
171         /* G45 ring initialization fails to reset head to zero */
172         if (head != 0) {
173                 DRM_DEBUG_KMS("%s head not reset to zero "
174                               "ctl %08x head %08x tail %08x start %08x\n",
175                               ring->name,
176                               I915_READ_CTL(ring),
177                               I915_READ_HEAD(ring),
178                               I915_READ_TAIL(ring),
179                               I915_READ_START(ring));
180
181                 I915_WRITE_HEAD(ring, 0);
182
183                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
184                         DRM_ERROR("failed to set %s head to zero "
185                                   "ctl %08x head %08x tail %08x start %08x\n",
186                                   ring->name,
187                                   I915_READ_CTL(ring),
188                                   I915_READ_HEAD(ring),
189                                   I915_READ_TAIL(ring),
190                                   I915_READ_START(ring));
191                 }
192         }
193
194         I915_WRITE_CTL(ring,
195                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
196                         | RING_REPORT_64K | RING_VALID);
197
198         /* If the head is still not zero, the ring is dead */
199         if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
200             I915_READ_START(ring) != obj->gtt_offset ||
201             (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
202                 DRM_ERROR("%s initialization failed "
203                                 "ctl %08x head %08x tail %08x start %08x\n",
204                                 ring->name,
205                                 I915_READ_CTL(ring),
206                                 I915_READ_HEAD(ring),
207                                 I915_READ_TAIL(ring),
208                                 I915_READ_START(ring));
209                 return -EIO;
210         }
211
212         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
213                 i915_kernel_lost_context(ring->dev);
214         else {
215                 ring->head = I915_READ_HEAD(ring);
216                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
217                 ring->space = ring_space(ring);
218         }
219
220         return 0;
221 }
222
223 /*
224  * 965+ support PIPE_CONTROL commands, which provide finer grained control
225  * over cache flushing.
226  */
227 struct pipe_control {
228         struct drm_i915_gem_object *obj;
229         volatile u32 *cpu_page;
230         u32 gtt_offset;
231 };
232
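/*
 * Allocate and pin a cacheable scratch page that PIPE_CONTROL commands can
 * write into: dword 0 holds the last seqno, the rest is scribble space for
 * the flush workaround in pc_render_add_request().
 */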
233 static int
234 init_pipe_control(struct intel_ring_buffer *ring)
235 {
236         struct pipe_control *pc;
237         struct drm_i915_gem_object *obj;
238         int ret;
239
240         if (ring->private)
241                 return 0;
242
243         pc = kmalloc(sizeof(*pc), GFP_KERNEL);
244         if (!pc)
245                 return -ENOMEM;
246
247         obj = i915_gem_alloc_object(ring->dev, 4096);
248         if (obj == NULL) {
249                 DRM_ERROR("Failed to allocate seqno page\n");
250                 ret = -ENOMEM;
251                 goto err;
252         }
253         obj->agp_type = AGP_USER_CACHED_MEMORY;
254
255         ret = i915_gem_object_pin(obj, 4096, true);
256         if (ret)
257                 goto err_unref;
258
259         pc->gtt_offset = obj->gtt_offset;
260         pc->cpu_page = kmap(obj->pages[0]);
261         if (pc->cpu_page == NULL)
262                 goto err_unpin;
263
264         pc->obj = obj;
265         ring->private = pc;
266         return 0;
267
268 err_unpin:
269         i915_gem_object_unpin(obj);
270 err_unref:
271         drm_gem_object_unreference(&obj->base);
272 err:
273         kfree(pc);
274         return ret;
275 }
276
277 static void
278 cleanup_pipe_control(struct intel_ring_buffer *ring)
279 {
280         struct pipe_control *pc = ring->private;
281         struct drm_i915_gem_object *obj;
282
283         if (!ring->private)
284                 return;
285
286         obj = pc->obj;
287         kunmap(obj->pages[0]);
288         i915_gem_object_unpin(obj);
289         drm_gem_object_unreference(&obj->base);
290
291         kfree(pc);
292         ring->private = NULL;
293 }
294
295 static int init_render_ring(struct intel_ring_buffer *ring)
296 {
297         struct drm_device *dev = ring->dev;
298         struct drm_i915_private *dev_priv = dev->dev_private;
299         int ret = init_ring_common(ring);
300
301         if (INTEL_INFO(dev)->gen > 3) {
302                 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
303                 if (IS_GEN6(dev))
304                         mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
305                 I915_WRITE(MI_MODE, mode);
306         }
307
308         if (INTEL_INFO(dev)->gen >= 6) {
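                /* gen6+ uses gen6_add_request and so needs no pipe_control page */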
309         } else if (IS_GEN5(dev)) {
310                 ret = init_pipe_control(ring);
311                 if (ret)
312                         return ret;
313         }
314
315         return ret;
316 }
317
318 static void render_ring_cleanup(struct intel_ring_buffer *ring)
319 {
320         if (!ring->private)
321                 return;
322
323         cleanup_pipe_control(ring);
324 }
325
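/*
 * Post the new seqno into the semaphore mailbox register of one of the
 * other two rings (selected by i), so that ring can later wait on it
 * with MI_SEMAPHORE_COMPARE.
 */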
326 static void
327 update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
328 {
329         struct drm_device *dev = ring->dev;
330         struct drm_i915_private *dev_priv = dev->dev_private;
331         int id;
332
333         /*
334          * cs -> 1 = vcs, 0 = bcs
335          * vcs -> 1 = bcs, 0 = cs,
336          * bcs -> 1 = cs, 0 = vcs.
337          */
338         id = ring - dev_priv->ring;
339         id += 2 - i;
340         id %= 3;
341
342         intel_ring_emit(ring,
343                         MI_SEMAPHORE_MBOX |
344                         MI_SEMAPHORE_REGISTER |
345                         MI_SEMAPHORE_UPDATE);
346         intel_ring_emit(ring, seqno);
347         intel_ring_emit(ring,
348                         RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
349 }
350
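/*
 * Gen6 breadcrumb: update the semaphore mailboxes of both other rings with
 * the new seqno, write it into the hardware status page, then raise a user
 * interrupt.
 */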
351 static int
352 gen6_add_request(struct intel_ring_buffer *ring,
353                  u32 *result)
354 {
355         u32 seqno;
356         int ret;
357
358         ret = intel_ring_begin(ring, 10);
359         if (ret)
360                 return ret;
361
362         seqno = i915_gem_get_seqno(ring->dev);
363         update_semaphore(ring, 0, seqno);
364         update_semaphore(ring, 1, seqno);
365
366         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
367         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
368         intel_ring_emit(ring, seqno);
369         intel_ring_emit(ring, MI_USER_INTERRUPT);
370         intel_ring_advance(ring);
371
372         *result = seqno;
373         return 0;
374 }
375
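/*
 * Make this ring stall (via MI_SEMAPHORE_COMPARE) until the semaphore
 * register written on our behalf by the other ring reaches the given seqno.
 */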
376 int
377 intel_ring_sync(struct intel_ring_buffer *ring,
378                 struct intel_ring_buffer *to,
379                 u32 seqno)
380 {
381         int ret;
382
383         ret = intel_ring_begin(ring, 4);
384         if (ret)
385                 return ret;
386
387         intel_ring_emit(ring,
388                         MI_SEMAPHORE_MBOX |
389                         MI_SEMAPHORE_REGISTER |
390                         intel_ring_sync_index(ring, to) << 17 |
391                         MI_SEMAPHORE_COMPARE);
392         intel_ring_emit(ring, seqno);
393         intel_ring_emit(ring, 0);
394         intel_ring_emit(ring, MI_NOOP);
395         intel_ring_advance(ring);
396
397         return 0;
398 }
399
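/*
 * Emit a depth-stalling PIPE_CONTROL that performs a qword write to addr__;
 * used below to flush out enough writes that the final notifying
 * PIPE_CONTROL lands coherently in memory.
 */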
400 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
401 do {                                                                    \
402         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |           \
403                  PIPE_CONTROL_DEPTH_STALL | 2);                         \
404         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
405         intel_ring_emit(ring__, 0);                                                     \
406         intel_ring_emit(ring__, 0);                                                     \
407 } while (0)
408
409 static int
410 pc_render_add_request(struct intel_ring_buffer *ring,
411                       u32 *result)
412 {
413         struct drm_device *dev = ring->dev;
414         u32 seqno = i915_gem_get_seqno(dev);
415         struct pipe_control *pc = ring->private;
416         u32 scratch_addr = pc->gtt_offset + 128;
417         int ret;
418
419         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
420          * incoherent with writes to memory, i.e. completely fubar,
421          * so we need to use PIPE_NOTIFY instead.
422          *
423          * However, we also need to workaround the qword write
424          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
425          * memory before requesting an interrupt.
426          */
427         ret = intel_ring_begin(ring, 32);
428         if (ret)
429                 return ret;
430
431         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
432                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
433         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
434         intel_ring_emit(ring, seqno);
435         intel_ring_emit(ring, 0);
436         PIPE_CONTROL_FLUSH(ring, scratch_addr);
437         scratch_addr += 128; /* write to separate cachelines */
438         PIPE_CONTROL_FLUSH(ring, scratch_addr);
439         scratch_addr += 128;
440         PIPE_CONTROL_FLUSH(ring, scratch_addr);
441         scratch_addr += 128;
442         PIPE_CONTROL_FLUSH(ring, scratch_addr);
443         scratch_addr += 128;
444         PIPE_CONTROL_FLUSH(ring, scratch_addr);
445         scratch_addr += 128;
446         PIPE_CONTROL_FLUSH(ring, scratch_addr);
447         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
448                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
449                         PIPE_CONTROL_NOTIFY);
450         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
451         intel_ring_emit(ring, seqno);
452         intel_ring_emit(ring, 0);
453         intel_ring_advance(ring);
454
455         *result = seqno;
456         return 0;
457 }
458
459 static int
460 render_ring_add_request(struct intel_ring_buffer *ring,
461                         u32 *result)
462 {
463         struct drm_device *dev = ring->dev;
464         u32 seqno = i915_gem_get_seqno(dev);
465         int ret;
466
467         ret = intel_ring_begin(ring, 4);
468         if (ret)
469                 return ret;
470
471         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
472         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
473         intel_ring_emit(ring, seqno);
474         intel_ring_emit(ring, MI_USER_INTERRUPT);
475         intel_ring_advance(ring);
476
477         *result = seqno;
478         return 0;
479 }
480
481 static u32
482 ring_get_seqno(struct intel_ring_buffer *ring)
483 {
484         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
485 }
486
487 static u32
488 pc_render_get_seqno(struct intel_ring_buffer *ring)
489 {
490         struct pipe_control *pc = ring->private;
491         return pc->cpu_page[0];
492 }
493
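/*
 * Helpers to unmask/mask interrupt sources in the GT interrupt mask register
 * (Ironlake and later) or the single IMR on earlier generations; the posting
 * read flushes the write to hardware.
 */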
494 static void
495 ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
496 {
497         dev_priv->gt_irq_mask &= ~mask;
498         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
499         POSTING_READ(GTIMR);
500 }
501
502 static void
503 ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
504 {
505         dev_priv->gt_irq_mask |= mask;
506         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
507         POSTING_READ(GTIMR);
508 }
509
510 static void
511 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
512 {
513         dev_priv->irq_mask &= ~mask;
514         I915_WRITE(IMR, dev_priv->irq_mask);
515         POSTING_READ(IMR);
516 }
517
518 static void
519 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
520 {
521         dev_priv->irq_mask |= mask;
522         I915_WRITE(IMR, dev_priv->irq_mask);
523         POSTING_READ(IMR);
524 }
525
526 static bool
527 render_ring_get_irq(struct intel_ring_buffer *ring)
528 {
529         struct drm_device *dev = ring->dev;
530         drm_i915_private_t *dev_priv = dev->dev_private;
531
532         if (!dev->irq_enabled)
533                 return false;
534
535         spin_lock(&ring->irq_lock);
536         if (ring->irq_refcount++ == 0) {
537                 if (HAS_PCH_SPLIT(dev))
538                         ironlake_enable_irq(dev_priv,
539                                             GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
540                 else
541                         i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
542         }
543         spin_unlock(&ring->irq_lock);
544
545         return true;
546 }
547
548 static void
549 render_ring_put_irq(struct intel_ring_buffer *ring)
550 {
551         struct drm_device *dev = ring->dev;
552         drm_i915_private_t *dev_priv = dev->dev_private;
553
554         spin_lock(&ring->irq_lock);
555         if (--ring->irq_refcount == 0) {
556                 if (HAS_PCH_SPLIT(dev))
557                         ironlake_disable_irq(dev_priv,
558                                              GT_USER_INTERRUPT |
559                                              GT_PIPE_NOTIFY);
560                 else
561                         i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
562         }
563         spin_unlock(&ring->irq_lock);
564 }
565
566 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
567 {
568         drm_i915_private_t *dev_priv = ring->dev->dev_private;
569         u32 mmio = IS_GEN6(ring->dev) ?
570                 RING_HWS_PGA_GEN6(ring->mmio_base) :
571                 RING_HWS_PGA(ring->mmio_base);
572         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
573         POSTING_READ(mmio);
574 }
575
576 static int
577 bsd_ring_flush(struct intel_ring_buffer *ring,
578                u32     invalidate_domains,
579                u32     flush_domains)
580 {
581         int ret;
582
583         if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
584                 return 0;
585
586         ret = intel_ring_begin(ring, 2);
587         if (ret)
588                 return ret;
589
590         intel_ring_emit(ring, MI_FLUSH);
591         intel_ring_emit(ring, MI_NOOP);
592         intel_ring_advance(ring);
593         return 0;
594 }
595
596 static int
597 ring_add_request(struct intel_ring_buffer *ring,
598                  u32 *result)
599 {
600         u32 seqno;
601         int ret;
602
603         ret = intel_ring_begin(ring, 4);
604         if (ret)
605                 return ret;
606
607         seqno = i915_gem_get_seqno(ring->dev);
608
609         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
610         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
611         intel_ring_emit(ring, seqno);
612         intel_ring_emit(ring, MI_USER_INTERRUPT);
613         intel_ring_advance(ring);
614
615         *result = seqno;
616         return 0;
617 }
618
619 static bool
620 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
621 {
622         struct drm_device *dev = ring->dev;
623         drm_i915_private_t *dev_priv = dev->dev_private;
624
625         if (!dev->irq_enabled)
626                return false;
627
628         spin_lock(&ring->irq_lock);
629         if (ring->irq_refcount++ == 0)
630                 ironlake_enable_irq(dev_priv, flag);
631         spin_unlock(&ring->irq_lock);
632
633         return true;
634 }
635
636 static void
637 ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
638 {
639         struct drm_device *dev = ring->dev;
640         drm_i915_private_t *dev_priv = dev->dev_private;
641
642         spin_lock(&ring->irq_lock);
643         if (--ring->irq_refcount == 0)
644                 ironlake_disable_irq(dev_priv, flag);
645         spin_unlock(&ring->irq_lock);
646 }
647
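/*
 * Gen6 rings have a per-ring interrupt mask register on top of the global
 * GT mask, so both must be unmasked (and later remasked) around use.
 */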
648 static bool
649 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
650 {
651         struct drm_device *dev = ring->dev;
652         drm_i915_private_t *dev_priv = dev->dev_private;
653
654         if (!dev->irq_enabled)
655                return false;
656
657         spin_lock(&ring->irq_lock);
658         if (ring->irq_refcount++ == 0) {
659                 ring->irq_mask &= ~rflag;
660                 I915_WRITE_IMR(ring, ring->irq_mask);
661                 ironlake_enable_irq(dev_priv, gflag);
662         }
663         spin_unlock(&ring->irq_lock);
664
665         return true;
666 }
667
668 static void
669 gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
670 {
671         struct drm_device *dev = ring->dev;
672         drm_i915_private_t *dev_priv = dev->dev_private;
673
674         spin_lock(&ring->irq_lock);
675         if (--ring->irq_refcount == 0) {
676                 ring->irq_mask |= rflag;
677                 I915_WRITE_IMR(ring, ring->irq_mask);
678                 ironlake_disable_irq(dev_priv, gflag);
679         }
680         spin_unlock(&ring->irq_lock);
681 }
682
683 static bool
684 bsd_ring_get_irq(struct intel_ring_buffer *ring)
685 {
686         return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
687 }
688 static void
689 bsd_ring_put_irq(struct intel_ring_buffer *ring)
690 {
691         ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
692 }
693
694 static int
695 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
696 {
697         int ret;
698
699         ret = intel_ring_begin(ring, 2);
700         if (ret)
701                 return ret;
702
703         intel_ring_emit(ring,
704                         MI_BATCH_BUFFER_START | (2 << 6) |
705                         MI_BATCH_NON_SECURE_I965);
706         intel_ring_emit(ring, offset);
707         intel_ring_advance(ring);
708
709         return 0;
710 }
711
712 static int
713 render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
714                                 u32 offset, u32 len)
715 {
716         struct drm_device *dev = ring->dev;
717         drm_i915_private_t *dev_priv = dev->dev_private;
718         int ret;
719
720         trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
721
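        /*
         * i830/845G cannot use MI_BATCH_BUFFER_START; use the older
         * MI_BATCH_BUFFER command with explicit start and end addresses.
         */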
722         if (IS_I830(dev) || IS_845G(dev)) {
723                 ret = intel_ring_begin(ring, 4);
724                 if (ret)
725                         return ret;
726
727                 intel_ring_emit(ring, MI_BATCH_BUFFER);
728                 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
729                 intel_ring_emit(ring, offset + len - 8);
730                 intel_ring_emit(ring, 0);
731         } else {
732                 ret = intel_ring_begin(ring, 2);
733                 if (ret)
734                         return ret;
735
736                 if (INTEL_INFO(dev)->gen >= 4) {
737                         intel_ring_emit(ring,
738                                         MI_BATCH_BUFFER_START | (2 << 6) |
739                                         MI_BATCH_NON_SECURE_I965);
740                         intel_ring_emit(ring, offset);
741                 } else {
742                         intel_ring_emit(ring,
743                                         MI_BATCH_BUFFER_START | (2 << 6));
744                         intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
745                 }
746         }
747         intel_ring_advance(ring);
748
749         return 0;
750 }
751
752 static void cleanup_status_page(struct intel_ring_buffer *ring)
753 {
754         drm_i915_private_t *dev_priv = ring->dev->dev_private;
755         struct drm_i915_gem_object *obj;
756
757         obj = ring->status_page.obj;
758         if (obj == NULL)
759                 return;
760
761         kunmap(obj->pages[0]);
762         i915_gem_object_unpin(obj);
763         drm_gem_object_unreference(&obj->base);
764         ring->status_page.obj = NULL;
765
766         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
767 }
768
769 static int init_status_page(struct intel_ring_buffer *ring)
770 {
771         struct drm_device *dev = ring->dev;
772         drm_i915_private_t *dev_priv = dev->dev_private;
773         struct drm_i915_gem_object *obj;
774         int ret;
775
776         obj = i915_gem_alloc_object(dev, 4096);
777         if (obj == NULL) {
778                 DRM_ERROR("Failed to allocate status page\n");
779                 ret = -ENOMEM;
780                 goto err;
781         }
782         obj->agp_type = AGP_USER_CACHED_MEMORY;
783
784         ret = i915_gem_object_pin(obj, 4096, true);
785         if (ret != 0)
786                 goto err_unref;
788
789         ring->status_page.gfx_addr = obj->gtt_offset;
790         ring->status_page.page_addr = kmap(obj->pages[0]);
791         if (ring->status_page.page_addr == NULL) {
792                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
793                 goto err_unpin;
794         }
795         ring->status_page.obj = obj;
796         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
797
798         intel_ring_setup_status_page(ring);
799         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
800                         ring->name, ring->status_page.gfx_addr);
801
802         return 0;
803
804 err_unpin:
805         i915_gem_object_unpin(obj);
806 err_unref:
807         drm_gem_object_unreference(&obj->base);
808 err:
809         return ret;
810 }
811
812 int intel_init_ring_buffer(struct drm_device *dev,
813                            struct intel_ring_buffer *ring)
814 {
815         struct drm_i915_gem_object *obj;
816         int ret;
817
818         ring->dev = dev;
819         INIT_LIST_HEAD(&ring->active_list);
820         INIT_LIST_HEAD(&ring->request_list);
821         INIT_LIST_HEAD(&ring->gpu_write_list);
822
823         spin_lock_init(&ring->irq_lock);
824         ring->irq_mask = ~0;
825
826         if (I915_NEED_GFX_HWS(dev)) {
827                 ret = init_status_page(ring);
828                 if (ret)
829                         return ret;
830         }
831
832         obj = i915_gem_alloc_object(dev, ring->size);
833         if (obj == NULL) {
834                 DRM_ERROR("Failed to allocate ringbuffer\n");
835                 ret = -ENOMEM;
836                 goto err_hws;
837         }
838
839         ring->obj = obj;
840
841         ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
842         if (ret)
843                 goto err_unref;
844
845         ring->map.size = ring->size;
846         ring->map.offset = dev->agp->base + obj->gtt_offset;
847         ring->map.type = 0;
848         ring->map.flags = 0;
849         ring->map.mtrr = 0;
850
851         drm_core_ioremap_wc(&ring->map, dev);
852         if (ring->map.handle == NULL) {
853                 DRM_ERROR("Failed to map ringbuffer.\n");
854                 ret = -EINVAL;
855                 goto err_unpin;
856         }
857
858         ring->virtual_start = ring->map.handle;
859         ret = ring->init(ring);
860         if (ret)
861                 goto err_unmap;
862
863         /* Workaround an erratum on the i830 which causes a hang if
864          * the TAIL pointer points to within the last 2 cachelines
865          * of the buffer.
866          */
867         ring->effective_size = ring->size;
868         if (IS_I830(ring->dev))
869                 ring->effective_size -= 128;
870
871         return 0;
872
873 err_unmap:
874         drm_core_ioremapfree(&ring->map, dev);
875 err_unpin:
876         i915_gem_object_unpin(obj);
877 err_unref:
878         drm_gem_object_unreference(&obj->base);
879         ring->obj = NULL;
880 err_hws:
881         cleanup_status_page(ring);
882         return ret;
883 }
884
885 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
886 {
887         struct drm_i915_private *dev_priv;
888         int ret;
889
890         if (ring->obj == NULL)
891                 return;
892
893         /* Disable the ring buffer. The ring must be idle at this point */
894         dev_priv = ring->dev->dev_private;
895         ret = intel_wait_ring_buffer(ring, ring->size - 8);
        if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);
896         I915_WRITE_CTL(ring, 0);
897
898         drm_core_ioremapfree(&ring->map, ring->dev);
899
900         i915_gem_object_unpin(ring->obj);
901         drm_gem_object_unreference(&ring->obj->base);
902         ring->obj = NULL;
903
904         if (ring->cleanup)
905                 ring->cleanup(ring);
906
907         cleanup_status_page(ring);
908 }
909
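/*
 * Pad the space between TAIL and the end of the ring with MI_NOOPs and wrap
 * TAIL back to the start, waiting for the GPU to drain that much first.
 */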
910 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
911 {
912         unsigned int *virt;
913         int rem = ring->size - ring->tail;
914
915         if (ring->space < rem) {
916                 int ret = intel_wait_ring_buffer(ring, rem);
917                 if (ret)
918                         return ret;
919         }
920
921         virt = (unsigned int *)(ring->virtual_start + ring->tail);
922         rem /= 8;
923         while (rem--) {
924                 *virt++ = MI_NOOP;
925                 *virt++ = MI_NOOP;
926         }
927
928         ring->tail = 0;
929         ring->space = ring_space(ring);
930
931         return 0;
932 }
933
934 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
935 {
936         struct drm_device *dev = ring->dev;
937         struct drm_i915_private *dev_priv = dev->dev_private;
938         unsigned long end;
939         u32 head;
940
941         /* If the reported head position has wrapped or hasn't advanced,
942          * fallback to the slow and accurate path.
943          */
944         head = intel_read_status_page(ring, 4);
945         if (head > ring->head) {
946                 ring->head = head;
947                 ring->space = ring_space(ring);
948                 if (ring->space >= n)
949                         return 0;
950         }
951
952         trace_i915_ring_wait_begin(dev);
953         end = jiffies + 3 * HZ;
954         do {
955                 ring->head = I915_READ_HEAD(ring);
956                 ring->space = ring_space(ring);
957                 if (ring->space >= n) {
958                         trace_i915_ring_wait_end(dev);
959                         return 0;
960                 }
961
962                 if (dev->primary->master) {
963                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
964                         if (master_priv->sarea_priv)
965                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
966                 }
967
968                 msleep(1);
969                 if (atomic_read(&dev_priv->mm.wedged))
970                         return -EAGAIN;
971         } while (!time_after(jiffies, end));
972         trace_i915_ring_wait_end(dev);
973         return -EBUSY;
974 }
975
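/*
 * Reserve space for num_dwords of commands, wrapping the ring and/or waiting
 * for the GPU to consume enough of the buffer first.
 */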
976 int intel_ring_begin(struct intel_ring_buffer *ring,
977                      int num_dwords)
978 {
979         int n = 4*num_dwords;
980         int ret;
981
982         if (unlikely(ring->tail + n > ring->effective_size)) {
983                 ret = intel_wrap_ring_buffer(ring);
984                 if (unlikely(ret))
985                         return ret;
986         }
987
988         if (unlikely(ring->space < n)) {
989                 ret = intel_wait_ring_buffer(ring, n);
990                 if (unlikely(ret))
991                         return ret;
992         }
993
994         ring->space -= n;
995         return 0;
996 }
997
998 void intel_ring_advance(struct intel_ring_buffer *ring)
999 {
1000         ring->tail &= ring->size - 1;
1001         ring->write_tail(ring, ring->tail);
1002 }
1003
1004 static const struct intel_ring_buffer render_ring = {
1005         .name                   = "render ring",
1006         .id                     = RING_RENDER,
1007         .mmio_base              = RENDER_RING_BASE,
1008         .size                   = 32 * PAGE_SIZE,
1009         .init                   = init_render_ring,
1010         .write_tail             = ring_write_tail,
1011         .flush                  = render_ring_flush,
1012         .add_request            = render_ring_add_request,
1013         .get_seqno              = ring_get_seqno,
1014         .irq_get                = render_ring_get_irq,
1015         .irq_put                = render_ring_put_irq,
1016         .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
1017         .cleanup                = render_ring_cleanup,
1018 };
1019
1020 /* ring buffer for bit-stream decoder */
1021
1022 static const struct intel_ring_buffer bsd_ring = {
1023         .name                   = "bsd ring",
1024         .id                     = RING_BSD,
1025         .mmio_base              = BSD_RING_BASE,
1026         .size                   = 32 * PAGE_SIZE,
1027         .init                   = init_ring_common,
1028         .write_tail             = ring_write_tail,
1029         .flush                  = bsd_ring_flush,
1030         .add_request            = ring_add_request,
1031         .get_seqno              = ring_get_seqno,
1032         .irq_get                = bsd_ring_get_irq,
1033         .irq_put                = bsd_ring_put_irq,
1034         .dispatch_execbuffer    = ring_dispatch_execbuffer,
1035 };
1036
1037
1038 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1039                                      u32 value)
1040 {
1041         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1042
1043         /* Every tail move must follow the sequence below */
1044         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1045                    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1046                    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1047         I915_WRITE(GEN6_BSD_RNCID, 0x0);
1048
1049         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1050                       GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
1051                      50))
1052                 DRM_ERROR("timed out waiting for IDLE Indicator\n");
1053
1054         I915_WRITE_TAIL(ring, value);
1055         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1056                    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1057                    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1058 }
1059
1060 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1061                            u32 invalidate_domains,
1062                            u32 flush_domains)
1063 {
1064         int ret;
1065
1066         if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
1067                 return 0;
1068
1069         ret = intel_ring_begin(ring, 4);
1070         if (ret)
1071                 return ret;
1072
1073         intel_ring_emit(ring, MI_FLUSH_DW);
1074         intel_ring_emit(ring, 0);
1075         intel_ring_emit(ring, 0);
1076         intel_ring_emit(ring, 0);
1077         intel_ring_advance(ring);
1078         return 0;
1079 }
1080
1081 static int
1082 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1083                               u32 offset, u32 len)
1084 {
1085         int ret;
1086
1087         ret = intel_ring_begin(ring, 2);
1088         if (ret)
1089                 return ret;
1090
1091         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1092         /* bit0-7 is the length on GEN6+ */
1093         intel_ring_emit(ring, offset);
1094         intel_ring_advance(ring);
1095
1096         return 0;
1097 }
1098
1099 static bool
1100 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1101 {
1102         return gen6_ring_get_irq(ring,
1103                                  GT_USER_INTERRUPT,
1104                                  GEN6_RENDER_USER_INTERRUPT);
1105 }
1106
1107 static void
1108 gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1109 {
1110         gen6_ring_put_irq(ring,
1111                           GT_USER_INTERRUPT,
1112                           GEN6_RENDER_USER_INTERRUPT);
1113 }
1114
1115 static bool
1116 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1117 {
1118         return gen6_ring_get_irq(ring,
1119                                  GT_GEN6_BSD_USER_INTERRUPT,
1120                                  GEN6_BSD_USER_INTERRUPT);
1121 }
1122
1123 static void
1124 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1125 {
1126         gen6_ring_put_irq(ring,
1127                           GT_GEN6_BSD_USER_INTERRUPT,
1128                           GEN6_BSD_USER_INTERRUPT);
1129 }
1130
1131 /* ring buffer for Video Codec for Gen6+ */
1132 static const struct intel_ring_buffer gen6_bsd_ring = {
1133         .name                   = "gen6 bsd ring",
1134         .id                     = RING_BSD,
1135         .mmio_base              = GEN6_BSD_RING_BASE,
1136         .size                   = 32 * PAGE_SIZE,
1137         .init                   = init_ring_common,
1138         .write_tail             = gen6_bsd_ring_write_tail,
1139         .flush                  = gen6_ring_flush,
1140         .add_request            = gen6_add_request,
1141         .get_seqno              = ring_get_seqno,
1142         .irq_get                = gen6_bsd_ring_get_irq,
1143         .irq_put                = gen6_bsd_ring_put_irq,
1144         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1145 };
1146
1147 /* Blitter support (SandyBridge+) */
1148
1149 static bool
1150 blt_ring_get_irq(struct intel_ring_buffer *ring)
1151 {
1152         return gen6_ring_get_irq(ring,
1153                                  GT_BLT_USER_INTERRUPT,
1154                                  GEN6_BLITTER_USER_INTERRUPT);
1155 }
1156
1157 static void
1158 blt_ring_put_irq(struct intel_ring_buffer *ring)
1159 {
1160         gen6_ring_put_irq(ring,
1161                           GT_BLT_USER_INTERRUPT,
1162                           GEN6_BLITTER_USER_INTERRUPT);
1163 }
1164
1165
1166 /* Workaround for some stepping of SNB,
1167  * each time when BLT engine ring tail moved,
1168  * the first command in the ring to be parsed
1169  * should be MI_BATCH_BUFFER_START
1170  */
1171 #define NEED_BLT_WORKAROUND(dev) \
1172         (IS_GEN6(dev) && ((dev)->pdev->revision < 8))
1173
1174 static inline struct drm_i915_gem_object *
1175 to_blt_workaround(struct intel_ring_buffer *ring)
1176 {
1177         return ring->private;
1178 }
1179
1180 static int blt_ring_init(struct intel_ring_buffer *ring)
1181 {
1182         if (NEED_BLT_WORKAROUND(ring->dev)) {
1183                 struct drm_i915_gem_object *obj;
1184                 u32 *ptr;
1185                 int ret;
1186
1187                 obj = i915_gem_alloc_object(ring->dev, 4096);
1188                 if (obj == NULL)
1189                         return -ENOMEM;
1190
1191                 ret = i915_gem_object_pin(obj, 4096, true);
1192                 if (ret) {
1193                         drm_gem_object_unreference(&obj->base);
1194                         return ret;
1195                 }
1196
1197                 ptr = kmap(obj->pages[0]);
1198                 *ptr++ = MI_BATCH_BUFFER_END;
1199                 *ptr++ = MI_NOOP;
1200                 kunmap(obj->pages[0]);
1201
1202                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1203                 if (ret) {
1204                         i915_gem_object_unpin(obj);
1205                         drm_gem_object_unreference(&obj->base);
1206                         return ret;
1207                 }
1208
1209                 ring->private = obj;
1210         }
1211
1212         return init_ring_common(ring);
1213 }
1214
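/*
 * On the affected SNB steppings (see NEED_BLT_WORKAROUND above) every
 * submission is prefixed with a jump through the pinned workaround batch,
 * which costs two extra dwords of ring space.
 */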
1215 static int blt_ring_begin(struct intel_ring_buffer *ring,
1216                           int num_dwords)
1217 {
1218         if (ring->private) {
1219                 int ret = intel_ring_begin(ring, num_dwords+2);
1220                 if (ret)
1221                         return ret;
1222
1223                 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1224                 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1225
1226                 return 0;
1227         } else
1228                 return intel_ring_begin(ring, num_dwords);
1229 }
1230
1231 static int blt_ring_flush(struct intel_ring_buffer *ring,
1232                            u32 invalidate_domains,
1233                            u32 flush_domains)
1234 {
1235         int ret;
1236
1237         if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
1238                 return 0;
1239
1240         ret = blt_ring_begin(ring, 4);
1241         if (ret)
1242                 return ret;
1243
1244         intel_ring_emit(ring, MI_FLUSH_DW);
1245         intel_ring_emit(ring, 0);
1246         intel_ring_emit(ring, 0);
1247         intel_ring_emit(ring, 0);
1248         intel_ring_advance(ring);
1249         return 0;
1250 }
1251
1252 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1253 {
1254         if (!ring->private)
1255                 return;
1256
1257         i915_gem_object_unpin(ring->private);
1258         drm_gem_object_unreference(ring->private);
1259         ring->private = NULL;
1260 }
1261
1262 static const struct intel_ring_buffer gen6_blt_ring = {
1263         .name                   = "blt ring",
1264         .id                     = RING_BLT,
1265         .mmio_base              = BLT_RING_BASE,
1266         .size                   = 32 * PAGE_SIZE,
1267         .init                   = blt_ring_init,
1268         .write_tail             = ring_write_tail,
1269         .flush                  = blt_ring_flush,
1270         .add_request            = gen6_add_request,
1271         .get_seqno              = ring_get_seqno,
1272         .irq_get                = blt_ring_get_irq,
1273         .irq_put                = blt_ring_put_irq,
1274         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1275         .cleanup                = blt_ring_cleanup,
1276 };
1277
1278 int intel_init_render_ring_buffer(struct drm_device *dev)
1279 {
1280         drm_i915_private_t *dev_priv = dev->dev_private;
1281         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1282
1283         *ring = render_ring;
1284         if (INTEL_INFO(dev)->gen >= 6) {
1285                 ring->add_request = gen6_add_request;
1286                 ring->irq_get = gen6_render_ring_get_irq;
1287                 ring->irq_put = gen6_render_ring_put_irq;
1288         } else if (IS_GEN5(dev)) {
1289                 ring->add_request = pc_render_add_request;
1290                 ring->get_seqno = pc_render_get_seqno;
1291         }
1292
1293         if (!I915_NEED_GFX_HWS(dev)) {
1294                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1295                 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1296         }
1297
1298         return intel_init_ring_buffer(dev, ring);
1299 }
1300
1301 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1302 {
1303         drm_i915_private_t *dev_priv = dev->dev_private;
1304         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1305
1306         *ring = render_ring;
1307         if (INTEL_INFO(dev)->gen >= 6) {
1308                 ring->add_request = gen6_add_request;
1309                 ring->irq_get = gen6_render_ring_get_irq;
1310                 ring->irq_put = gen6_render_ring_put_irq;
1311         } else if (IS_GEN5(dev)) {
1312                 ring->add_request = pc_render_add_request;
1313                 ring->get_seqno = pc_render_get_seqno;
1314         }
1315
1316         ring->dev = dev;
1317         INIT_LIST_HEAD(&ring->active_list);
1318         INIT_LIST_HEAD(&ring->request_list);
1319         INIT_LIST_HEAD(&ring->gpu_write_list);
1320
1321         ring->size = size;
1322         ring->effective_size = ring->size;
1323         if (IS_I830(ring->dev))
1324                 ring->effective_size -= 128;
1325
1326         ring->map.offset = start;
1327         ring->map.size = size;
1328         ring->map.type = 0;
1329         ring->map.flags = 0;
1330         ring->map.mtrr = 0;
1331
1332         drm_core_ioremap_wc(&ring->map, dev);
1333         if (ring->map.handle == NULL) {
1334                 DRM_ERROR("can not ioremap virtual address for"
1335                           " ring buffer\n");
1336                 return -ENOMEM;
1337         }
1338
1339         ring->virtual_start = (void __force __iomem *)ring->map.handle;
1340         return 0;
1341 }
1342
1343 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1344 {
1345         drm_i915_private_t *dev_priv = dev->dev_private;
1346         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1347
1348         if (IS_GEN6(dev))
1349                 *ring = gen6_bsd_ring;
1350         else
1351                 *ring = bsd_ring;
1352
1353         return intel_init_ring_buffer(dev, ring);
1354 }
1355
1356 int intel_init_blt_ring_buffer(struct drm_device *dev)
1357 {
1358         drm_i915_private_t *dev_priv = dev->dev_private;
1359         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1360
1361         *ring = gen6_blt_ring;
1362
1363         return intel_init_ring_buffer(dev, ring);
1364 }