drm/i915: Initialise ring vfuncs for old DRI paths
drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drv.h"
33 #include "i915_drm.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
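/*
 * Hand out the next breadcrumb value from the device-wide counter.
 * Zero is reserved to mean "no seqno", so the counter skips it on wrap.
 */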
37 static u32 i915_gem_get_seqno(struct drm_device *dev)
38 {
39         drm_i915_private_t *dev_priv = dev->dev_private;
40         u32 seqno;
41
42         seqno = dev_priv->next_seqno;
43
44         /* reserve 0 for non-seqno */
45         if (++dev_priv->next_seqno == 0)
46                 dev_priv->next_seqno = 1;
47
48         return seqno;
49 }
50
51 static int
52 render_ring_flush(struct intel_ring_buffer *ring,
53                   u32   invalidate_domains,
54                   u32   flush_domains)
55 {
56         struct drm_device *dev = ring->dev;
57         drm_i915_private_t *dev_priv = dev->dev_private;
58         u32 cmd;
59         int ret;
60
61 #if WATCH_EXEC
62         DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
63                   invalidate_domains, flush_domains);
64 #endif
65
66         trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
67                                      invalidate_domains, flush_domains);
68
69         if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
70                 /*
71                  * read/write caches:
72                  *
73                  * I915_GEM_DOMAIN_RENDER is always invalidated, but is
74                  * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
75                  * also flushed at 2d versus 3d pipeline switches.
76                  *
77                  * read-only caches:
78                  *
79                  * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
80                  * MI_READ_FLUSH is set, and is always flushed on 965.
81                  *
82                  * I915_GEM_DOMAIN_COMMAND may not exist?
83                  *
84                  * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
85                  * invalidated when MI_EXE_FLUSH is set.
86                  *
87                  * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
88                  * invalidated with every MI_FLUSH.
89                  *
90                  * TLBs:
91                  *
92                  * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
93                  * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
94                  * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
95                  * are flushed at any MI_FLUSH.
96                  */
97
98                 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
99                 if ((invalidate_domains|flush_domains) &
100                     I915_GEM_DOMAIN_RENDER)
101                         cmd &= ~MI_NO_WRITE_FLUSH;
102                 if (INTEL_INFO(dev)->gen < 4) {
103                         /*
104                          * On the 965, the sampler cache always gets flushed
105                          * and this bit is reserved.
106                          */
107                         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
108                                 cmd |= MI_READ_FLUSH;
109                 }
110                 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
111                         cmd |= MI_EXE_FLUSH;
112
113                 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
114                     (IS_G4X(dev) || IS_GEN5(dev)))
115                         cmd |= MI_INVALIDATE_ISP;
116
117 #if WATCH_EXEC
118                 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
119 #endif
120                 ret = intel_ring_begin(ring, 2);
121                 if (ret)
122                         return ret;
123
124                 intel_ring_emit(ring, cmd);
125                 intel_ring_emit(ring, MI_NOOP);
126                 intel_ring_advance(ring);
127         }
128
129         return 0;
130 }
131
132 static void ring_write_tail(struct intel_ring_buffer *ring,
133                             u32 value)
134 {
135         drm_i915_private_t *dev_priv = ring->dev->dev_private;
136         I915_WRITE_TAIL(ring, value);
137 }
138
139 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
140 {
141         drm_i915_private_t *dev_priv = ring->dev->dev_private;
142         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
143                         RING_ACTHD(ring->mmio_base) : ACTHD;
144
145         return I915_READ(acthd_reg);
146 }
147
148 static int init_ring_common(struct intel_ring_buffer *ring)
149 {
150         drm_i915_private_t *dev_priv = ring->dev->dev_private;
151         struct drm_i915_gem_object *obj = ring->obj;
152         u32 head;
153
154         /* Stop the ring if it's running. */
155         I915_WRITE_CTL(ring, 0);
156         I915_WRITE_HEAD(ring, 0);
157         ring->write_tail(ring, 0);
158
159         /* Initialize the ring. */
160         I915_WRITE_START(ring, obj->gtt_offset);
161         head = I915_READ_HEAD(ring) & HEAD_ADDR;
162
163         /* G45 ring initialization fails to reset head to zero */
164         if (head != 0) {
165                 DRM_DEBUG_KMS("%s head not reset to zero "
166                               "ctl %08x head %08x tail %08x start %08x\n",
167                               ring->name,
168                               I915_READ_CTL(ring),
169                               I915_READ_HEAD(ring),
170                               I915_READ_TAIL(ring),
171                               I915_READ_START(ring));
172
173                 I915_WRITE_HEAD(ring, 0);
174
175                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
176                         DRM_ERROR("failed to set %s head to zero "
177                                   "ctl %08x head %08x tail %08x start %08x\n",
178                                   ring->name,
179                                   I915_READ_CTL(ring),
180                                   I915_READ_HEAD(ring),
181                                   I915_READ_TAIL(ring),
182                                   I915_READ_START(ring));
183                 }
184         }
185
186         I915_WRITE_CTL(ring,
187                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
188                         | RING_REPORT_64K | RING_VALID);
189
190         /* If the head is still not zero, the ring is dead */
191         if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
192             I915_READ_START(ring) != obj->gtt_offset ||
193             (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
194                 DRM_ERROR("%s initialization failed "
195                                 "ctl %08x head %08x tail %08x start %08x\n",
196                                 ring->name,
197                                 I915_READ_CTL(ring),
198                                 I915_READ_HEAD(ring),
199                                 I915_READ_TAIL(ring),
200                                 I915_READ_START(ring));
201                 return -EIO;
202         }
203
204         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
205                 i915_kernel_lost_context(ring->dev);
206         else {
207                 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
208                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
209                 ring->space = ring->head - (ring->tail + 8);
210                 if (ring->space < 0)
211                         ring->space += ring->size;
212         }
213
214         return 0;
215 }
216
217 /*
218  * 965+ support PIPE_CONTROL commands, which provide finer grained control
219  * over cache flushing.
220  */
221 struct pipe_control {
222         struct drm_i915_gem_object *obj;
223         volatile u32 *cpu_page;
224         u32 gtt_offset;
225 };
226
227 static int
228 init_pipe_control(struct intel_ring_buffer *ring)
229 {
230         struct pipe_control *pc;
231         struct drm_i915_gem_object *obj;
232         int ret;
233
234         if (ring->private)
235                 return 0;
236
237         pc = kmalloc(sizeof(*pc), GFP_KERNEL);
238         if (!pc)
239                 return -ENOMEM;
240
241         obj = i915_gem_alloc_object(ring->dev, 4096);
242         if (obj == NULL) {
243                 DRM_ERROR("Failed to allocate seqno page\n");
244                 ret = -ENOMEM;
245                 goto err;
246         }
247         obj->agp_type = AGP_USER_CACHED_MEMORY;
248
249         ret = i915_gem_object_pin(obj, 4096, true);
250         if (ret)
251                 goto err_unref;
252
253         pc->gtt_offset = obj->gtt_offset;
254         pc->cpu_page =  kmap(obj->pages[0]);
255         if (pc->cpu_page == NULL)
256                 goto err_unpin;
257
258         pc->obj = obj;
259         ring->private = pc;
260         return 0;
261
262 err_unpin:
263         i915_gem_object_unpin(obj);
264 err_unref:
265         drm_gem_object_unreference(&obj->base);
266 err:
267         kfree(pc);
268         return ret;
269 }
270
271 static void
272 cleanup_pipe_control(struct intel_ring_buffer *ring)
273 {
274         struct pipe_control *pc = ring->private;
275         struct drm_i915_gem_object *obj;
276
277         if (!ring->private)
278                 return;
279
280         obj = pc->obj;
281         kunmap(obj->pages[0]);
282         i915_gem_object_unpin(obj);
283         drm_gem_object_unreference(&obj->base);
284
285         kfree(pc);
286         ring->private = NULL;
287 }
288
289 static int init_render_ring(struct intel_ring_buffer *ring)
290 {
291         struct drm_device *dev = ring->dev;
292         struct drm_i915_private *dev_priv = dev->dev_private;
293         int ret = init_ring_common(ring);
294
295         if (INTEL_INFO(dev)->gen > 3) {
296                 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
297                 if (IS_GEN6(dev))
298                         mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
299                 I915_WRITE(MI_MODE, mode);
300         }
301
302         if (INTEL_INFO(dev)->gen >= 6) {
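                /*
                 * Gen6 signals completion through gen6_add_request (MBOX
                 * semaphores plus the status page), so it does not need
                 * the pipe-control scratch page set up below for gen5.
                 */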
303         } else if (IS_GEN5(dev)) {
304                 ret = init_pipe_control(ring);
305                 if (ret)
306                         return ret;
307         }
308
309         return ret;
310 }
311
312 static void render_ring_cleanup(struct intel_ring_buffer *ring)
313 {
314         if (!ring->private)
315                 return;
316
317         cleanup_pipe_control(ring);
318 }
319
320 static void
321 update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
322 {
323         struct drm_device *dev = ring->dev;
324         struct drm_i915_private *dev_priv = dev->dev_private;
325         int id;
326
327         /*
328          * cs -> 1 = vcs, 0 = bcs
329          * vcs -> 1 = bcs, 0 = cs,
330          * bcs -> 1 = cs, 0 = vcs.
331          */
332         id = ring - dev_priv->ring;
333         id += 2 - i;
334         id %= 3;
335
336         intel_ring_emit(ring,
337                         MI_SEMAPHORE_MBOX |
338                         MI_SEMAPHORE_REGISTER |
339                         MI_SEMAPHORE_UPDATE);
340         intel_ring_emit(ring, seqno);
341         intel_ring_emit(ring,
342                         RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
343 }
344
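/*
 * Gen6 breadcrumb: signal the new seqno to the other two rings' semaphore
 * mailboxes, then write it into the hardware status page and raise
 * MI_USER_INTERRUPT to wake any waiters.
 */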
345 static int
346 gen6_add_request(struct intel_ring_buffer *ring,
347                  u32 *result)
348 {
349         u32 seqno;
350         int ret;
351
352         ret = intel_ring_begin(ring, 10);
353         if (ret)
354                 return ret;
355
356         seqno = i915_gem_get_seqno(ring->dev);
357         update_semaphore(ring, 0, seqno);
358         update_semaphore(ring, 1, seqno);
359
360         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
361         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
362         intel_ring_emit(ring, seqno);
363         intel_ring_emit(ring, MI_USER_INTERRUPT);
364         intel_ring_advance(ring);
365
366         *result = seqno;
367         return 0;
368 }
369
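/*
 * Emit a semaphore wait on @ring so that it stalls until @to has passed
 * @seqno (gen6 inter-ring synchronisation via the MI_SEMAPHORE_MBOX compare).
 */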
370 int
371 intel_ring_sync(struct intel_ring_buffer *ring,
372                 struct intel_ring_buffer *to,
373                 u32 seqno)
374 {
375         int ret;
376
377         ret = intel_ring_begin(ring, 4);
378         if (ret)
379                 return ret;
380
381         intel_ring_emit(ring,
382                         MI_SEMAPHORE_MBOX |
383                         MI_SEMAPHORE_REGISTER |
384                         intel_ring_sync_index(ring, to) << 17 |
385                         MI_SEMAPHORE_COMPARE);
386         intel_ring_emit(ring, seqno);
387         intel_ring_emit(ring, 0);
388         intel_ring_emit(ring, MI_NOOP);
389         intel_ring_advance(ring);
390
391         return 0;
392 }
393
394 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
395 do {                                                                    \
396         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |           \
397                  PIPE_CONTROL_DEPTH_STALL | 2);                         \
398         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
399         intel_ring_emit(ring__, 0);                                                     \
400         intel_ring_emit(ring__, 0);                                                     \
401 } while (0)
402
403 static int
404 pc_render_add_request(struct intel_ring_buffer *ring,
405                       u32 *result)
406 {
407         struct drm_device *dev = ring->dev;
408         u32 seqno = i915_gem_get_seqno(dev);
409         struct pipe_control *pc = ring->private;
410         u32 scratch_addr = pc->gtt_offset + 128;
411         int ret;
412
413         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
414          * incoherent with writes to memory, i.e. completely fubar,
415          * so we need to use PIPE_NOTIFY instead.
416          *
417          * However, we also need to workaround the qword write
418          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
419          * memory before requesting an interrupt.
420          */
421         ret = intel_ring_begin(ring, 32);
422         if (ret)
423                 return ret;
424
425         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
426                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
427         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
428         intel_ring_emit(ring, seqno);
429         intel_ring_emit(ring, 0);
430         PIPE_CONTROL_FLUSH(ring, scratch_addr);
431         scratch_addr += 128; /* write to separate cachelines */
432         PIPE_CONTROL_FLUSH(ring, scratch_addr);
433         scratch_addr += 128;
434         PIPE_CONTROL_FLUSH(ring, scratch_addr);
435         scratch_addr += 128;
436         PIPE_CONTROL_FLUSH(ring, scratch_addr);
437         scratch_addr += 128;
438         PIPE_CONTROL_FLUSH(ring, scratch_addr);
439         scratch_addr += 128;
440         PIPE_CONTROL_FLUSH(ring, scratch_addr);
441         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
442                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
443                         PIPE_CONTROL_NOTIFY);
444         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
445         intel_ring_emit(ring, seqno);
446         intel_ring_emit(ring, 0);
447         intel_ring_advance(ring);
448
449         *result = seqno;
450         return 0;
451 }
452
453 static int
454 render_ring_add_request(struct intel_ring_buffer *ring,
455                         u32 *result)
456 {
457         struct drm_device *dev = ring->dev;
458         u32 seqno = i915_gem_get_seqno(dev);
459         int ret;
460
461         ret = intel_ring_begin(ring, 4);
462         if (ret)
463                 return ret;
464
465         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
466         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
467         intel_ring_emit(ring, seqno);
468         intel_ring_emit(ring, MI_USER_INTERRUPT);
469         intel_ring_advance(ring);
470
471         *result = seqno;
472         return 0;
473 }
474
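/*
 * Breadcrumb readback: most rings store the last completed seqno in the
 * hardware status page; the Ironlake render ring instead reads it from the
 * pipe-control scratch page written by pc_render_add_request().
 */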
475 static u32
476 ring_get_seqno(struct intel_ring_buffer *ring)
477 {
478         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
479 }
480
481 static u32
482 pc_render_get_seqno(struct intel_ring_buffer *ring)
483 {
484         struct pipe_control *pc = ring->private;
485         return pc->cpu_page[0];
486 }
487
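/*
 * Interrupt mask helpers: update the cached interrupt mask and write it
 * back to GTIMR (the GT mask on Ironlake and later) or IMR (earlier
 * chipsets), with a posting read to flush the write.
 */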
488 static void
489 ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
490 {
491         dev_priv->gt_irq_mask &= ~mask;
492         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
493         POSTING_READ(GTIMR);
494 }
495
496 static void
497 ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
498 {
499         dev_priv->gt_irq_mask |= mask;
500         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
501         POSTING_READ(GTIMR);
502 }
503
504 static void
505 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
506 {
507         dev_priv->irq_mask &= ~mask;
508         I915_WRITE(IMR, dev_priv->irq_mask);
509         POSTING_READ(IMR);
510 }
511
512 static void
513 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
514 {
515         dev_priv->irq_mask |= mask;
516         I915_WRITE(IMR, dev_priv->irq_mask);
517         POSTING_READ(IMR);
518 }
519
520 static bool
521 render_ring_get_irq(struct intel_ring_buffer *ring)
522 {
523         struct drm_device *dev = ring->dev;
524         drm_i915_private_t *dev_priv = dev->dev_private;
525
526         if (!dev->irq_enabled)
527                 return false;
528
529         spin_lock(&ring->irq_lock);
530         if (ring->irq_refcount++ == 0) {
531                 if (HAS_PCH_SPLIT(dev))
532                         ironlake_enable_irq(dev_priv,
533                                             GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
534                 else
535                         i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
536         }
537         spin_unlock(&ring->irq_lock);
538
539         return true;
540 }
541
542 static void
543 render_ring_put_irq(struct intel_ring_buffer *ring)
544 {
545         struct drm_device *dev = ring->dev;
546         drm_i915_private_t *dev_priv = dev->dev_private;
547
548         spin_lock(&ring->irq_lock);
549         if (--ring->irq_refcount == 0) {
550                 if (HAS_PCH_SPLIT(dev))
551                         ironlake_disable_irq(dev_priv,
552                                              GT_USER_INTERRUPT |
553                                              GT_PIPE_NOTIFY);
554                 else
555                         i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
556         }
557         spin_unlock(&ring->irq_lock);
558 }
559
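/* Point the ring's HWS_PGA register at the GTT address of its status page. */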
560 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
561 {
562         drm_i915_private_t *dev_priv = ring->dev->dev_private;
563         u32 mmio = IS_GEN6(ring->dev) ?
564                 RING_HWS_PGA_GEN6(ring->mmio_base) :
565                 RING_HWS_PGA(ring->mmio_base);
566         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
567         POSTING_READ(mmio);
568 }
569
570 static int
571 bsd_ring_flush(struct intel_ring_buffer *ring,
572                u32     invalidate_domains,
573                u32     flush_domains)
574 {
575         int ret;
576
577         if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
578                 return 0;
579
580         ret = intel_ring_begin(ring, 2);
581         if (ret)
582                 return ret;
583
584         intel_ring_emit(ring, MI_FLUSH);
585         intel_ring_emit(ring, MI_NOOP);
586         intel_ring_advance(ring);
587         return 0;
588 }
589
590 static int
591 ring_add_request(struct intel_ring_buffer *ring,
592                  u32 *result)
593 {
594         u32 seqno;
595         int ret;
596
597         ret = intel_ring_begin(ring, 4);
598         if (ret)
599                 return ret;
600
601         seqno = i915_gem_get_seqno(ring->dev);
602
603         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
604         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
605         intel_ring_emit(ring, seqno);
606         intel_ring_emit(ring, MI_USER_INTERRUPT);
607         intel_ring_advance(ring);
608
609         DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
610         *result = seqno;
611         return 0;
612 }
613
614 static bool
615 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
616 {
617         struct drm_device *dev = ring->dev;
618         drm_i915_private_t *dev_priv = dev->dev_private;
619
620         if (!dev->irq_enabled)
621                return false;
622
623         spin_lock(&ring->irq_lock);
624         if (ring->irq_refcount++ == 0)
625                 ironlake_enable_irq(dev_priv, flag);
626         spin_unlock(&ring->irq_lock);
627
628         return true;
629 }
630
631 static void
632 ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
633 {
634         struct drm_device *dev = ring->dev;
635         drm_i915_private_t *dev_priv = dev->dev_private;
636
637         spin_lock(&ring->irq_lock);
638         if (--ring->irq_refcount == 0)
639                 ironlake_disable_irq(dev_priv, flag);
640         spin_unlock(&ring->irq_lock);
641 }
642
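/*
 * Gen6 interrupts are masked at two levels: the per-ring IMR and the
 * global GT interrupt mask.  The refcount ensures we only unmask for the
 * first user and re-mask for the last.
 */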
643 static bool
644 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
645 {
646         struct drm_device *dev = ring->dev;
647         drm_i915_private_t *dev_priv = dev->dev_private;
648
649         if (!dev->irq_enabled)
650                return false;
651
652         spin_lock(&ring->irq_lock);
653         if (ring->irq_refcount++ == 0) {
654                 ring->irq_mask &= ~rflag;
655                 I915_WRITE_IMR(ring, ring->irq_mask);
656                 ironlake_enable_irq(dev_priv, gflag);
657         }
658         spin_unlock(&ring->irq_lock);
659
660         return true;
661 }
662
663 static void
664 gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
665 {
666         struct drm_device *dev = ring->dev;
667         drm_i915_private_t *dev_priv = dev->dev_private;
668
669         spin_lock(&ring->irq_lock);
670         if (--ring->irq_refcount == 0) {
671                 ring->irq_mask |= rflag;
672                 I915_WRITE_IMR(ring, ring->irq_mask);
673                 ironlake_disable_irq(dev_priv, gflag);
674         }
675         spin_unlock(&ring->irq_lock);
676 }
677
678 static bool
679 bsd_ring_get_irq(struct intel_ring_buffer *ring)
680 {
681         return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
682 }
683 static void
684 bsd_ring_put_irq(struct intel_ring_buffer *ring)
685 {
686         ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
687 }
688
689 static int
690 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
691 {
692         int ret;
693
694         ret = intel_ring_begin(ring, 2);
695         if (ret)
696                 return ret;
697
698         intel_ring_emit(ring,
699                         MI_BATCH_BUFFER_START | (2 << 6) |
700                         MI_BATCH_NON_SECURE_I965);
701         intel_ring_emit(ring, offset);
702         intel_ring_advance(ring);
703
704         return 0;
705 }
706
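/*
 * Dispatch a batch on the render ring.  i830/845 use the older
 * MI_BATCH_BUFFER command, which takes both the start and the end of the
 * batch; later generations use MI_BATCH_BUFFER_START, with the 965+
 * variant taking the GTT offset directly.
 */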
707 static int
708 render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
709                                 u32 offset, u32 len)
710 {
711         struct drm_device *dev = ring->dev;
712         drm_i915_private_t *dev_priv = dev->dev_private;
713         int ret;
714
715         trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
716
717         if (IS_I830(dev) || IS_845G(dev)) {
718                 ret = intel_ring_begin(ring, 4);
719                 if (ret)
720                         return ret;
721
722                 intel_ring_emit(ring, MI_BATCH_BUFFER);
723                 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
724                 intel_ring_emit(ring, offset + len - 8);
725                 intel_ring_emit(ring, 0);
726         } else {
727                 ret = intel_ring_begin(ring, 2);
728                 if (ret)
729                         return ret;
730
731                 if (INTEL_INFO(dev)->gen >= 4) {
732                         intel_ring_emit(ring,
733                                         MI_BATCH_BUFFER_START | (2 << 6) |
734                                         MI_BATCH_NON_SECURE_I965);
735                         intel_ring_emit(ring, offset);
736                 } else {
737                         intel_ring_emit(ring,
738                                         MI_BATCH_BUFFER_START | (2 << 6));
739                         intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
740                 }
741         }
742         intel_ring_advance(ring);
743
744         return 0;
745 }
746
747 static void cleanup_status_page(struct intel_ring_buffer *ring)
748 {
749         drm_i915_private_t *dev_priv = ring->dev->dev_private;
750         struct drm_i915_gem_object *obj;
751
752         obj = ring->status_page.obj;
753         if (obj == NULL)
754                 return;
755
756         kunmap(obj->pages[0]);
757         i915_gem_object_unpin(obj);
758         drm_gem_object_unreference(&obj->base);
759         ring->status_page.obj = NULL;
760
761         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
762 }
763
764 static int init_status_page(struct intel_ring_buffer *ring)
765 {
766         struct drm_device *dev = ring->dev;
767         drm_i915_private_t *dev_priv = dev->dev_private;
768         struct drm_i915_gem_object *obj;
769         int ret;
770
771         obj = i915_gem_alloc_object(dev, 4096);
772         if (obj == NULL) {
773                 DRM_ERROR("Failed to allocate status page\n");
774                 ret = -ENOMEM;
775                 goto err;
776         }
777         obj->agp_type = AGP_USER_CACHED_MEMORY;
778
779         ret = i915_gem_object_pin(obj, 4096, true);
780         if (ret != 0) {
781                 goto err_unref;
782         }
783
784         ring->status_page.gfx_addr = obj->gtt_offset;
785         ring->status_page.page_addr = kmap(obj->pages[0]);
786         if (ring->status_page.page_addr == NULL) {
787                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
788                 goto err_unpin;
789         }
790         ring->status_page.obj = obj;
791         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
792
793         intel_ring_setup_status_page(ring);
794         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
795                         ring->name, ring->status_page.gfx_addr);
796
797         return 0;
798
799 err_unpin:
800         i915_gem_object_unpin(obj);
801 err_unref:
802         drm_gem_object_unreference(&obj->base);
803 err:
804         return ret;
805 }
806
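/*
 * Common ring setup: allocate and pin a GEM object for the ring, map it
 * write-combined through the GTT aperture, set up the status page if the
 * chipset needs one, and then run the ring's own ->init() hook.
 */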
807 int intel_init_ring_buffer(struct drm_device *dev,
808                            struct intel_ring_buffer *ring)
809 {
810         struct drm_i915_gem_object *obj;
811         int ret;
812
813         ring->dev = dev;
814         INIT_LIST_HEAD(&ring->active_list);
815         INIT_LIST_HEAD(&ring->request_list);
816         INIT_LIST_HEAD(&ring->gpu_write_list);
817
818         spin_lock_init(&ring->irq_lock);
819         ring->irq_mask = ~0;
820
821         if (I915_NEED_GFX_HWS(dev)) {
822                 ret = init_status_page(ring);
823                 if (ret)
824                         return ret;
825         }
826
827         obj = i915_gem_alloc_object(dev, ring->size);
828         if (obj == NULL) {
829                 DRM_ERROR("Failed to allocate ringbuffer\n");
830                 ret = -ENOMEM;
831                 goto err_hws;
832         }
833
834         ring->obj = obj;
835
836         ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
837         if (ret)
838                 goto err_unref;
839
840         ring->map.size = ring->size;
841         ring->map.offset = dev->agp->base + obj->gtt_offset;
842         ring->map.type = 0;
843         ring->map.flags = 0;
844         ring->map.mtrr = 0;
845
846         drm_core_ioremap_wc(&ring->map, dev);
847         if (ring->map.handle == NULL) {
848                 DRM_ERROR("Failed to map ringbuffer.\n");
849                 ret = -EINVAL;
850                 goto err_unpin;
851         }
852
853         ring->virtual_start = ring->map.handle;
854         ret = ring->init(ring);
855         if (ret)
856                 goto err_unmap;
857
858         /* Workaround an erratum on the i830 which causes a hang if
859          * the TAIL pointer points to within the last 2 cachelines
860          * of the buffer.
861          */
862         ring->effective_size = ring->size;
863         if (IS_I830(ring->dev))
864                 ring->effective_size -= 128;
865
866         return 0;
867
868 err_unmap:
869         drm_core_ioremapfree(&ring->map, dev);
870 err_unpin:
871         i915_gem_object_unpin(obj);
872 err_unref:
873         drm_gem_object_unreference(&obj->base);
874         ring->obj = NULL;
875 err_hws:
876         cleanup_status_page(ring);
877         return ret;
878 }
879
880 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
881 {
882         struct drm_i915_private *dev_priv;
883         int ret;
884
885         if (ring->obj == NULL)
886                 return;
887
888         /* Disable the ring buffer. The ring must be idle at this point */
889         dev_priv = ring->dev->dev_private;
890         ret = intel_wait_ring_buffer(ring, ring->size - 8);
891         I915_WRITE_CTL(ring, 0);
892
893         drm_core_ioremapfree(&ring->map, ring->dev);
894
895         i915_gem_object_unpin(ring->obj);
896         drm_gem_object_unreference(&ring->obj->base);
897         ring->obj = NULL;
898
899         if (ring->cleanup)
900                 ring->cleanup(ring);
901
902         cleanup_status_page(ring);
903 }
904
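/*
 * Fill the space between the current tail and the end of the ring with
 * MI_NOOPs and wrap the tail back to the start, first waiting for that
 * space if unconsumed commands still occupy it.
 */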
905 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
906 {
907         unsigned int *virt;
908         int rem = ring->size - ring->tail;
909
910         if (ring->space < rem) {
911                 int ret = intel_wait_ring_buffer(ring, rem);
912                 if (ret)
913                         return ret;
914         }
915
916         virt = (unsigned int *)(ring->virtual_start + ring->tail);
917         rem /= 8;
918         while (rem--) {
919                 *virt++ = MI_NOOP;
920                 *virt++ = MI_NOOP;
921         }
922
923         ring->tail = 0;
924         ring->space = ring->head - 8;
925
926         return 0;
927 }
928
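/*
 * Poll (with 1ms sleeps) until at least @n bytes are free in the ring,
 * preferring the head value auto-reported in the status page and falling
 * back to reading the HEAD register when the report looks stale.  Gives
 * up after 3 seconds, or immediately if the GPU is wedged.
 */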
929 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
930 {
931         struct drm_device *dev = ring->dev;
932         struct drm_i915_private *dev_priv = dev->dev_private;
933         unsigned long end;
934         u32 head;
935
936         trace_i915_ring_wait_begin(dev);
937         end = jiffies + 3 * HZ;
938         do {
939                 /* If the reported head position has wrapped or hasn't advanced,
940                  * fall back to the slow and accurate path.
941                  */
942                 head = intel_read_status_page(ring, 4);
943                 if (head < ring->actual_head)
944                         head = I915_READ_HEAD(ring);
945                 ring->actual_head = head;
946                 ring->head = head & HEAD_ADDR;
947                 ring->space = ring->head - (ring->tail + 8);
948                 if (ring->space < 0)
949                         ring->space += ring->size;
950                 if (ring->space >= n) {
951                         trace_i915_ring_wait_end(dev);
952                         return 0;
953                 }
954
955                 if (dev->primary->master) {
956                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
957                         if (master_priv->sarea_priv)
958                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
959                 }
960
961                 msleep(1);
962                 if (atomic_read(&dev_priv->mm.wedged))
963                         return -EAGAIN;
964         } while (!time_after(jiffies, end));
965         trace_i915_ring_wait_end(dev);
966         return -EBUSY;
967 }
968
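/*
 * Reserve space for @num_dwords in the ring, wrapping and/or waiting as
 * needed.  Callers emit exactly that many dwords with intel_ring_emit()
 * and then call intel_ring_advance() to hand them to the GPU, e.g. as in
 * bsd_ring_flush() above:
 *
 *        ret = intel_ring_begin(ring, 2);
 *        if (ret)
 *                return ret;
 *        intel_ring_emit(ring, MI_FLUSH);
 *        intel_ring_emit(ring, MI_NOOP);
 *        intel_ring_advance(ring);
 */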
969 int intel_ring_begin(struct intel_ring_buffer *ring,
970                      int num_dwords)
971 {
972         int n = 4*num_dwords;
973         int ret;
974
975         if (unlikely(ring->tail + n > ring->effective_size)) {
976                 ret = intel_wrap_ring_buffer(ring);
977                 if (unlikely(ret))
978                         return ret;
979         }
980
981         if (unlikely(ring->space < n)) {
982                 ret = intel_wait_ring_buffer(ring, n);
983                 if (unlikely(ret))
984                         return ret;
985         }
986
987         ring->space -= n;
988         return 0;
989 }
990
991 void intel_ring_advance(struct intel_ring_buffer *ring)
992 {
993         ring->tail &= ring->size - 1;
994         ring->write_tail(ring, ring->tail);
995 }
996
997 static const struct intel_ring_buffer render_ring = {
998         .name                   = "render ring",
999         .id                     = RING_RENDER,
1000         .mmio_base              = RENDER_RING_BASE,
1001         .size                   = 32 * PAGE_SIZE,
1002         .init                   = init_render_ring,
1003         .write_tail             = ring_write_tail,
1004         .flush                  = render_ring_flush,
1005         .add_request            = render_ring_add_request,
1006         .get_seqno              = ring_get_seqno,
1007         .irq_get                = render_ring_get_irq,
1008         .irq_put                = render_ring_put_irq,
1009         .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
1010         .cleanup                = render_ring_cleanup,
1011 };
1012
1013 /* ring buffer for bit-stream decoder */
1014
1015 static const struct intel_ring_buffer bsd_ring = {
1016         .name                   = "bsd ring",
1017         .id                     = RING_BSD,
1018         .mmio_base              = BSD_RING_BASE,
1019         .size                   = 32 * PAGE_SIZE,
1020         .init                   = init_ring_common,
1021         .write_tail             = ring_write_tail,
1022         .flush                  = bsd_ring_flush,
1023         .add_request            = ring_add_request,
1024         .get_seqno              = ring_get_seqno,
1025         .irq_get                = bsd_ring_get_irq,
1026         .irq_put                = bsd_ring_put_irq,
1027         .dispatch_execbuffer    = ring_dispatch_execbuffer,
1028 };
1029
1030
1031 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1032                                      u32 value)
1033 {
1034        drm_i915_private_t *dev_priv = ring->dev->dev_private;
1035
1036        /* Every tail move must follow the sequence below */
1037        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1038                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1039                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1040        I915_WRITE(GEN6_BSD_RNCID, 0x0);
1041
1042        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1043                                GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
1044                        50))
1045                DRM_ERROR("timed out waiting for IDLE Indicator\n");
1046
1047        I915_WRITE_TAIL(ring, value);
1048        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1049                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1050                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1051 }
1052
1053 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1054                            u32 invalidate_domains,
1055                            u32 flush_domains)
1056 {
1057         int ret;
1058
1059         if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
1060                 return 0;
1061
1062         ret = intel_ring_begin(ring, 4);
1063         if (ret)
1064                 return ret;
1065
1066         intel_ring_emit(ring, MI_FLUSH_DW);
1067         intel_ring_emit(ring, 0);
1068         intel_ring_emit(ring, 0);
1069         intel_ring_emit(ring, 0);
1070         intel_ring_advance(ring);
1071         return 0;
1072 }
1073
1074 static int
1075 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1076                               u32 offset, u32 len)
1077 {
1078        int ret;
1079
1080        ret = intel_ring_begin(ring, 2);
1081        if (ret)
1082                return ret;
1083
1084        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1085        /* bit0-7 is the length on GEN6+ */
1086        intel_ring_emit(ring, offset);
1087        intel_ring_advance(ring);
1088
1089        return 0;
1090 }
1091
1092 static bool
1093 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1094 {
1095         return gen6_ring_get_irq(ring,
1096                                  GT_USER_INTERRUPT,
1097                                  GEN6_RENDER_USER_INTERRUPT);
1098 }
1099
1100 static void
1101 gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1102 {
1103         return gen6_ring_put_irq(ring,
1104                                  GT_USER_INTERRUPT,
1105                                  GEN6_RENDER_USER_INTERRUPT);
1106 }
1107
1108 static bool
1109 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1110 {
1111         return gen6_ring_get_irq(ring,
1112                                  GT_GEN6_BSD_USER_INTERRUPT,
1113                                  GEN6_BSD_USER_INTERRUPT);
1114 }
1115
1116 static void
1117 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1118 {
1119         return gen6_ring_put_irq(ring,
1120                                  GT_GEN6_BSD_USER_INTERRUPT,
1121                                  GEN6_BSD_USER_INTERRUPT);
1122 }
1123
1124 /* ring buffer for Video Codec for Gen6+ */
1125 static const struct intel_ring_buffer gen6_bsd_ring = {
1126         .name                   = "gen6 bsd ring",
1127         .id                     = RING_BSD,
1128         .mmio_base              = GEN6_BSD_RING_BASE,
1129         .size                   = 32 * PAGE_SIZE,
1130         .init                   = init_ring_common,
1131         .write_tail             = gen6_bsd_ring_write_tail,
1132         .flush                  = gen6_ring_flush,
1133         .add_request            = gen6_add_request,
1134         .get_seqno              = ring_get_seqno,
1135         .irq_get                = gen6_bsd_ring_get_irq,
1136         .irq_put                = gen6_bsd_ring_put_irq,
1137         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1138 };
1139
1140 /* Blitter support (SandyBridge+) */
1141
1142 static bool
1143 blt_ring_get_irq(struct intel_ring_buffer *ring)
1144 {
1145         return gen6_ring_get_irq(ring,
1146                                  GT_BLT_USER_INTERRUPT,
1147                                  GEN6_BLITTER_USER_INTERRUPT);
1148 }
1149
1150 static void
1151 blt_ring_put_irq(struct intel_ring_buffer *ring)
1152 {
1153         gen6_ring_put_irq(ring,
1154                           GT_BLT_USER_INTERRUPT,
1155                           GEN6_BLITTER_USER_INTERRUPT);
1156 }
1157
1158
1159 /* Workaround for early steppings of SNB:
1160  * each time the BLT engine's ring tail is moved,
1161  * the first command parsed from the ring
1162  * must be MI_BATCH_BUFFER_START.
1163  */
1164 #define NEED_BLT_WORKAROUND(dev) \
1165         (IS_GEN6(dev) && (dev->pdev->revision < 8))
1166
1167 static inline struct drm_i915_gem_object *
1168 to_blt_workaround(struct intel_ring_buffer *ring)
1169 {
1170         return ring->private;
1171 }
1172
1173 static int blt_ring_init(struct intel_ring_buffer *ring)
1174 {
1175         if (NEED_BLT_WORKAROUND(ring->dev)) {
1176                 struct drm_i915_gem_object *obj;
1177                 u32 *ptr;
1178                 int ret;
1179
1180                 obj = i915_gem_alloc_object(ring->dev, 4096);
1181                 if (obj == NULL)
1182                         return -ENOMEM;
1183
1184                 ret = i915_gem_object_pin(obj, 4096, true);
1185                 if (ret) {
1186                         drm_gem_object_unreference(&obj->base);
1187                         return ret;
1188                 }
1189
1190                 ptr = kmap(obj->pages[0]);
1191                 *ptr++ = MI_BATCH_BUFFER_END;
1192                 *ptr++ = MI_NOOP;
1193                 kunmap(obj->pages[0]);
1194
1195                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1196                 if (ret) {
1197                         i915_gem_object_unpin(obj);
1198                         drm_gem_object_unreference(&obj->base);
1199                         return ret;
1200                 }
1201
1202                 ring->private = obj;
1203         }
1204
1205         return init_ring_common(ring);
1206 }
1207
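/*
 * When the workaround is active, prepend an MI_BATCH_BUFFER_START that
 * points at the pre-baked MI_BATCH_BUFFER_END page allocated in
 * blt_ring_init(), so the first command parsed after a tail move is
 * always a batch start.
 */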
1208 static int blt_ring_begin(struct intel_ring_buffer *ring,
1209                           int num_dwords)
1210 {
1211         if (ring->private) {
1212                 int ret = intel_ring_begin(ring, num_dwords+2);
1213                 if (ret)
1214                         return ret;
1215
1216                 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1217                 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1218
1219                 return 0;
1220         } else
1221                 return intel_ring_begin(ring, 4);
1222 }
1223
1224 static int blt_ring_flush(struct intel_ring_buffer *ring,
1225                            u32 invalidate_domains,
1226                            u32 flush_domains)
1227 {
1228         int ret;
1229
1230         if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
1231                 return 0;
1232
1233         ret = blt_ring_begin(ring, 4);
1234         if (ret)
1235                 return ret;
1236
1237         intel_ring_emit(ring, MI_FLUSH_DW);
1238         intel_ring_emit(ring, 0);
1239         intel_ring_emit(ring, 0);
1240         intel_ring_emit(ring, 0);
1241         intel_ring_advance(ring);
1242         return 0;
1243 }
1244
1245 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1246 {
1247         if (!ring->private)
1248                 return;
1249
1250         i915_gem_object_unpin(ring->private);
1251         drm_gem_object_unreference(ring->private);
1252         ring->private = NULL;
1253 }
1254
1255 static const struct intel_ring_buffer gen6_blt_ring = {
1256        .name                    = "blt ring",
1257        .id                      = RING_BLT,
1258        .mmio_base               = BLT_RING_BASE,
1259        .size                    = 32 * PAGE_SIZE,
1260        .init                    = blt_ring_init,
1261        .write_tail              = ring_write_tail,
1262        .flush                   = blt_ring_flush,
1263        .add_request             = gen6_add_request,
1264        .get_seqno               = ring_get_seqno,
1265        .irq_get                 = blt_ring_get_irq,
1266        .irq_put                 = blt_ring_put_irq,
1267        .dispatch_execbuffer     = gen6_ring_dispatch_execbuffer,
1268        .cleanup                 = blt_ring_cleanup,
1269 };
1270
1271 int intel_init_render_ring_buffer(struct drm_device *dev)
1272 {
1273         drm_i915_private_t *dev_priv = dev->dev_private;
1274         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1275
1276         *ring = render_ring;
1277         if (INTEL_INFO(dev)->gen >= 6) {
1278                 ring->add_request = gen6_add_request;
1279                 ring->irq_get = gen6_render_ring_get_irq;
1280                 ring->irq_put = gen6_render_ring_put_irq;
1281         } else if (IS_GEN5(dev)) {
1282                 ring->add_request = pc_render_add_request;
1283                 ring->get_seqno = pc_render_get_seqno;
1284         }
1285
1286         if (!I915_NEED_GFX_HWS(dev)) {
1287                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1288                 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1289         }
1290
1291         return intel_init_ring_buffer(dev, ring);
1292 }
1293
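/*
 * Render ring setup for the legacy DRI/UMS path: initialise the same
 * vfuncs as the KMS path above, but map the ring at the caller-provided
 * start/size instead of allocating a GEM object for it.
 */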
1294 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1295 {
1296         drm_i915_private_t *dev_priv = dev->dev_private;
1297         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1298
1299         *ring = render_ring;
1300         if (INTEL_INFO(dev)->gen >= 6) {
1301                 ring->add_request = gen6_add_request;
1302                 ring->irq_get = gen6_render_ring_get_irq;
1303                 ring->irq_put = gen6_render_ring_put_irq;
1304         } else if (IS_GEN5(dev)) {
1305                 ring->add_request = pc_render_add_request;
1306                 ring->get_seqno = pc_render_get_seqno;
1307         }
1308
1309         ring->dev = dev;
1310         INIT_LIST_HEAD(&ring->active_list);
1311         INIT_LIST_HEAD(&ring->request_list);
1312         INIT_LIST_HEAD(&ring->gpu_write_list);
1313
1314         ring->size = size;
1315         ring->effective_size = ring->size;
1316         if (IS_I830(ring->dev))
1317                 ring->effective_size -= 128;
1318
1319         ring->map.offset = start;
1320         ring->map.size = size;
1321         ring->map.type = 0;
1322         ring->map.flags = 0;
1323         ring->map.mtrr = 0;
1324
1325         drm_core_ioremap_wc(&ring->map, dev);
1326         if (ring->map.handle == NULL) {
1327                 DRM_ERROR("cannot ioremap virtual address for"
1328                           " ring buffer\n");
1329                 return -ENOMEM;
1330         }
1331
1332         ring->virtual_start = (void __force __iomem *)ring->map.handle;
1333         return 0;
1334 }
1335
1336 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1337 {
1338         drm_i915_private_t *dev_priv = dev->dev_private;
1339         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1340
1341         if (IS_GEN6(dev))
1342                 *ring = gen6_bsd_ring;
1343         else
1344                 *ring = bsd_ring;
1345
1346         return intel_init_ring_buffer(dev, ring);
1347 }
1348
1349 int intel_init_blt_ring_buffer(struct drm_device *dev)
1350 {
1351         drm_i915_private_t *dev_priv = dev->dev_private;
1352         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1353
1354         *ring = gen6_blt_ring;
1355
1356         return intel_init_ring_buffer(dev, ring);
1357 }