drm/i915: add set_tail hook in struct intel_ring_buffer
drivers/gpu/drm/i915/intel_ringbuffer.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 seqno;

        seqno = dev_priv->next_seqno;

        /* reserve 0 for non-seqno */
        if (++dev_priv->next_seqno == 0)
                dev_priv->next_seqno = 1;

        return seqno;
}

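/*
 * Emit an MI_FLUSH on the render ring, adjusting the command bits according
 * to the read/write domains being invalidated or flushed (see the domain
 * notes below).
 */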
static void
render_ring_flush(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                u32     invalidate_domains,
                u32     flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 cmd;

#if WATCH_EXEC
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
#endif

        trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
                                     invalidate_domains, flush_domains);

        if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
                /*
                 * read/write caches:
                 *
                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
                 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
                 * also flushed at 2d versus 3d pipeline switches.
                 *
                 * read-only caches:
                 *
                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
                 * MI_READ_FLUSH is set, and is always flushed on 965.
                 *
                 * I915_GEM_DOMAIN_COMMAND may not exist?
                 *
                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
                 * invalidated when MI_EXE_FLUSH is set.
                 *
                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
                 * invalidated with every MI_FLUSH.
                 *
                 * TLBs:
                 *
                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
                 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
                 * are flushed at any MI_FLUSH.
                 */

                cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
                if ((invalidate_domains|flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
                if (INTEL_INFO(dev)->gen < 4) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
                         */
                        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                                cmd |= MI_READ_FLUSH;
                }
                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                        cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
                intel_ring_begin(dev, ring, 2);
                intel_ring_emit(dev, ring, cmd);
                intel_ring_emit(dev, ring, MI_NOOP);
                intel_ring_advance(dev, ring);
        }
}

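/* MMIO accessors for the render ring's head, tail and active-head registers. */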
static unsigned int render_ring_get_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(PRB0_HEAD) & HEAD_ADDR;
}

static unsigned int render_ring_get_tail(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(PRB0_TAIL) & TAIL_ADDR;
}

static inline void render_ring_set_tail(struct drm_device *dev, u32 value)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(PRB0_TAIL, value);
}

static unsigned int render_ring_get_active_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD;

        return I915_READ(acthd_reg);
}

static void render_ring_advance_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        render_ring_set_tail(dev, ring->tail);
}

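/*
 * Common ring setup: stop the ring, program its start address, force the
 * head back to zero if the hardware failed to reset it, then enable the
 * ring and verify that it actually came up.
 */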
static int init_ring_common(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        u32 head;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        obj_priv = to_intel_bo(ring->gem_object);

        /* Stop the ring if it's running. */
        I915_WRITE(ring->regs.ctl, 0);
        I915_WRITE(ring->regs.head, 0);
        ring->set_tail(dev, 0);

        /* Initialize the ring. */
        I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
        head = ring->get_head(dev, ring);

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_ERROR("%s head not reset to zero "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ(ring->regs.ctl),
                                I915_READ(ring->regs.head),
                                I915_READ(ring->regs.tail),
                                I915_READ(ring->regs.start));

                I915_WRITE(ring->regs.head, 0);

                DRM_ERROR("%s head forced to zero "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ(ring->regs.ctl),
                                I915_READ(ring->regs.head),
                                I915_READ(ring->regs.tail),
                                I915_READ(ring->regs.start));
        }

        I915_WRITE(ring->regs.ctl,
                        ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_NO_REPORT | RING_VALID);

        head = I915_READ(ring->regs.head) & HEAD_ADDR;
        /* If the head is still not zero, the ring is dead */
        if (head != 0) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ(ring->regs.ctl),
                                I915_READ(ring->regs.head),
                                I915_READ(ring->regs.tail),
                                I915_READ(ring->regs.start));
                return -EIO;
        }

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
                ring->head = ring->get_head(dev, ring);
                ring->tail = ring->get_tail(dev, ring);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
        }
        return 0;
}

static int init_render_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = init_ring_common(dev, ring);
        int mode;

        if (INTEL_INFO(dev)->gen > 3) {
                mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
                if (IS_GEN6(dev))
                        mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
                I915_WRITE(MI_MODE, mode);
        }
        return ret;
}

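/*
 * Emit a qword-write PIPE_CONTROL with a depth stall to the given scratch
 * address; used below to flush pending PIPE_CONTROL writes out to memory.
 */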
#define PIPE_CONTROL_FLUSH(addr)                                        \
do {                                                                    \
        OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |          \
                 PIPE_CONTROL_DEPTH_STALL | 2);                         \
        OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
        OUT_RING(0);                                                    \
        OUT_RING(0);                                                    \
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_file *file_priv,
                u32 flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 seqno;

        seqno = i915_gem_get_seqno(dev);

        if (IS_GEN6(dev)) {
                BEGIN_LP_RING(6);
                OUT_RING(GFX_OP_PIPE_CONTROL | 3);
                OUT_RING(PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
                         PIPE_CONTROL_NOTIFY);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                OUT_RING(0);
                ADVANCE_LP_RING();
        } else if (HAS_PIPE_CONTROL(dev)) {
                u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

                /*
                 * Workaround qword write incoherence by flushing the
                 * PIPE_NOTIFY buffers out to memory before requesting
                 * an interrupt.
                 */
                BEGIN_LP_RING(32);
                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128; /* write to separate cachelines */
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
                         PIPE_CONTROL_NOTIFY);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                ADVANCE_LP_RING();
        } else {
                BEGIN_LP_RING(4);
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(seqno);

                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }
        return seqno;
}

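/*
 * Report the last seqno the render ring has completed, reading either the
 * PIPE_CONTROL scratch page or the hardware status page.
 */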
static u32
render_ring_get_gem_seqno(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        if (HAS_PIPE_CONTROL(dev))
                return ((volatile u32 *)(dev_priv->seqno_page))[0];
        else
                return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
render_ring_get_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void render_setup_status_page(struct drm_device *dev,
        struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        if (IS_GEN6(dev)) {
                I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
                I915_READ(HWS_PGA_GEN6); /* posting read */
        } else {
                I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
                I915_READ(HWS_PGA); /* posting read */
        }
}

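/*
 * Flushing the BSD ring only needs a plain MI_FLUSH; the invalidate/flush
 * domain arguments are ignored here.
 */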
void
bsd_ring_flush(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                u32     invalidate_domains,
                u32     flush_domains)
{
        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring, MI_FLUSH);
        intel_ring_emit(dev, ring, MI_NOOP);
        intel_ring_advance(dev, ring);
}

static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
}

static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
}

static inline void bsd_ring_set_tail(struct drm_device *dev, u32 value)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(BSD_RING_TAIL, value);
}

static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(BSD_RING_ACTHD);
}

static inline void bsd_ring_advance_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        bsd_ring_set_tail(dev, ring->tail);
}

static int init_bsd_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        return init_ring_common(dev, ring);
}

static u32
bsd_ring_add_request(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_file *file_priv,
                u32 flush_domains)
{
        u32 seqno;

        seqno = i915_gem_get_seqno(dev);

        intel_ring_begin(dev, ring, 4);
        intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(dev, ring,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(dev, ring, seqno);
        intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
        intel_ring_advance(dev, ring);

        DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

        return seqno;
}

static void bsd_setup_status_page(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
        I915_READ(BSD_HWS_PGA);
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static u32
bsd_ring_get_gem_seqno(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

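/* Kick a batch buffer off on the BSD ring with MI_BATCH_BUFFER_START. */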
static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_i915_gem_execbuffer2 *exec,
                struct drm_clip_rect *cliprects,
                uint64_t exec_offset)
{
        uint32_t exec_start;
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
                        (2 << 6) | MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(dev, ring, exec_start);
        intel_ring_advance(dev, ring);
        return 0;
}

static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_i915_gem_execbuffer2 *exec,
                struct drm_clip_rect *cliprects,
                uint64_t exec_offset)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = exec->num_cliprects;
        int i = 0, count;
        uint32_t exec_start, exec_len;
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;

        trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, cliprects, i,
                                                exec->DR1, exec->DR4);
                        if (ret)
                                return ret;
                }

                if (IS_I830(dev) || IS_845G(dev)) {
                        intel_ring_begin(dev, ring, 4);
                        intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
                        intel_ring_emit(dev, ring,
                                        exec_start | MI_BATCH_NON_SECURE);
                        intel_ring_emit(dev, ring, exec_start + exec_len - 4);
                        intel_ring_emit(dev, ring, 0);
                } else {
                        intel_ring_begin(dev, ring, 4);
                        if (INTEL_INFO(dev)->gen >= 4) {
                                intel_ring_emit(dev, ring,
                                                MI_BATCH_BUFFER_START | (2 << 6)
                                                | MI_BATCH_NON_SECURE_I965);
                                intel_ring_emit(dev, ring, exec_start);
                        } else {
                                intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
                                                | (2 << 6));
                                intel_ring_emit(dev, ring, exec_start |
                                                MI_BATCH_NON_SECURE);
                        }
                }
                intel_ring_advance(dev, ring);
        }

        if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
                intel_ring_begin(dev, ring, 2);
                intel_ring_emit(dev, ring, MI_FLUSH |
                                MI_NO_WRITE_FLUSH |
                                MI_INVALIDATE_ISP);
                intel_ring_emit(dev, ring, MI_NOOP);
                intel_ring_advance(dev, ring);
        }
        /* XXX breadcrumb */

        return 0;
}

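/*
 * Teardown and setup of the per-ring hardware status page: a single pinned,
 * kmap'ed page that the GPU writes seqnos and other status into.
 */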
static void cleanup_status_page(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        obj = ring->status_page.obj;
        if (obj == NULL)
                return;
        obj_priv = to_intel_bo(obj);

        kunmap(obj_priv->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(obj);
        ring->status_page.obj = NULL;

        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
                goto err;
        }
        obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0)
                goto err_unref;

        ring->status_page.gfx_addr = obj_priv->gtt_offset;
        ring->status_page.page_addr = kmap(obj_priv->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                ret = -ENOMEM;
                goto err_unpin;
        }
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        ring->setup_status_page(dev, ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                        ring->name, ring->status_page.gfx_addr);

        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(obj);
err:
        return ret;
}

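/*
 * Set up a ring's status page (if the chipset needs one), then allocate,
 * pin and map the ring object itself and call the ring's init hook.
 */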
int intel_init_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj_priv;
        struct drm_gem_object *obj;
        int ret;

        ring->dev = dev;

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(dev, ring);
                if (ret)
                        return ret;
        }

        obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
                goto err_hws;
        }

        ring->gem_object = obj;

        ret = i915_gem_object_pin(obj, ring->alignment);
        if (ret)
                goto err_unref;

        obj_priv = to_intel_bo(obj);
        ring->map.size = ring->size;
        ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }

        ring->virtual_start = ring->map.handle;
        ret = ring->init(dev, ring);
        if (ret)
                goto err_unmap;

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
                ring->head = ring->get_head(dev, ring);
                ring->tail = ring->get_tail(dev, ring);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
        }
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        return ret;

err_unmap:
        drm_core_ioremapfree(&ring->map, dev);
err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(obj);
        ring->gem_object = NULL;
err_hws:
        cleanup_status_page(dev, ring);
        return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        if (ring->gem_object == NULL)
                return;

        drm_core_ioremapfree(&ring->map, dev);

        i915_gem_object_unpin(ring->gem_object);
        drm_gem_object_unreference(ring->gem_object);
        ring->gem_object = NULL;
        cleanup_status_page(dev, ring);
}

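/*
 * Pad the space between the current tail and the end of the ring with
 * MI_NOOPs so that the next commands start again at offset zero.
 */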
int intel_wrap_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        unsigned int *virt;
        int rem;
        rem = ring->size - ring->tail;

        if (ring->space < rem) {
                int ret = intel_wait_ring_buffer(dev, ring, rem);
                if (ret)
                        return ret;
        }

        virt = (unsigned int *)(ring->virtual_start + ring->tail);
        rem /= 8;
        while (rem--) {
                *virt++ = MI_NOOP;
                *virt++ = MI_NOOP;
        }

        ring->tail = 0;
        ring->space = ring->head - 8;

        return 0;
}

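/*
 * Busy-wait (yielding between polls) until at least n bytes of ring space
 * are free, giving up after three seconds.
 */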
int intel_wait_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring, int n)
{
        unsigned long end;

        trace_i915_ring_wait_begin(dev);
        end = jiffies + 3 * HZ;
        do {
                ring->head = ring->get_head(dev, ring);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(dev);
                        return 0;
                }

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }

                yield();
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end(dev);
        return -EBUSY;
}

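/* Reserve space for num_dwords dwords in the ring, wrapping or waiting as required. */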
void intel_ring_begin(struct drm_device *dev,
                struct intel_ring_buffer *ring, int num_dwords)
{
        int n = 4 * num_dwords;
        if (unlikely(ring->tail + n > ring->size))
                intel_wrap_ring_buffer(dev, ring);
        if (unlikely(ring->space < n))
                intel_wait_ring_buffer(dev, ring, n);

        ring->space -= n;
}

void intel_ring_advance(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        ring->tail &= ring->size - 1;
        ring->advance_ring(dev, ring);
}

void intel_fill_struct(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                void *data,
                unsigned int len)
{
        unsigned int *virt = ring->virtual_start + ring->tail;
        BUG_ON((len & (4 - 1)) != 0);
        intel_ring_begin(dev, ring, len / 4);
        memcpy(virt, data, len);
        ring->tail += len;
        ring->tail &= ring->size - 1;
        ring->space -= len;
        intel_ring_advance(dev, ring);
}

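/*
 * Default descriptor for the render ring.  The set_tail hook added here
 * lets the ring-independent init_ring_common() reset the tail without
 * knowing which register backs this particular ring.
 */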
static struct intel_ring_buffer render_ring = {
        .name                   = "render ring",
        .id                     = RING_RENDER,
        .regs                   = {
                .ctl = PRB0_CTL,
                .head = PRB0_HEAD,
                .tail = PRB0_TAIL,
                .start = PRB0_START
        },
        .size                   = 32 * PAGE_SIZE,
        .alignment              = PAGE_SIZE,
        .virtual_start          = NULL,
        .dev                    = NULL,
        .gem_object             = NULL,
        .head                   = 0,
        .tail                   = 0,
        .space                  = 0,
        .user_irq_refcount      = 0,
        .irq_gem_seqno          = 0,
        .waiting_gem_seqno      = 0,
        .setup_status_page      = render_setup_status_page,
        .init                   = init_render_ring,
        .get_head               = render_ring_get_head,
        .get_tail               = render_ring_get_tail,
        .set_tail               = render_ring_set_tail,
        .get_active_head        = render_ring_get_active_head,
        .advance_ring           = render_ring_advance_ring,
        .flush                  = render_ring_flush,
        .add_request            = render_ring_add_request,
        .get_gem_seqno          = render_ring_get_gem_seqno,
        .user_irq_get           = render_ring_get_user_irq,
        .user_irq_put           = render_ring_put_user_irq,
        .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
        .status_page            = {NULL, 0, NULL},
        .map                    = {0,}
};

/* ring buffer for bit-stream decoder */

static struct intel_ring_buffer bsd_ring = {
        .name                   = "bsd ring",
        .id                     = RING_BSD,
        .regs                   = {
                .ctl = BSD_RING_CTL,
                .head = BSD_RING_HEAD,
                .tail = BSD_RING_TAIL,
                .start = BSD_RING_START
        },
        .size                   = 32 * PAGE_SIZE,
        .alignment              = PAGE_SIZE,
        .virtual_start          = NULL,
        .dev                    = NULL,
        .gem_object             = NULL,
        .head                   = 0,
        .tail                   = 0,
        .space                  = 0,
        .user_irq_refcount      = 0,
        .irq_gem_seqno          = 0,
        .waiting_gem_seqno      = 0,
        .setup_status_page      = bsd_setup_status_page,
        .init                   = init_bsd_ring,
        .get_head               = bsd_ring_get_head,
        .get_tail               = bsd_ring_get_tail,
        .set_tail               = bsd_ring_set_tail,
        .get_active_head        = bsd_ring_get_active_head,
        .advance_ring           = bsd_ring_advance_ring,
        .flush                  = bsd_ring_flush,
        .add_request            = bsd_ring_add_request,
        .get_gem_seqno          = bsd_ring_get_gem_seqno,
        .user_irq_get           = bsd_ring_get_user_irq,
        .user_irq_put           = bsd_ring_put_user_irq,
        .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
        .status_page            = {NULL, 0, NULL},
        .map                    = {0,}
};

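/*
 * Copy the static ring descriptors into the device-private structure and
 * bring the hardware up.  Chipsets that do not need a GTT-backed status
 * page point the render ring at the driver's existing DMA status page.
 */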
int intel_init_render_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        dev_priv->render_ring = render_ring;

        if (!I915_NEED_GFX_HWS(dev)) {
                dev_priv->render_ring.status_page.page_addr
                        = dev_priv->status_page_dmah->vaddr;
                memset(dev_priv->render_ring.status_page.page_addr,
                                0, PAGE_SIZE);
        }

        return intel_init_ring_buffer(dev, &dev_priv->render_ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        dev_priv->bsd_ring = bsd_ring;

        return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
}