1 /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
38 #include <linux/interrupt.h> /* For task queue support */
39 #include <linux/delay.h>
40 #include <linux/pagemap.h>
42 #define I810_BUF_FREE 2
43 #define I810_BUF_CLIENT 1
44 #define I810_BUF_HARDWARE 0
46 #define I810_BUF_UNMAPPED 0
47 #define I810_BUF_MAPPED 1
49 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
50 #define down_write down
/* Dump the hardware status page to the DRM debug log: the fixed words
 * (interrupt status, ring head pointers, last-render stamp, driver
 * counter) followed by the per-buffer in-use words stored from index 6
 * onward.  NOTE(review): the declaration of loop index i and the
 * closing braces are elided in this listing.
 */
54 static inline void i810_print_status_page(drm_device_t *dev)
56 	drm_device_dma_t *dma = dev->dma;
57 	drm_i810_private_t *dev_priv = dev->dev_private;
58 	u32 *temp = dev_priv->hw_status_page;
61 	DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]);
62 	DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
63 	DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
64 	DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
65 	DRM_DEBUG( "hw_status: Last Render: %x\n", temp[4]);
66 	DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
	/* Buffer status words live after the six fixed entries. */
67 	for(i = 6; i < dma->buf_count + 6; i++) {
68 	   DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
/* Claim a free DMA buffer for a client.  Walks the buffer list and
 * atomically flips the first buffer whose status-page in-use word is
 * I810_BUF_FREE over to client ownership via cmpxchg; the winning
 * buffer is returned.  NOTE(review): the return statements and the
 * second cmpxchg argument line are elided in this listing.
 */
72 static drm_buf_t *i810_freelist_get(drm_device_t *dev)
74 	drm_device_dma_t *dma = dev->dma;
78 	/* Linear search might not be the best solution */
80 	for (i = 0; i < dma->buf_count; i++) {
81 	   	drm_buf_t *buf = dma->buflist[ i ];
82 	   	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
83 		/* In use is already a pointer */
		/* Atomic claim: only succeeds if the word was still FREE. */
84 	   	used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
86 		if(used == I810_BUF_FREE) {
93 /* This should only be called if the buffer is not sent to the hardware
94 * yet, the hardware updates in use for us once its on the ring buffer.
/* Return a client-owned buffer to the free list by atomically flipping
 * its in-use word from I810_BUF_CLIENT back to I810_BUF_FREE.  Logs an
 * error if the buffer was not actually owned by a client.
 */
97 static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
99 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
102 	/* In use is already a pointer */
103 	used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
104 	if(used != I810_BUF_CLIENT) {
105 	   	DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
/* File operations temporarily swapped onto the DRM fd while a buffer
 * is being mmap'ed, so the mmap call lands in i810_mmap_buffers
 * (see i810_map_buffer below).
 */
112 static struct file_operations i810_buffer_fops = {
115 	.release = DRM(release),
117 	.mmap = i810_mmap_buffers,
118 	.fasync = DRM(fasync),
/* mmap handler used while i810_buffer_fops is installed: maps the
 * buffer stashed in dev_priv->mmap_buffer into the caller's address
 * space with remap_page_range and marks it I810_BUF_MAPPED.  VM_IO |
 * VM_DONTCOPY keeps the mapping out of core dumps and fork copies.
 * NOTE(review): the remap_page_range physical-address argument line is
 * elided in this listing.
 */
121 int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
123 	drm_file_t *priv = filp->private_data;
125 	drm_i810_private_t *dev_priv;
127 	drm_i810_buf_priv_t *buf_priv;
131 	dev_priv = dev->dev_private;
	/* Buffer selected by i810_map_buffer just before the do_mmap call. */
132 	buf = dev_priv->mmap_buffer;
133 	buf_priv = buf->dev_private;
135 	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
138 	buf_priv->currently_mapped = I810_BUF_MAPPED;
141 	if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
143 			     vma->vm_end - vma->vm_start,
144 			     vma->vm_page_prot)) return -EAGAIN;
/* Map a DMA buffer into the calling process.  Temporarily swaps the
 * fd's f_op to i810_buffer_fops and records the target buffer in
 * dev_priv->mmap_buffer so the do_mmap call is routed to
 * i810_mmap_buffers; restores both afterwards.  Returns -EINVAL if the
 * buffer is already mapped, otherwise the (negative) do_mmap errno on
 * failure.  NOTE(review): "¤t" on the down_write/up_write lines is
 * mojibake for "&current" — fix the encoding when restoring this file.
 */
148 static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
150 	drm_file_t *priv = filp->private_data;
151 	drm_device_t *dev = priv->dev;
152 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
153 	drm_i810_private_t *dev_priv = dev->dev_private;
154 	struct file_operations *old_fops;
157 	if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL;
	/* do_mmap requires the caller to hold mmap_sem for writing. */
159 	down_write( ¤t->mm->mmap_sem );
160 	old_fops = filp->f_op;
161 	filp->f_op = &i810_buffer_fops;
162 	dev_priv->mmap_buffer = buf;
163 	buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
164 					    PROT_READ|PROT_WRITE,
167 	dev_priv->mmap_buffer = NULL;
168 	filp->f_op = old_fops;
	/* Addresses in the top 1024 bytes of the space are errno values. */
169 	if ((unsigned long)buf_priv->virtual > -1024UL) {
171 		DRM_ERROR("mmap error\n");
172 		retcode = (signed int)buf_priv->virtual;
173 		buf_priv->virtual = 0;
175 	up_write( ¤t->mm->mmap_sem );
/* Undo i810_map_buffer: do_munmap the buffer from the current process
 * and reset its mapping state.  No-op (early return, elided here) if
 * the buffer is not currently mapped.  NOTE(review): "¤t" is
 * mojibake for "&current" — fix the encoding when restoring this file.
 */
180 static int i810_unmap_buffer(drm_buf_t *buf)
182 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
185 	if(buf_priv->currently_mapped != I810_BUF_MAPPED)
188 	down_write(¤t->mm->mmap_sem);
189 	retcode = do_munmap(current->mm,
190 			    (unsigned long)buf_priv->virtual,
191 			    (size_t) buf->total);
192 	up_write(¤t->mm->mmap_sem);
194 	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
195 	buf_priv->virtual = 0;
/* Grab a free buffer, map it into the caller, and fill in the request
 * descriptor (idx, size, user virtual address) for return to
 * userspace.  On map failure the buffer is put back on the free list.
 */
200 static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
204 	drm_i810_buf_priv_t *buf_priv;
207 	buf = i810_freelist_get(dev);
210 	   	DRM_DEBUG("retcode=%d\n", retcode);
214 	retcode = i810_map_buffer(buf, filp);
		/* Mapping failed: release our claim on the buffer. */
216 		i810_freelist_put(dev, buf);
217 	   	DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
221 	buf_priv = buf->dev_private;
223 	d->request_idx = buf->idx;
224 	d->request_size = buf->total;
225 	d->virtual = buf_priv->virtual;
/* Tear down the driver's DMA state: disable interrupts, unmap the ring
 * buffer, free the PCI-consistent hardware status page (re-pointing the
 * hardware status register first), free dev_private, and unmap each
 * buffer's kernel-side ioremap.
 */
230 int i810_dma_cleanup(drm_device_t *dev)
232 	drm_device_dma_t *dma = dev->dma;
235 	/* Make sure interrupts are disabled here because the uninstall ioctl
236 	 * may not have been called from userspace and after dev_private
237 	 * is freed, it's too late.
239 	if (dev->irq) DRM(irq_uninstall)(dev);
242 	if (dev->dev_private) {
244 	   	drm_i810_private_t *dev_priv =
245 	   		(drm_i810_private_t *) dev->dev_private;
247 	   	if(dev_priv->ring.virtual_start) {
248 		   	DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
249 					 dev_priv->ring.Size, dev);
251 	   	if (dev_priv->hw_status_page) {
252 			pci_free_consistent(dev->pdev, PAGE_SIZE,
253 					    dev_priv->hw_status_page,
254 					    dev_priv->dma_status_page);
255 	   		/* Need to rewrite hardware status page */
256 	   		I810_WRITE(0x02080, 0x1ffff000);
258 	   	DRM(free)(dev->dev_private, sizeof(drm_i810_private_t),
260 	   	dev->dev_private = NULL;
		/* Release the per-buffer kernel mappings made in freelist_init. */
262 		for (i = 0; i < dma->buf_count; i++) {
263 			drm_buf_t *buf = dma->buflist[ i ];
264 			drm_i810_buf_priv_t *buf_priv = buf->dev_private;
265 			if ( buf_priv->kernel_virtual && buf->total )
266 				DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
/* Busy-wait until at least n bytes are free in the ring buffer.
 * Recomputes free space from the hardware head pointer each iteration;
 * the 3-second timeout is restarted whenever the head moves, so only a
 * truly wedged engine trips the lockup path.
 */
272 static int i810_wait_ring(drm_device_t *dev, int n)
274 	drm_i810_private_t *dev_priv = dev->dev_private;
275 	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
278 	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
280 	end = jiffies + (HZ*3);
281 	while (ring->space < n) {
282 	   	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		/* +8 keeps tail from catching head exactly (ring-full vs empty). */
283 	   	ring->space = ring->head - (ring->tail+8);
284 		if (ring->space < 0) ring->space += ring->Size;
286 		if (ring->head != last_head)
287 		   	end = jiffies + (HZ*3);
290 	   	if(time_before(end, jiffies)) {
291 	   		DRM_ERROR("space: %d wanted %d\n", ring->space, n);
292 	   		DRM_ERROR("lockup\n");
/* Resynchronize the software ring-buffer bookkeeping (head, tail,
 * space) with the hardware registers; called before dispatching so the
 * driver's view matches whatever userspace last did with the ring.
 */
302 static void i810_kernel_lost_context(drm_device_t *dev)
304 	drm_i810_private_t *dev_priv = dev->dev_private;
305 	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
307 	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
308 	ring->tail = I810_READ(LP_RING + RING_TAIL);
309 	ring->space = ring->head - (ring->tail+8);
310 	if (ring->space < 0) ring->space += ring->Size;
/* Initialize the buffer free list.  Each buffer's in-use word is a u32
 * slot in the hardware status page (starting at my_idx), so the
 * hardware itself can mark buffers free once consumed.  Fails when
 * there are more than 1019 buffers — the status page cannot hold more
 * slots.  Also ioremaps each buffer for kernel-side access.
 */
313 static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
315 	drm_device_dma_t *dma = dev->dma;
317 	u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
320 	if(dma->buf_count > 1019) {
321 	   	/* Not enough space in the status page for the freelist */
325 	for (i = 0; i < dma->buf_count; i++) {
326 	   	drm_buf_t *buf = dma->buflist[ i ];
327 	   	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
329 	   	buf_priv->in_use = hw_status++;
330 	   	buf_priv->my_use_idx = my_idx;
333 	   	*buf_priv->in_use = I810_BUF_FREE;
335 		buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
/* One-time DMA setup from the I810_INIT_DMA ioctl parameters: locate
 * the SAREA/mmio/buffer maps, ioremap the ring buffer, allocate and
 * program the hardware status page, and build the buffer free list.
 * Every failure path stores dev_priv into dev->dev_private first so
 * i810_dma_cleanup can release whatever was set up so far.
 */
341 static int i810_dma_initialize(drm_device_t *dev,
342 			       drm_i810_private_t *dev_priv,
343 			       drm_i810_init_t *init)
345 	struct list_head *list;
347 	memset(dev_priv, 0, sizeof(drm_i810_private_t));
	/* The SAREA is the _DRM_SHM map flagged as containing the lock. */
349 	list_for_each(list, &dev->maplist->head) {
350 	   	drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
352 		    r_list->map->type == _DRM_SHM &&
353 		    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
354 			dev_priv->sarea_map = r_list->map;
358 	if(!dev_priv->sarea_map) {
359 		dev->dev_private = (void *)dev_priv;
360 		i810_dma_cleanup(dev);
361 		DRM_ERROR("can not find sarea!\n");
364 	DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
365 	if(!dev_priv->mmio_map) {
366 		dev->dev_private = (void *)dev_priv;
367 		i810_dma_cleanup(dev);
368 		DRM_ERROR("can not find mmio map!\n");
371 	DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
372 	if(!dev_priv->buffer_map) {
373 		dev->dev_private = (void *)dev_priv;
374 		i810_dma_cleanup(dev);
375 		DRM_ERROR("can not find dma buffer map!\n");
379 	dev_priv->sarea_priv = (drm_i810_sarea_t *)
380 		((u8 *)dev_priv->sarea_map->handle +
381 		 init->sarea_priv_offset);
383 	dev_priv->ring.Start = init->ring_start;
384 	dev_priv->ring.End = init->ring_end;
385 	dev_priv->ring.Size = init->ring_size;
	/* Ring lives in AGP space; map it for CPU access. */
387 	dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
389 						    init->ring_size, dev);
391 	if (dev_priv->ring.virtual_start == NULL) {
392 		dev->dev_private = (void *) dev_priv;
393 		i810_dma_cleanup(dev);
394 		DRM_ERROR("can not ioremap virtual address for"
	/* NOTE(review): tail_mask assumes ring.Size is a power of two. */
399 	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
401 	dev_priv->w = init->w;
402 	dev_priv->h = init->h;
403 	dev_priv->pitch = init->pitch;
404 	dev_priv->back_offset = init->back_offset;
405 	dev_priv->depth_offset = init->depth_offset;
407 	dev_priv->overlay_offset = init->overlay_offset;
408 	dev_priv->overlay_physical = init->overlay_physical;
	/* Pre-combined buffer-info dwords used by the Emit*Verified paths. */
410 	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
411 	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
412 	dev_priv->zi1 = init->depth_offset | init->pitch_bits;
414 	/* Program Hardware Status Page */
415 	dev_priv->hw_status_page =
416 		pci_alloc_consistent(dev->pdev, PAGE_SIZE,
417 						&dev_priv->dma_status_page);
418 	if (!dev_priv->hw_status_page) {
419 		dev->dev_private = (void *)dev_priv;
420 		i810_dma_cleanup(dev);
421 		DRM_ERROR("Can not allocate hardware status page\n");
424 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
425 	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
	/* Hand the status page bus address to the hardware. */
427 	I810_WRITE(0x02080, dev_priv->dma_status_page);
428 	DRM_DEBUG("Enabled hardware status page\n");
430 	/* Now we need to init our freelist */
431 	if(i810_freelist_init(dev, dev_priv) != 0) {
432 		dev->dev_private = (void *)dev_priv;
433 	   	i810_dma_cleanup(dev);
434 	   	DRM_ERROR("Not enough space in the status page for"
438 	dev->dev_private = (void *)dev_priv;
/* I810 DMA init/cleanup ioctl entry point.  Copies the init block from
 * userspace and dispatches (switch statement elided in this listing) to
 * i810_dma_initialize for I810_INIT_DMA or i810_dma_cleanup for
 * I810_CLEANUP_DMA.
 */
443 int i810_dma_init(struct inode *inode, struct file *filp,
444 		  unsigned int cmd, unsigned long arg)
446 	drm_file_t *priv = filp->private_data;
447 	drm_device_t *dev = priv->dev;
448 	drm_i810_private_t *dev_priv;
449 	drm_i810_init_t init;
452 	if (copy_from_user(&init, (drm_i810_init_t *)arg, sizeof(init)))
457 	   	dev_priv = DRM(alloc)(sizeof(drm_i810_private_t),
459 	   	if(dev_priv == NULL) return -ENOMEM;
460 	   	retcode = i810_dma_initialize(dev, dev_priv, &init);
462 	   case I810_CLEANUP_DMA:
463 	   	retcode = i810_dma_cleanup(dev);
475 /* Most efficient way to verify state for the i810 is as it is
476 * emitted. Non-conformant state is silently dropped.
/* Emit verified context-state registers to the ring.  The first four
 * dwords (color factor, stipple) are emitted unconditionally; the rest
 * are only passed through if they look like valid GFX_OP commands
 * (top bits 3<<29, opcode below 0x1d<<24) — anything else is dropped.
 * NOTE(review): "constext" in the printk is a typo for "context"; it is
 * a runtime string so it is left untouched here.
 */
478 static void i810EmitContextVerified( drm_device_t *dev,
481 	drm_i810_private_t *dev_priv = dev->dev_private;
485 	BEGIN_LP_RING( I810_CTX_SETUP_SIZE );
487 	OUT_RING( GFX_OP_COLOR_FACTOR );
488 	OUT_RING( code[I810_CTXREG_CF1] );
490 	OUT_RING( GFX_OP_STIPPLE );
491 	OUT_RING( code[I810_CTXREG_ST1] );
493 	for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) {
494 		if ((code[i] & (7<<29)) == (3<<29) &&
495 		    (code[i] & (0x1f<<24)) < (0x1d<<24))
500 		else printk("constext state dropped!!!\n");
/* Emit verified texture-state registers to the ring.  The GFX_OP_MAP_INFO
 * block is emitted as-is; remaining dwords must pass the same
 * command-opcode sanity check as the context path or they are dropped.
 */
509 static void i810EmitTexVerified( drm_device_t *dev,
510 				 volatile unsigned int *code )
512 	drm_i810_private_t *dev_priv = dev->dev_private;
516 	BEGIN_LP_RING( I810_TEX_SETUP_SIZE );
518 	OUT_RING( GFX_OP_MAP_INFO );
519 	OUT_RING( code[I810_TEXREG_MI1] );
520 	OUT_RING( code[I810_TEXREG_MI2] );
521 	OUT_RING( code[I810_TEXREG_MI3] );
523 	for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) {
525 		if ((code[i] & (7<<29)) == (3<<29) &&
526 		    (code[i] & (0x1f<<24)) < (0x1d<<24))
531 		else printk("texture state dropped!!!\n");
541 /* Need to do some additional checking when setting the dest buffer.
/* Emit destination-buffer state.  The DI1 dword is only accepted when
 * it matches the driver's known front or back buffer descriptor —
 * preventing userspace from pointing rendering at arbitrary memory;
 * anything else is dropped.  Z-buffer info always comes from the
 * trusted dev_priv->zi1 value, never from userspace.
 */
543 static void i810EmitDestVerified( drm_device_t *dev,
544 				  volatile unsigned int *code )
546 	drm_i810_private_t *dev_priv = dev->dev_private;
550 	BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );
552 	tmp = code[I810_DESTREG_DI1];
553 	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
554 		OUT_RING( CMD_OP_DESTBUFFER_INFO );
558 		printk("buffer state dropped\n");
562 	OUT_RING( CMD_OP_Z_BUFFER_INFO );
563 	OUT_RING( dev_priv->zi1 );
565 	OUT_RING( GFX_OP_DESTBUFFER_VARS );
566 	OUT_RING( code[I810_DESTREG_DV1] );
568 	OUT_RING( GFX_OP_DRAWRECT_INFO );
569 	OUT_RING( code[I810_DESTREG_DR1] );
570 	OUT_RING( code[I810_DESTREG_DR2] );
571 	OUT_RING( code[I810_DESTREG_DR3] );
572 	OUT_RING( code[I810_DESTREG_DR4] );
/* Flush dirty state from the SAREA to the hardware: for each dirty bit
 * (buffers, context, tex0, tex1) emit the corresponding verified state
 * block and clear that bit.
 */
580 static void i810EmitState( drm_device_t *dev )
582 	drm_i810_private_t *dev_priv = dev->dev_private;
583 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
584 	unsigned int dirty = sarea_priv->dirty;
586 	if (dirty & I810_UPLOAD_BUFFERS) {
587 		i810EmitDestVerified( dev, sarea_priv->BufferState );
588 		sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
591 	if (dirty & I810_UPLOAD_CTX) {
592 		i810EmitContextVerified( dev, sarea_priv->ContextState );
593 		sarea_priv->dirty &= ~I810_UPLOAD_CTX;
596 	if (dirty & I810_UPLOAD_TEX0) {
597 		i810EmitTexVerified( dev, sarea_priv->TexState[0] );
598 		sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
601 	if (dirty & I810_UPLOAD_TEX1) {
602 		i810EmitTexVerified( dev, sarea_priv->TexState[1] );
603 		sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
/* Clear the requested buffers (front/back/depth per flags) over every
 * SAREA clip rectangle by emitting solid-pattern color-blt commands.
 * Cliprects that are inverted or exceed the screen dimensions are
 * skipped (validation against untrusted SAREA contents).
 */
611 static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
612 				     unsigned int clear_color,
613 				     unsigned int clear_zval )
615 	drm_i810_private_t *dev_priv = dev->dev_private;
616 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
617 	int nbox = sarea_priv->nbox;
618 	drm_clip_rect_t *pbox = sarea_priv->boxes;
619 	int pitch = dev_priv->pitch;
624 	i810_kernel_lost_context(dev);
626 	if (nbox > I810_NR_SAREA_CLIPRECTS)
627 	   	nbox = I810_NR_SAREA_CLIPRECTS;
629 	for (i = 0 ; i < nbox ; i++, pbox++) {
630 		unsigned int x = pbox->x1;
631 		unsigned int y = pbox->y1;
632 		unsigned int width = (pbox->x2 - x) * cpp;
633 		unsigned int height = pbox->y2 - y;
634 		unsigned int start = y * pitch + x * cpp;
		/* Reject malformed or out-of-bounds cliprects. */
636 		if (pbox->x1 > pbox->x2 ||
637 		    pbox->y1 > pbox->y2 ||
638 		    pbox->x2 > dev_priv->w ||
639 		    pbox->y2 > dev_priv->h)
642 	      	if ( flags & I810_FRONT ) {
644 			OUT_RING( BR00_BITBLT_CLIENT |
645 				  BR00_OP_COLOR_BLT | 0x3 );
646 			OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
647 			OUT_RING( (height << 16) | width );
649 			OUT_RING( clear_color );
654 		if ( flags & I810_BACK ) {
656 			OUT_RING( BR00_BITBLT_CLIENT |
657 				  BR00_OP_COLOR_BLT | 0x3 );
658 			OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
659 			OUT_RING( (height << 16) | width );
660 			OUT_RING( dev_priv->back_offset + start );
661 			OUT_RING( clear_color );
666 		if ( flags & I810_DEPTH ) {
668 			OUT_RING( BR00_BITBLT_CLIENT |
669 				  BR00_OP_COLOR_BLT | 0x3 );
670 			OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
671 			OUT_RING( (height << 16) | width );
672 			OUT_RING( dev_priv->depth_offset + start );
673 			OUT_RING( clear_zval );
/* Copy the back buffer to the front buffer (buffer swap) for each
 * valid SAREA cliprect, using SRC_COPY blits with ROP 0xCC (straight
 * copy).  Invalid cliprects are skipped as in dispatch_clear.
 */
680 static void i810_dma_dispatch_swap( drm_device_t *dev )
682 	drm_i810_private_t *dev_priv = dev->dev_private;
683 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
684 	int nbox = sarea_priv->nbox;
685 	drm_clip_rect_t *pbox = sarea_priv->boxes;
686 	int pitch = dev_priv->pitch;
	/* Source of the blit is the back buffer. */
688 	int ofs = dev_priv->back_offset;
692 	i810_kernel_lost_context(dev);
694 	if (nbox > I810_NR_SAREA_CLIPRECTS)
695 	     nbox = I810_NR_SAREA_CLIPRECTS;
697 	for (i = 0 ; i < nbox; i++, pbox++)
699 		unsigned int w = pbox->x2 - pbox->x1;
700 		unsigned int h = pbox->y2 - pbox->y1;
701 		unsigned int dst = pbox->x1*cpp + pbox->y1*pitch;
702 		unsigned int start = ofs + dst;
704 		if (pbox->x1 > pbox->x2 ||
705 		    pbox->y1 > pbox->y2 ||
706 		    pbox->x2 > dev_priv->w ||
707 		    pbox->y2 > dev_priv->h)
711 		OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 );
712 		OUT_RING( pitch | (0xCC << 16));
713 		OUT_RING( (h << 16) | (w * cpp));
/* Dispatch a client vertex buffer: flush dirty state, patch the
 * primitive header and terminating dword into the still-mapped buffer,
 * unmap it, then fire it as a protected batch buffer once per cliprect
 * (emitting a scissor for each).  Finally hands buffer ownership to
 * the hardware and arranges for the status page to mark it FREE when
 * the batch completes.
 */
722 static void i810_dma_dispatch_vertex(drm_device_t *dev,
727    	drm_i810_private_t *dev_priv = dev->dev_private;
728 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
729    	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
730    	drm_clip_rect_t *box = sarea_priv->boxes;
731    	int nbox = sarea_priv->nbox;
	/* Batch start is the AGP offset of the buffer. */
732 	unsigned long address = (unsigned long)buf->bus_address;
733 	unsigned long start = address - dev->agp->base;
737 	i810_kernel_lost_context(dev);
739 	if (nbox > I810_NR_SAREA_CLIPRECTS)
740 		nbox = I810_NR_SAREA_CLIPRECTS;
745 	if (sarea_priv->dirty)
746 	   i810EmitState( dev );
748 	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
749 		unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
		/* Overwrite the first dword with the primitive command... */
751 		*(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | prim |
		/* ...and terminate the batch with a zero dword. */
755 		*(u32 *)((u32)buf_priv->virtual + used) = 0;
759 		i810_unmap_buffer(buf);
766 			OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
768 			OUT_RING( GFX_OP_SCISSOR_INFO );
769 			OUT_RING( box[i].x1 | (box[i].y1<<16) );
770 			OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) );
775 		OUT_RING( CMD_OP_BATCH_BUFFER );
776 		OUT_RING( start | BB1_PROTECTED );
777 		OUT_RING( start + used - 4 );
781 	} while (++i < nbox);
	/* Transfer ownership: client -> hardware. */
787 		(void) cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
791 		OUT_RING( CMD_STORE_DWORD_IDX );
793 		OUT_RING( dev_priv->counter );
		/* Hardware writes I810_BUF_FREE into this buffer's status slot. */
794 		OUT_RING( CMD_STORE_DWORD_IDX );
795 		OUT_RING( buf_priv->my_use_idx );
796 		OUT_RING( I810_BUF_FREE );
797 		OUT_RING( CMD_REPORT_HEAD );
/* Wait for the engine to go idle: emit a flush plus head report, then
 * wait until (almost) the whole ring is free, which can only happen
 * once the hardware has consumed everything queued.
 */
804 void i810_dma_quiescent(drm_device_t *dev)
806   	drm_i810_private_t *dev_priv = dev->dev_private;
809 /*  	printk("%s\n", __FUNCTION__); */
811   	i810_kernel_lost_context(dev);
814   	OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
815   	OUT_RING( CMD_REPORT_HEAD );
820   	i810_wait_ring( dev, dev_priv->ring.Size - 8 );
/* Drain the ring, then reclaim any buffers still marked as owned by
 * the hardware: each HARDWARE in-use word is atomically flipped back
 * to FREE (second cmpxchg argument elided in this listing).
 */
823 static int i810_flush_queue(drm_device_t *dev)
825 	drm_i810_private_t *dev_priv = dev->dev_private;
826 	drm_device_dma_t *dma = dev->dma;
830 /*  	printk("%s\n", __FUNCTION__); */
832   	i810_kernel_lost_context(dev);
835   	OUT_RING( CMD_REPORT_HEAD );
	/* Ring almost-empty implies all queued work has completed. */
839   	i810_wait_ring( dev, dev_priv->ring.Size - 8 );
841    	for (i = 0; i < dma->buf_count; i++) {
842 	   	drm_buf_t *buf = dma->buflist[ i ];
843 	   	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
845 		int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
848 		if (used == I810_BUF_HARDWARE)
849 			DRM_DEBUG("reclaimed from HARDWARE\n");
850 		if (used == I810_BUF_CLIENT)
851 			DRM_DEBUG("still on client\n");
857 /* Must be called with the lock held */
/* Reclaim all buffers owned by a closing file handle.  Flushes the
 * queue first, then frees every buffer whose filp matches, resetting
 * its mapped state.  Caller must hold the DRM lock (see comment above).
 */
858 void i810_reclaim_buffers(struct file *filp)
860 	drm_file_t *priv = filp->private_data;
861 	drm_device_t *dev = priv->dev;
862 	drm_device_dma_t *dma = dev->dma;
	/* Nothing to reclaim if DMA was never initialized. */
866 	if (!dev->dev_private) return;
867 	if (!dma->buflist) return;
869 	i810_flush_queue(dev);
871 	for (i = 0; i < dma->buf_count; i++) {
872 	   	drm_buf_t *buf = dma->buflist[ i ];
873 	   	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
875 		if (buf->filp == filp && buf_priv) {
876 			int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
879 			if (used == I810_BUF_CLIENT)
880 				DRM_DEBUG("reclaimed from client\n");
881 		   	if(buf_priv->currently_mapped == I810_BUF_MAPPED)
882 		     		buf_priv->currently_mapped = I810_BUF_UNMAPPED;
/* Flush ioctl: verify the caller holds the hardware lock, then drain
 * the queue via i810_flush_queue.
 */
887 int i810_flush_ioctl(struct inode *inode, struct file *filp,
888 		     unsigned int cmd, unsigned long arg)
890    	drm_file_t *priv = filp->private_data;
891    	drm_device_t *dev = priv->dev;
893    	if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
894 		DRM_ERROR("i810_flush_ioctl called without lock held\n");
898    	i810_flush_queue(dev);
/* Vertex-dispatch ioctl: copy the request from userspace, check the
 * lock and buffer index, dispatch the buffer, bump the DRM statistics
 * counters, and publish the enqueue/dispatch stamps to the SAREA.
 * NOTE(review): the visible index check uses "> dma->buf_count" where
 * ">=" would be expected for a 0-based index — confirm against the
 * elided dispatch path before relying on it.
 */
903 int i810_dma_vertex(struct inode *inode, struct file *filp,
904 	       unsigned int cmd, unsigned long arg)
906 	drm_file_t *priv = filp->private_data;
907 	drm_device_t *dev = priv->dev;
908 	drm_device_dma_t *dma = dev->dma;
909 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
910 	u32 *hw_status = dev_priv->hw_status_page;
911 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
912      					dev_priv->sarea_priv;
913 	drm_i810_vertex_t vertex;
915 	if (copy_from_user(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex)))
918 	if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
919 		DRM_ERROR("i810_dma_vertex called without lock held\n");
923 	if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL;
925 	i810_dma_dispatch_vertex( dev,
926 				  dma->buflist[ vertex.idx ],
927 				  vertex.discard, vertex.used );
929    	atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]);
930 	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
931 	sarea_priv->last_enqueue = dev_priv->counter-1;
	/* Status-page word 5 is the driver counter (see print_status_page). */
932    	sarea_priv->last_dispatch = (int) hw_status[5];
/* Clear-buffers ioctl: copy the clear request from userspace, verify
 * the lock and that DMA is initialized, then dispatch the clear.
 */
939 int i810_clear_bufs(struct inode *inode, struct file *filp,
940 		   unsigned int cmd, unsigned long arg)
942 	drm_file_t *priv = filp->private_data;
943 	drm_device_t *dev = priv->dev;
944 	drm_i810_clear_t clear;
946 	if (copy_from_user(&clear, (drm_i810_clear_t *)arg, sizeof(clear)))
949 	if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
950 		DRM_ERROR("i810_clear_bufs called without lock held\n");
954  	/* GH: Someone's doing nasty things... */
955  	if (!dev->dev_private) {
959 	i810_dma_dispatch_clear( dev, clear.flags,
/* Swap-buffers ioctl: verify the lock, then blit back buffer to front
 * via i810_dma_dispatch_swap.
 */
965 int i810_swap_bufs(struct inode *inode, struct file *filp,
966 		  unsigned int cmd, unsigned long arg)
968 	drm_file_t *priv = filp->private_data;
969 	drm_device_t *dev = priv->dev;
971 	if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
972 		DRM_ERROR("i810_swap_buf called without lock held\n");
976 	i810_dma_dispatch_swap( dev );
/* Get-age ioctl: refresh the SAREA's last_dispatch stamp from word 5
 * of the hardware status page.
 */
980 int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
983 	drm_file_t	  *priv	    = filp->private_data;
984 	drm_device_t	  *dev	    = priv->dev;
985 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
986 	u32 *hw_status = dev_priv->hw_status_page;
987 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
988      					dev_priv->sarea_priv;
990 	sarea_priv->last_dispatch = (int) hw_status[5];
/* Get-buffer ioctl: copy the request in, verify the lock, obtain and
 * map a free buffer via i810_dma_get_buffer, copy the filled request
 * back out, and refresh the SAREA dispatch stamp.
 */
994 int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
997 	drm_file_t	  *priv	    = filp->private_data;
998 	drm_device_t	  *dev	    = priv->dev;
1001 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1002 	u32 *hw_status = dev_priv->hw_status_page;
1003 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1004      					dev_priv->sarea_priv;
1006 	if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d)))
1009 	if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1010 		DRM_ERROR("i810_dma called without lock held\n");
1016 	retcode = i810_dma_get_buffer(dev, &d, filp);
1018 	if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
1020 	sarea_priv->last_dispatch = (int) hw_status[5];
/* Stub ioctl kept for interface compatibility; copying is not needed
 * on 2.4.x kernels.
 */
1025 int i810_copybuf(struct inode *inode,
1030 	/* Never copy - 2.4.x doesn't need it */
/* Stub ioctl kept for interface compatibility; copying is not needed
 * on 2.4.x kernels.
 */
1034 int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
1037 	/* Never copy - 2.4.x doesn't need it */
/* Dispatch a motion-compensation buffer (XvMC path).  Verifies the
 * buffer really belongs to the calling client via cmpxchg, marks all
 * state dirty for the next rendering ioctl, terminates and unmaps the
 * buffer, fires it as a protected batch, and queues status-page writes
 * that free the buffer and record last_render when the batch retires.
 */
1041 static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
1042 				 unsigned int last_render)
1044 	drm_i810_private_t *dev_priv = dev->dev_private;
1045 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
1046 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
1047 	unsigned long address = (unsigned long)buf->bus_address;
1048 	unsigned long start = address - dev->agp->base;
1052 	i810_kernel_lost_context(dev);
	/* Ownership check: only dispatch buffers held by a client. */
1054 	u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
1056 	if(u != I810_BUF_CLIENT) {
1057 		DRM_DEBUG("MC found buffer that isn't mine!\n");
	/* Force a full state upload on the next rendering ioctl. */
1063 	sarea_priv->dirty = 0x7f;
1065 	DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n",
1068 	dev_priv->counter++;
1069 	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
1070 	DRM_DEBUG("i810_dma_dispatch_mc\n");
1071 	DRM_DEBUG("start : %lx\n", start);
1072 	DRM_DEBUG("used : %d\n", used);
1073 	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
1075 	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		/* Terminate the batch with a zero dword. */
1077 		*(u32 *)((u32)buf_priv->virtual + used) = 0;
1081 		i810_unmap_buffer(buf);
1084 	OUT_RING( CMD_OP_BATCH_BUFFER );
1085 	OUT_RING( start | BB1_PROTECTED );
1086 	OUT_RING( start + used - 4 );
	/* Hardware frees the buffer's status slot when the batch retires. */
1092 	OUT_RING( CMD_STORE_DWORD_IDX );
1093 	OUT_RING( buf_priv->my_use_idx );
1094 	OUT_RING( I810_BUF_FREE );
1097 	OUT_RING( CMD_STORE_DWORD_IDX );
1099 	OUT_RING( last_render );
/* Motion-compensation dispatch ioctl: copy the request in, verify the
 * lock, dispatch via i810_dma_dispatch_mc, update DRM statistics, and
 * publish the enqueue/dispatch stamps to the SAREA.  NOTE(review): no
 * bounds check on mc.idx is visible in this listing — confirm one
 * exists in the elided lines.
 */
1104 int i810_dma_mc(struct inode *inode, struct file *filp,
1105 	unsigned int cmd, unsigned long arg)
1107 	drm_file_t *priv = filp->private_data;
1108 	drm_device_t *dev = priv->dev;
1109 	drm_device_dma_t *dma = dev->dma;
1110 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1111 	u32 *hw_status = dev_priv->hw_status_page;
1112 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1113 		dev_priv->sarea_priv;
1116 	if (copy_from_user(&mc, (drm_i810_mc_t *)arg, sizeof(mc)))
1120 	if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1121 		DRM_ERROR("i810_dma_mc called without lock held\n");
1125 	i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used,
1128 	atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]);
1129 	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1130 	sarea_priv->last_enqueue = dev_priv->counter-1;
1131 	sarea_priv->last_dispatch = (int) hw_status[5];
/* Render-status ioctl: return the last-render stamp (word 4 of the
 * hardware status page) directly as the ioctl return value.
 */
1136 int i810_rstatus(struct inode *inode, struct file *filp,
1137 		unsigned int cmd, unsigned long arg)
1139 	drm_file_t *priv = filp->private_data;
1140 	drm_device_t *dev = priv->dev;
1141 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1143 	return (int)(((u32 *)(dev_priv->hw_status_page))[4]);
/* Overlay-info ioctl: copy the overlay register offset and physical
 * address out to userspace.
 */
1146 int i810_ov0_info(struct inode *inode, struct file *filp,
1147 		unsigned int cmd, unsigned long arg)
1149 	drm_file_t *priv = filp->private_data;
1150 	drm_device_t *dev = priv->dev;
1151 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1152 	drm_i810_overlay_t data;
1154 	data.offset = dev_priv->overlay_offset;
1155 	data.physical = dev_priv->overlay_physical;
1156 	if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)))
/* Flip-status ioctl: verify the lock, then return the contents of
 * hardware register 0x30008 as the ioctl return value.
 */
1161 int i810_fstatus(struct inode *inode, struct file *filp,
1162 		unsigned int cmd, unsigned long arg)
1164 	drm_file_t *priv = filp->private_data;
1165 	drm_device_t *dev = priv->dev;
1166 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1168 	if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1169 		DRM_ERROR("i810_fstatus called without lock held\n");
1172 	return I810_READ(0x30008);
1175 int i810_ov0_flip(struct inode *inode, struct file *filp,
1176 unsigned int cmd, unsigned long arg)
1178 drm_file_t *priv = filp->private_data;
1179 drm_device_t *dev = priv->dev;
1180 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1182 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1183 DRM_ERROR("i810_ov0_flip called without lock held\n");
1187 //Tell the overlay to update
1188 I810_WRITE(0x30000,dev_priv->overlay_physical | 0x80000000);