/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"

#include <linux/interrupt.h>    /* For task queue support */
#include <linux/delay.h>

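/* A note on the busy-wait loops below: GAMMA_READ/GAMMA_WRITE are the
 * driver's MMIO register accessors.  Before each burst of register writes
 * the code spins until GAMMA_INFIFOSPACE reports enough free input-FIFO
 * slots for the writes that follow, so the empty-bodied while loops are
 * deliberate polling, not typos.
 */
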
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
                                      unsigned long length)
{
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        /* Wait for two free input-FIFO slots, then kick off the transfer.
         * The DMA count is in 32-bit words, hence the division by 4. */
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                ;
        GAMMA_WRITE(GAMMA_DMAADDRESS, address);
        while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
                ;
        GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}

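/* Quiescence works by pushing a sync tag through the pipeline and then
 * draining the output FIFO until that tag comes back: wait for the DMA
 * count to reach zero, enable sync output via FILTERMODE, write SYNC, and
 * poll OUTPUTFIFO for GAMMA_SYNC_TAG.  The dual variant appears to
 * broadcast the sync to both MX rasterizers and read each back in turn.
 */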
void gamma_dma_quiescent_single(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_DMACOUNT))
                ;
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                ;

        GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
        GAMMA_WRITE(GAMMA_SYNC, 0);

        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
                        ;
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}

void gamma_dma_quiescent_dual(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_DMACOUNT))
                ;
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                ;

        GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
        GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
        GAMMA_WRITE(GAMMA_SYNC, 0);

        /* Read from first MX */
        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
                        ;
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

        /* Read from second MX; its registers sit 0x10000 above the first */
        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
                        ;
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}

void gamma_dma_ready(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_DMACOUNT))
                ;
}

static inline int gamma_dma_is_ready(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        return (!GAMMA_READ(GAMMA_DMACOUNT));
}

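/* Interrupt service routine: bump the IRQ counter, write the registers
 * that appear to re-arm the delay timer and acknowledge the interrupt,
 * and, if the engine is idle, free the buffer that just completed and
 * schedule the bottom half to dispatch the next one.
 */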
void gamma_dma_service(int irq, void *device, struct pt_regs *regs)
{
        drm_device_t *dev = (drm_device_t *)device;
        drm_device_dma_t *dma = dev->dma;
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                ;
        GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
        GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
        GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
        if (gamma_dma_is_ready(dev)) {
                /* Free previous buffer */
                if (test_and_set_bit(0, &dev->dma_flag))
                        return;
                if (dma->this_buffer) {
                        gamma_free_buffer(dev, dma->this_buffer);
                        dma->this_buffer = NULL;
                }
                clear_bit(0, &dev->dma_flag);

                /* Dispatch new buffer */
                schedule_work(&dev->tq);
        }
}

/* Only called by gamma_dma_schedule. */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
        unsigned long address;
        unsigned long length;
        drm_buf_t *buf;
        int retcode = 0;
        drm_device_dma_t *dma = dev->dma;

        if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;

        if (!dma->next_buffer) {
                DRM_ERROR("No next_buffer\n");
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        buf = dma->next_buffer;
        /* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
        /* So we pass the buffer index value into the physical page offset */
        address = buf->idx << 12;

        length = buf->used;

        DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
                  buf->context, buf->idx, length);

        if (buf->list == DRM_LIST_RECLAIM) {
                gamma_clear_next_buffer(dev);
                gamma_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        if (!length) {
                DRM_ERROR("0 length buffer\n");
                gamma_clear_next_buffer(dev);
                gamma_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return 0;
        }

        if (!gamma_dma_is_ready(dev)) {
                clear_bit(0, &dev->dma_flag);
                return -EBUSY;
        }

        if (buf->while_locked) {
                if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                        DRM_ERROR("Dispatching buffer %d from pid %d"
                                  " \"while locked\", but no lock held\n",
                                  buf->idx, current->pid);
                }
        } else {
                if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
                                                DRM_KERNEL_CONTEXT)) {
                        clear_bit(0, &dev->dma_flag);
                        return -EBUSY;
                }
        }

        if (dev->last_context != buf->context
            && !(dev->queuelist[buf->context]->flags
                 & _DRM_CONTEXT_PRESERVED)) {
                /* PRE: dev->last_context != buf->context */
                if (DRM(context_switch)(dev, dev->last_context,
                                        buf->context)) {
                        DRM(clear_next_buffer)(dev);
                        DRM(free_buffer)(dev, buf);
                }
                retcode = -EBUSY;
                goto cleanup;

                /* POST: we will wait for the context
                   switch and will dispatch on a later call
                   when dev->last_context == buf->context.
                   NOTE WE HOLD THE LOCK THROUGHOUT THIS
                   TIME! */
        }

        gamma_clear_next_buffer(dev);
        buf->pending = 1;
        buf->waiting = 0;
        buf->list = DRM_LIST_PEND;

        /* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
        address = buf->idx << 12;

        gamma_dma_dispatch(dev, address, length);
        gamma_free_buffer(dev, dma->this_buffer);
        dma->this_buffer = buf;

        atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
        atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

        if (!buf->while_locked && !dev->context_flag && !locked) {
                if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
                                    DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
cleanup:

        clear_bit(0, &dev->dma_flag);

        return retcode;
}

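/* Bottom-half entry points: both simply funnel into gamma_dma_schedule().
 * The timer variant takes the device as an unsigned long because that is
 * what the kernel timer callback signature provides.
 */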
static void gamma_dma_timer_bh(unsigned long dev)
{
        gamma_dma_schedule((drm_device_t *)dev, 0);
}

void gamma_dma_immediate_bh(void *dev)
{
        gamma_dma_schedule(dev, 0);
}

int gamma_dma_schedule(drm_device_t *dev, int locked)
{
        int next;
        drm_queue_t *q;
        drm_buf_t *buf;
        int retcode = 0;
        int processed = 0;
        int missed;
        int expire = 20;
        drm_device_dma_t *dma = dev->dma;

        if (test_and_set_bit(0, &dev->interrupt_flag)) {
                /* Not reentrant */
                atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
                return -EBUSY;
        }
        missed = atomic_read(&dev->counts[10]);

again:
        if (dev->context_flag) {
                clear_bit(0, &dev->interrupt_flag);
                return -EBUSY;
        }
        if (dma->next_buffer) {
                /* Unsent buffer that was previously
                   selected, but that couldn't be sent
                   because the lock could not be obtained
                   or the DMA engine wasn't ready.  Try
                   again. */
                if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
        } else {
                do {
                        next = gamma_select_queue(dev, gamma_dma_timer_bh);
                        if (next >= 0) {
                                q = dev->queuelist[next];
                                buf = gamma_waitlist_get(&q->waitlist);
                                dma->next_buffer = buf;
                                dma->next_queue = q;
                                if (buf && buf->list == DRM_LIST_RECLAIM) {
                                        gamma_clear_next_buffer(dev);
                                        gamma_free_buffer(dev, buf);
                                }
                        }
                } while (next >= 0 && !dma->next_buffer);
                if (dma->next_buffer) {
                        if (!(retcode = gamma_do_dma(dev, locked))) {
                                ++processed;
                        }
                }
        }

        if (--expire) {
                if (missed != atomic_read(&dev->counts[10])) {
                        if (gamma_dma_is_ready(dev)) goto again;
                }
                if (processed && gamma_dma_is_ready(dev)) {
                        processed = 0;
                        goto again;
                }
        }

        clear_bit(0, &dev->interrupt_flag);

        return retcode;
}

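/* Priority DMA bypasses the scheduler queues entirely: interrupt handling
 * is turned off, the buffers named in the request are validated and
 * dispatched directly, and the hardware lock is taken here unless
 * _DRM_DMA_WHILE_LOCKED says the caller already holds it.
 */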
static int gamma_dma_priority(struct file *filp,
                              drm_device_t *dev, drm_dma_t *d)
{
        unsigned long address;
        unsigned long length;
        int must_free = 0;
        int retcode = 0;
        int i;
        int idx;
        drm_buf_t *buf;
        drm_buf_t *last_buf = NULL;
        drm_device_dma_t *dma = dev->dma;
        DECLARE_WAITQUEUE(entry, current);

        /* Turn off interrupt handling */
        while (test_and_set_bit(0, &dev->interrupt_flag)) {
                schedule();
                if (signal_pending(current)) return -EINTR;
        }
        if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
                while (!gamma_lock_take(&dev->lock.hw_lock->lock,
                                        DRM_KERNEL_CONTEXT)) {
                        schedule();
                        if (signal_pending(current)) {
                                clear_bit(0, &dev->interrupt_flag);
                                return -EINTR;
                        }
                }
                ++must_free;
        }

        for (i = 0; i < d->send_count; i++) {
                idx = d->send_indices[i];
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  d->send_indices[i], dma->buf_count - 1);
                        continue;
                }
                buf = dma->buflist[ idx ];
                if (buf->filp != filp) {
                        DRM_ERROR("Process %d using buffer not owned\n",
                                  current->pid);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                if (buf->list != DRM_LIST_NONE) {
                        DRM_ERROR("Process %d using buffer on list %d\n",
                                  current->pid, buf->list);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                /* This isn't a race condition on
                   buf->list, since our concern is the
                   buffer reclaim during the time the
                   process closes the /dev/drm? handle, so
                   it can't also be doing DMA. */
                buf->list = DRM_LIST_PRIO;
                buf->used = d->send_sizes[i];
                buf->context = d->context;
                buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
                address = (unsigned long)buf->address;
                length = buf->used;
                if (!length) {
                        DRM_ERROR("0 length buffer\n");
                }
                if (buf->pending) {
                        DRM_ERROR("Sending pending buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                if (buf->waiting) {
                        DRM_ERROR("Sending waiting buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                buf->pending = 1;

                if (dev->last_context != buf->context
                    && !(dev->queuelist[buf->context]->flags
                         & _DRM_CONTEXT_PRESERVED)) {
                        add_wait_queue(&dev->context_wait, &entry);
                        current->state = TASK_INTERRUPTIBLE;
                        /* PRE: dev->last_context != buf->context */
                        DRM(context_switch)(dev, dev->last_context,
                                            buf->context);
                        /* POST: we will wait for the context
                           switch and will dispatch on a later call
                           when dev->last_context == buf->context.
                           NOTE WE HOLD THE LOCK THROUGHOUT THIS
                           TIME! */
                        schedule();
                        current->state = TASK_RUNNING;
                        remove_wait_queue(&dev->context_wait, &entry);
                        if (signal_pending(current)) {
                                retcode = -EINTR;
                                goto cleanup;
                        }
                        if (dev->last_context != buf->context) {
                                DRM_ERROR("Context mismatch: %d %d\n",
                                          dev->last_context,
                                          buf->context);
                        }
                }

                gamma_dma_dispatch(dev, address, length);
                atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
                atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

                if (last_buf) {
                        gamma_free_buffer(dev, last_buf);
                }
                last_buf = buf;
        }

cleanup:
        if (last_buf) {
                gamma_dma_ready(dev);
                gamma_free_buffer(dev, last_buf);
        }

        if (must_free && !dev->context_flag) {
                if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
                                    DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
        clear_bit(0, &dev->interrupt_flag);
        return retcode;
}

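/* Queue a set of buffers for DMA and kick the scheduler.  With
 * _DRM_DMA_BLOCK the caller sleeps on the last buffer's wait queue until
 * the whole batch has been dispatched, or until a signal arrives.
 */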
static int gamma_dma_send_buffers(struct file *filp,
                                  drm_device_t *dev, drm_dma_t *d)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_buf_t *last_buf = NULL;
        int retcode = 0;
        drm_device_dma_t *dma = dev->dma;

        if (d->flags & _DRM_DMA_BLOCK) {
                last_buf = dma->buflist[d->send_indices[d->send_count-1]];
                add_wait_queue(&last_buf->dma_wait, &entry);
        }

        if ((retcode = gamma_dma_enqueue(filp, d))) {
                if (d->flags & _DRM_DMA_BLOCK)
                        remove_wait_queue(&last_buf->dma_wait, &entry);
                return retcode;
        }

        gamma_dma_schedule(dev, 0);

        if (d->flags & _DRM_DMA_BLOCK) {
                DRM_DEBUG("%d waiting\n", current->pid);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if (!last_buf->waiting && !last_buf->pending)
                                break; /* finished */
                        schedule();
                        if (signal_pending(current)) {
                                retcode = -EINTR; /* Can't restart */
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                DRM_DEBUG("%d running\n", current->pid);
                remove_wait_queue(&last_buf->dma_wait, &entry);
                if (!retcode
                    || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
                        if (!waitqueue_active(&last_buf->dma_wait)) {
                                gamma_free_buffer(dev, last_buf);
                        }
                }
                if (retcode) {
                        DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
                                  d->context,
                                  last_buf->waiting,
                                  last_buf->pending,
                                  (long)DRM_WAITCOUNT(dev, d->context),
                                  last_buf->idx,
                                  last_buf->list,
                                  current->pid);
                }
        }
        return retcode;
}

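/* The DMA ioctl: validates the user's drm_dma_t request, sends buffers by
 * the priority or queued path, then optionally hands fresh buffers back
 * via gamma_dma_get_buffers().
 */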
int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
              unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        drm_dma_t d;

        if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d)))
                return -EFAULT;

        if (d.send_count < 0 || d.send_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
                          current->pid, d.send_count, dma->buf_count);
                return -EINVAL;
        }

        if (d.request_count < 0 || d.request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          current->pid, d.request_count, dma->buf_count);
                return -EINVAL;
        }

        if (d.send_count) {
                if (d.flags & _DRM_DMA_PRIORITY)
                        retcode = gamma_dma_priority(filp, dev, &d);
                else
                        retcode = gamma_dma_send_buffers(filp, dev, &d);
        }

        d.granted_count = 0;

        if (!retcode && d.request_count) {
                retcode = gamma_dma_get_buffers(filp, &d);
        }

        DRM_DEBUG("%d returning, granted = %d\n",
                  current->pid, d.granted_count);
        if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
                return -EFAULT;

        return retcode;
}

/* =============================================================
 * DMA initialization, cleanup
 */

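/* gamma_do_init_dma() locates the SAREA and MMIO maps, then builds the
 * page table the chip uses for the "logical page" addressing of DMA
 * buffers seen elsewhere in this file (buffer idx << 12).  In PCI mode
 * each entry appears to be a physical address with low flag bits (0x07)
 * set; otherwise the entries point into the ioremapped buffer aperture.
 */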
static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
{
        drm_gamma_private_t *dev_priv;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *buf;
        int i;
        struct list_head *list;
        unsigned long *pgt;

        DRM_DEBUG( "%s\n", __FUNCTION__ );

        dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
                               DRM_MEM_DRIVER );
        if ( !dev_priv )
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;

        memset( dev_priv, 0, sizeof(drm_gamma_private_t) );

        dev_priv->num_rast = init->num_rast;

        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
                if( r_list->map &&
                    r_list->map->type == _DRM_SHM &&
                    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
                        dev_priv->sarea = r_list->map;
                        break;
                }
        }

        DRM_FIND_MAP( dev_priv->mmio0, init->mmio0 );
        DRM_FIND_MAP( dev_priv->mmio1, init->mmio1 );
        DRM_FIND_MAP( dev_priv->mmio2, init->mmio2 );
        DRM_FIND_MAP( dev_priv->mmio3, init->mmio3 );

        dev_priv->sarea_priv = (drm_gamma_sarea_t *)
                ((u8 *)dev_priv->sarea->handle +
                 init->sarea_priv_offset);

        if (init->pcimode) {
                buf = dma->buflist[GLINT_DRI_BUF_COUNT];
                pgt = buf->address;

                for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
                        buf = dma->buflist[i];
                        *pgt = virt_to_phys((void*)buf->address) | 0x07;
                        pgt++;
                }

                buf = dma->buflist[GLINT_DRI_BUF_COUNT];
        } else {
                DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );

                DRM_IOREMAP( dev_priv->buffers );

                buf = dma->buflist[GLINT_DRI_BUF_COUNT];
                pgt = buf->address;

                for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
                        buf = dma->buflist[i];
                        *pgt = (unsigned long)buf->address + 0x07;
                        pgt++;
                }

                buf = dma->buflist[GLINT_DRI_BUF_COUNT];

                while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1)
                        ;
                GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe );
        }
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                ;
        GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
        GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );

        return 0;
}

int gamma_do_cleanup_dma( drm_device_t *dev )
{
        DRM_DEBUG( "%s\n", __FUNCTION__ );

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if ( dev->irq ) DRM(irq_uninstall)(dev);

        if ( dev->dev_private ) {
                drm_gamma_private_t *dev_priv = dev->dev_private;

                if ( dev_priv->buffers != NULL )
                        DRM_IOREMAPFREE( dev_priv->buffers );

                DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
                           DRM_MEM_DRIVER );
                dev->dev_private = NULL;
        }

        return 0;
}

int gamma_dma_init( struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_gamma_init_t init;

        LOCK_TEST_WITH_RETURN( dev, filp );

        if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) )
                return -EFAULT;

        switch ( init.func ) {
        case GAMMA_INIT_DMA:
                return gamma_do_init_dma( dev, &init );
        case GAMMA_CLEANUP_DMA:
                return gamma_do_cleanup_dma( dev );
        }

        return -EINVAL;
}

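/* gamma_do_copy_dma() builds a two-stage command stream in a
 * DRM_RESTRICTED DMA buffer: the first block reads a screen rectangle out
 * through the HostOut FIFO into a scratch buffer, the second writes it
 * back to the destination address.  The 0x5xx values are register tags in
 * the GLINT/Gamma command format.
 */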
static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
{
        drm_device_dma_t *dma = dev->dma;
        unsigned int *screenbuf;
        unsigned int *buffer;

        DRM_DEBUG( "%s\n", __FUNCTION__ );

        /* We've DRM_RESTRICTED this DMA buffer */

        screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;

        /* Presumably the command stream goes in the other reserved buffer */
        buffer = dma->buflist[ GLINT_DRI_BUF_COUNT ]->address;

        *buffer++ = 0x180;      /* Tag (FilterMode) */
        *buffer++ = 0x200;      /* Allow FBColor through */
        *buffer++ = 0x53B;      /* Tag */
        *buffer++ = copy->Pitch;
        *buffer++ = 0x53A;      /* Tag */
        *buffer++ = copy->SrcAddress;
        *buffer++ = 0x539;      /* Tag */
        *buffer++ = copy->WidthHeight;  /* Initiates transfer */
        *buffer++ = 0x53C;      /* Tag - DMAOutputAddress */
        *buffer++ = virt_to_phys((void*)screenbuf);
        *buffer++ = 0x53D;      /* Tag - DMAOutputCount */
        *buffer++ = copy->Count;        /* Reads HostOutFifo BLOCKS until .. */

        /* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
        /* Now put it back to the screen */

        *buffer++ = 0x180;      /* Tag (FilterMode) */
        *buffer++ = 0x400;      /* Allow Sync through */
        *buffer++ = 0x538;      /* Tag - DMARectangleReadTarget */
        *buffer++ = 0x155;      /* FBSourceData | count */
        *buffer++ = 0x537;      /* Tag */
        *buffer++ = copy->Pitch;
        *buffer++ = 0x536;      /* Tag */
        *buffer++ = copy->DstAddress;
        *buffer++ = 0x535;      /* Tag */
        *buffer++ = copy->WidthHeight;  /* Initiates transfer */
        *buffer++ = 0x530;      /* Tag - DMAAddr */
        *buffer++ = virt_to_phys((void*)screenbuf);
        *buffer++ = 0x531;      /* Tag - presumably DMACount */
        *buffer++ = copy->Count;        /* initiates DMA transfer of color data */

        /* need to dispatch it now */

        return 0;
}

int gamma_dma_copy( struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_gamma_copy_t copy;

        if ( copy_from_user( &copy, (drm_gamma_copy_t *)arg, sizeof(copy) ) )
                return -EFAULT;

        return gamma_do_copy_dma( dev, &copy );
}

/* =============================================================
 * Per Context SAREA Support
 */

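/* Each context can have its own private SAREA mapping.  gamma_getsareactx
 * returns the handle recorded for a context; gamma_setsareactx looks the
 * handle up in the device map list and records the map for that context.
 */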
int gamma_getsareactx(struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_ctx_priv_map_t request;
        drm_map_t *map;

        if (copy_from_user(&request,
                           (drm_ctx_priv_map_t *)arg,
                           sizeof(request)))
                return -EFAULT;

        down(&dev->struct_sem);
        if ((int)request.ctx_id >= dev->max_context) {
                up(&dev->struct_sem);
                return -EINVAL;
        }

        map = dev->context_sareas[request.ctx_id];
        up(&dev->struct_sem);

        request.handle = map->handle;
        if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
                return -EFAULT;
        return 0;
}

int gamma_setsareactx(struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_ctx_priv_map_t request;
        drm_map_t *map = NULL;
        drm_map_list_t *r_list;
        struct list_head *list;

        if (copy_from_user(&request,
                           (drm_ctx_priv_map_t *)arg,
                           sizeof(request)))
                return -EFAULT;

        down(&dev->struct_sem);

        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                if(r_list->map &&
                   r_list->map->handle == request.handle) break;
        }
        if (list == &(dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        up(&dev->struct_sem);

        if (!map) return -EINVAL;

        down(&dev->struct_sem);
        if ((int)request.ctx_id >= dev->max_context) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        dev->context_sareas[request.ctx_id] = map;
        up(&dev->struct_sem);

        return 0;
}

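/* IRQ installation hooks called by the DRM core.  Preinstall quiets the
 * command unit, postinstall enables the interrupt sources that
 * gamma_dma_service() acknowledges, and uninstall masks them all again.
 */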
void DRM(driver_irq_preinstall)( drm_device_t *dev ) {
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                ;

        GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 );
        GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );
}

void DRM(driver_irq_postinstall)( drm_device_t *dev ) {
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                ;

        GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 );
        GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 );
        GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 );
}

void DRM(driver_irq_uninstall)( drm_device_t *dev ) {
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        if (!dev_priv)
                return;

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                ;

        GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 );
        GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 );
        GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 );
}