/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"

#include <linux/interrupt.h>	/* For task queue support */
#include <linux/delay.h>

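/* Hand one transfer to the DMA engine: wait for two free slots in the
 * input FIFO, program the start address, spin until GCOMMANDSTATUS reads 4
 * (the engine's ready value, by appearance), then write the length in
 * 32-bit words to start the transfer.
 */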
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
				      unsigned long length)
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;
	mb();
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		;
	GAMMA_WRITE(GAMMA_DMAADDRESS, address);
	while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
		;
	GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}

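/* Wait for a single-rasterizer board to go idle: let the outstanding DMA
 * count drain to zero, enable sync-tag filtering, issue a Sync, and pull
 * words out of the output FIFO until the sync tag appears.
 */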
void gamma_dma_quiescent_single(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT))
		;

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		;

	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}

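/* Same idea for dual-MX boards: broadcast the Sync to both rasterizers and
 * wait for the tag to come back from each; the second MX's registers are
 * mapped at a 0x10000 offset.
 */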
void gamma_dma_quiescent_dual(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT))
		;

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		;

	GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	/* Read from first MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

	/* Read from second MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}

/* Block until the DMA engine has finished the current transfer. */
void gamma_dma_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT))
		;
}

/* Non-blocking check: the engine is ready when the DMA count is zero. */
static inline int gamma_dma_is_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;
	return !GAMMA_READ(GAMMA_DMACOUNT);
}

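/* Interrupt handler: bump the IRQ counter, re-arm the delay timer, and
 * clear the command and general interrupt flags.  If the engine is idle,
 * free the buffer that just completed and kick the bottom half to send
 * the next one.
 */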
void gamma_dma_service(int irq, void *device, struct pt_regs *regs)
{
	drm_device_t	 *dev = (drm_device_t *)device;
	drm_device_dma_t *dma = dev->dma;
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;

	atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		;
	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0.05s */
	GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
	GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
	if (gamma_dma_is_ready(dev)) {
				/* Free previous buffer */
		if (test_and_set_bit(0, &dev->dma_flag))
			return;
		if (dma->this_buffer) {
			gamma_free_buffer(dev, dma->this_buffer);
			dma->this_buffer = NULL;
		}
		clear_bit(0, &dev->dma_flag);

				/* Dispatch new buffer */
		schedule_work(&dev->tq);
	}
}

/* Only called by gamma_dma_schedule. */
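/*
 * Dispatch dma->next_buffer to the hardware, taking the hardware lock and
 * performing any needed context switch along the way; reclaimed and
 * zero-length buffers are dropped instead of sent.
 */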
static int gamma_do_dma(drm_device_t *dev, int locked)
{
	unsigned long	 address;
	unsigned long	 length;
	drm_buf_t	 *buf;
	int		 retcode = 0;
	drm_device_dma_t *dma = dev->dma;

	if (test_and_set_bit(0, &dev->dma_flag))
		return -EBUSY;

	if (!dma->next_buffer) {
		DRM_ERROR("No next_buffer\n");
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	buf	= dma->next_buffer;
	/* We are now on logical pages, using the page table set up in
	   dma_init, so the buffer index serves as the physical page offset. */
	address = buf->idx << 12;
	length	= buf->used;

	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
		  buf->context, buf->idx, length);

	if (buf->list == DRM_LIST_RECLAIM) {
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	if (!length) {
		DRM_ERROR("0 length buffer\n");
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return 0;
	}

	if (!gamma_dma_is_ready(dev)) {
		clear_bit(0, &dev->dma_flag);
		return -EBUSY;
	}

	if (buf->while_locked) {
		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
			DRM_ERROR("Dispatching buffer %d from pid %d"
				  " \"while locked\", but no lock held\n",
				  buf->idx, current->pid);
		}
	} else {
		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
						DRM_KERNEL_CONTEXT)) {
			clear_bit(0, &dev->dma_flag);
			return -EBUSY;
		}
	}

	if (dev->last_context != buf->context
	    && !(dev->queuelist[buf->context]->flags
		 & _DRM_CONTEXT_PRESERVED)) {
				/* PRE: dev->last_context != buf->context */
		if (DRM(context_switch)(dev, dev->last_context,
					buf->context)) {
			DRM(clear_next_buffer)(dev);
			DRM(free_buffer)(dev, buf);
		}
		retcode = -EBUSY;
		goto cleanup;

				/* POST: we will wait for the context
				   switch and will dispatch on a later call
				   when dev->last_context == buf->context.
				   NOTE WE HOLD THE LOCK THROUGHOUT THIS
				   TIME! */
	}

	gamma_clear_next_buffer(dev);
	buf->pending	 = 1;
	buf->waiting	 = 0;
	buf->list	 = DRM_LIST_PEND;

	/* We are now on logical pages, so override the address with the
	   buffer's page index. */
	address = buf->idx << 12;

	gamma_dma_dispatch(dev, address, length);
	gamma_free_buffer(dev, dma->this_buffer);
	dma->this_buffer = buf;

	atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
	atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

	if (!buf->while_locked && !dev->context_flag && !locked) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
cleanup:
	clear_bit(0, &dev->dma_flag);

	return retcode;
}

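/* Bottom halves: both simply re-enter the scheduler.  The timer variant is
 * the wakeup callback handed to gamma_select_queue() below.
 */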
static void gamma_dma_timer_bh(unsigned long dev)
{
	gamma_dma_schedule((drm_device_t *)dev, 0);
}

void gamma_dma_immediate_bh(void *dev)
{
	gamma_dma_schedule(dev, 0);
}

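/* Central scheduler: first retry any previously selected (but unsent)
 * buffer, otherwise pick the next queue and buffer to send, and keep going
 * for up to `expire` passes while the engine stays ready or interrupts
 * were missed.
 */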
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
	int		 next;
	drm_queue_t	 *q;
	drm_buf_t	 *buf;
	int		 retcode   = 0;
	int		 processed = 0;
	int		 missed;
	int		 expire	   = 20;
	drm_device_dma_t *dma	   = dev->dma;

	if (test_and_set_bit(0, &dev->interrupt_flag)) {
				/* Not reentrant */
		atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
		return -EBUSY;
	}
	missed = atomic_read(&dev->counts[10]);

again:
	if (dev->context_flag) {
		clear_bit(0, &dev->interrupt_flag);
		return -EBUSY;
	}
	if (dma->next_buffer) {
				/* Unsent buffer that was previously
				   selected, but that couldn't be sent
				   because the lock could not be obtained
				   or the DMA engine wasn't ready.  Try
				   again. */
		if (!(retcode = gamma_do_dma(dev, locked)))
			++processed;
	} else {
		do {
			next = gamma_select_queue(dev, gamma_dma_timer_bh);
			if (next >= 0) {
				q   = dev->queuelist[next];
				buf = gamma_waitlist_get(&q->waitlist);
				dma->next_buffer = buf;
				dma->next_queue	 = q;
				if (buf && buf->list == DRM_LIST_RECLAIM) {
					gamma_clear_next_buffer(dev);
					gamma_free_buffer(dev, buf);
				}
			}
		} while (next >= 0 && !dma->next_buffer);
		if (dma->next_buffer) {
			if (!(retcode = gamma_do_dma(dev, locked))) {
				++processed;
			}
		}
	}

	if (--expire) {
		if (missed != atomic_read(&dev->counts[10])) {
			if (gamma_dma_is_ready(dev))
				goto again;
		}
		if (processed && gamma_dma_is_ready(dev)) {
			processed = 0;
			goto again;
		}
	}

	clear_bit(0, &dev->interrupt_flag);

	return retcode;
}

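/* Priority DMA: dispatch the caller's buffers directly, bypassing the
 * scheduler.  Interrupt handling is held off for the duration, and the
 * hardware lock is taken unless _DRM_DMA_WHILE_LOCKED was requested.
 */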
static int gamma_dma_priority(struct file *filp,
			      drm_device_t *dev, drm_dma_t *d)
{
	unsigned long	  address;
	unsigned long	  length;
	int		  must_free = 0;
	int		  retcode   = 0;
	int		  i;
	int		  idx;
	drm_buf_t	  *buf;
	drm_buf_t	  *last_buf = NULL;
	drm_device_dma_t  *dma	    = dev->dma;
	DECLARE_WAITQUEUE(entry, current);

				/* Turn off interrupt handling */
	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current))
			return -EINTR;
	}
	if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
		while (!gamma_lock_take(&dev->lock.hw_lock->lock,
					DRM_KERNEL_CONTEXT)) {
			schedule();
			if (signal_pending(current)) {
				clear_bit(0, &dev->interrupt_flag);
				return -EINTR;
			}
		}
		++must_free;
	}

	for (i = 0; i < d->send_count; i++) {
		idx = d->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			continue;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d using buffer not owned\n",
				  current->pid);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->list != DRM_LIST_NONE) {
			DRM_ERROR("Process %d using buffer on list %d\n",
				  current->pid, buf->list);
			retcode = -EINVAL;
			goto cleanup;
		}
				/* This isn't a race condition on
				   buf->list, since our concern is the
				   buffer reclaim during the time the
				   process closes the /dev/drm? handle, so
				   it can't also be doing DMA. */
		buf->list	  = DRM_LIST_PRIO;
		buf->used	  = d->send_sizes[i];
		buf->context	  = d->context;
		buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
		address		  = (unsigned long)buf->address;
		length		  = buf->used;
		if (!length) {
			DRM_ERROR("0 length buffer\n");
		}
		if (buf->pending) {
			DRM_ERROR("Sending pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->waiting) {
			DRM_ERROR("Sending waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		buf->pending = 1;

		if (dev->last_context != buf->context
		    && !(dev->queuelist[buf->context]->flags
			 & _DRM_CONTEXT_PRESERVED)) {
			add_wait_queue(&dev->context_wait, &entry);
			current->state = TASK_INTERRUPTIBLE;
				/* PRE: dev->last_context != buf->context */
			DRM(context_switch)(dev, dev->last_context,
					    buf->context);
				/* POST: we will wait for the context
				   switch and will dispatch on a later call
				   when dev->last_context == buf->context.
				   NOTE WE HOLD THE LOCK THROUGHOUT THIS
				   TIME! */
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&dev->context_wait, &entry);
			if (signal_pending(current)) {
				retcode = -EINTR;
				goto cleanup;
			}
			if (dev->last_context != buf->context) {
				DRM_ERROR("Context mismatch: %d %d\n",
					  dev->last_context,
					  buf->context);
			}
		}

		gamma_dma_dispatch(dev, address, length);
		atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
		atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

		if (last_buf) {
			gamma_free_buffer(dev, last_buf);
		}
		last_buf = buf;
	}

cleanup:
	if (last_buf) {
		gamma_dma_ready(dev);
		gamma_free_buffer(dev, last_buf);
	}

	if (must_free && !dev->context_flag) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
	clear_bit(0, &dev->interrupt_flag);
	return retcode;
}

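/* Queue buffers through the normal scheduler.  With _DRM_DMA_BLOCK set the
 * caller sleeps on the last buffer's wait queue until that buffer has been
 * sent and completed (or a signal arrives).
 */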
static int gamma_dma_send_buffers(struct file *filp,
				  drm_device_t *dev, drm_dma_t *d)
{
	DECLARE_WAITQUEUE(entry, current);
	drm_buf_t	  *last_buf = NULL;
	int		  retcode   = 0;
	drm_device_dma_t  *dma	    = dev->dma;

	if (d->flags & _DRM_DMA_BLOCK) {
		last_buf = dma->buflist[d->send_indices[d->send_count-1]];
		add_wait_queue(&last_buf->dma_wait, &entry);
	}

	if ((retcode = gamma_dma_enqueue(filp, d))) {
		if (d->flags & _DRM_DMA_BLOCK)
			remove_wait_queue(&last_buf->dma_wait, &entry);
		return retcode;
	}

	gamma_dma_schedule(dev, 0);

	if (d->flags & _DRM_DMA_BLOCK) {
		DRM_DEBUG("%d waiting\n", current->pid);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!last_buf->waiting && !last_buf->pending)
				break; /* finished */
			schedule();
			if (signal_pending(current)) {
				retcode = -EINTR; /* Can't restart */
				break;
			}
		}
		current->state = TASK_RUNNING;
		DRM_DEBUG("%d running\n", current->pid);
		remove_wait_queue(&last_buf->dma_wait, &entry);
		if (!retcode
		    || (last_buf->list == DRM_LIST_PEND && !last_buf->pending)) {
			if (!waitqueue_active(&last_buf->dma_wait)) {
				gamma_free_buffer(dev, last_buf);
			}
		}
		if (retcode) {
			DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
				  d->context,
				  last_buf->waiting,
				  last_buf->pending,
				  (long)DRM_WAITCOUNT(dev, d->context),
				  last_buf->idx,
				  last_buf->list,
				  current->pid);
		}
	}
	return retcode;
}

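/* The DMA ioctl entry point: validate the send and request counts against
 * the buffer pool, send any buffers named in the request (priority or
 * queued, depending on flags), then try to hand free buffers back to the
 * caller via gamma_dma_get_buffers().
 */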
int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
	drm_file_t	  *priv	    = filp->private_data;
	drm_device_t	  *dev	    = priv->dev;
	drm_device_dma_t  *dma	    = dev->dma;
	int		  retcode   = 0;
	drm_dma_t	  d;

	if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d)))
		return -EFAULT;

	if (d.send_count < 0 || d.send_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
			  current->pid, d.send_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  current->pid, d.request_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.send_count) {
		if (d.flags & _DRM_DMA_PRIORITY)
			retcode = gamma_dma_priority(filp, dev, &d);
		else
			retcode = gamma_dma_send_buffers(filp, dev, &d);
	}

	d.granted_count = 0;

	if (!retcode && d.request_count) {
		retcode = gamma_dma_get_buffers(filp, &d);
	}

	DRM_DEBUG("%d returning, granted = %d\n",
		  current->pid, d.granted_count);
	if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
		return -EFAULT;

	return retcode;
}

/* =============================================================
 * DMA initialization, cleanup
 */

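/* Set up the page table that backs the logical addresses used by
 * gamma_dma_dispatch(): one entry per DRI buffer, kept in the buffer just
 * past the normal pool (index GLINT_DRI_BUF_COUNT).  The 0x07 in the low
 * bits of each entry appears to be valid/flag bits.  In PCI mode the
 * entries hold physical addresses; otherwise the ioremapped buffer
 * addresses are used and the chip's DMA controller is enabled first.
 */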
static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
{
	drm_gamma_private_t *dev_priv;
	drm_device_dma_t    *dma = dev->dma;
	drm_buf_t	    *buf;
	int i;
	struct list_head    *list;
	unsigned long	    *pgt;

	DRM_DEBUG( "%s\n", __FUNCTION__ );

	dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
			       DRM_MEM_DRIVER );
	if ( !dev_priv )
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;

	memset( dev_priv, 0, sizeof(drm_gamma_private_t) );

	dev_priv->num_rast = init->num_rast;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
		if( r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
			dev_priv->sarea = r_list->map;
			break;
		}
	}

	DRM_FIND_MAP( dev_priv->mmio0, init->mmio0 );
	DRM_FIND_MAP( dev_priv->mmio1, init->mmio1 );
	DRM_FIND_MAP( dev_priv->mmio2, init->mmio2 );
	DRM_FIND_MAP( dev_priv->mmio3, init->mmio3 );

	dev_priv->sarea_priv = (drm_gamma_sarea_t *)
		((u8 *)dev_priv->sarea->handle +
		 init->sarea_priv_offset);

	if (init->pcimode) {
		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
		pgt = buf->address;

		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
			buf = dma->buflist[i];
			*pgt = virt_to_phys((void*)buf->address) | 0x07;
			pgt++;
		}

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
	} else {
		DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );

		DRM_IOREMAP( dev_priv->buffers );

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
		pgt = buf->address;

		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
			buf = dma->buflist[i];
			*pgt = (unsigned long)buf->address + 0x07;
			pgt++;
		}

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];

		while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1)
			;
		GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe );
	}
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		;
	GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
	GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );

	return 0;
}

int gamma_do_cleanup_dma( drm_device_t *dev )
{
	DRM_DEBUG( "%s\n", __FUNCTION__ );

#if _HAVE_DMA_IRQ
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if ( dev->irq ) DRM(irq_uninstall)(dev);
#endif

	if ( dev->dev_private ) {
		drm_gamma_private_t *dev_priv = dev->dev_private;

		if ( dev_priv->buffers != NULL )
			DRM_IOREMAPFREE( dev_priv->buffers );

		DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
			   DRM_MEM_DRIVER );
		dev->dev_private = NULL;
	}

	return 0;
}

int gamma_dma_init( struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_gamma_init_t init;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) )
		return -EFAULT;

	switch ( init.func ) {
	case GAMMA_INIT_DMA:
		return gamma_do_init_dma( dev, &init );
	case GAMMA_CLEANUP_DMA:
		return gamma_do_cleanup_dma( dev );
	}

	return -EINVAL;
}

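/* Screen-to-screen copy via DMA.  The command stream below is still
 * disabled with #if 0 and nothing is dispatched, so for now this only
 * locates the restricted scratch buffer reserved for the readback.
 */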
static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
{
	drm_device_dma_t    *dma = dev->dma;
	unsigned int	    *screenbuf;

	DRM_DEBUG( "%s\n", __FUNCTION__ );

	/* We've DRM_RESTRICTED this DMA buffer */

	screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;

#if 0
	*buffer++ = 0x180;	/* Tag (FilterMode) */
	*buffer++ = 0x200;	/* Allow FBColor through */
	*buffer++ = 0x53B;	/* Tag */
	*buffer++ = copy->Pitch;
	*buffer++ = 0x53A;	/* Tag */
	*buffer++ = copy->SrcAddress;
	*buffer++ = 0x539;	/* Tag */
	*buffer++ = copy->WidthHeight; /* Initiates transfer */
	*buffer++ = 0x53C;	/* Tag - DMAOutputAddress */
	*buffer++ = virt_to_phys((void*)screenbuf);
	*buffer++ = 0x53D;	/* Tag - DMAOutputCount */
	*buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/

	/* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
	/* Now put it back to the screen */

	*buffer++ = 0x180;	/* Tag (FilterMode) */
	*buffer++ = 0x400;	/* Allow Sync through */
	*buffer++ = 0x538;	/* Tag - DMARectangleReadTarget */
	*buffer++ = 0x155;	/* FBSourceData | count */
	*buffer++ = 0x537;	/* Tag */
	*buffer++ = copy->Pitch;
	*buffer++ = 0x536;	/* Tag */
	*buffer++ = copy->DstAddress;
	*buffer++ = 0x535;	/* Tag */
	*buffer++ = copy->WidthHeight; /* Initiates transfer */
	*buffer++ = 0x530;	/* Tag - DMAAddr */
	*buffer++ = virt_to_phys((void*)screenbuf);
	*buffer++ = 0x531;
	*buffer++ = copy->Count; /* initiates DMA transfer of color data */
#endif

	/* need to dispatch it now */

	return 0;
}

int gamma_dma_copy( struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_gamma_copy_t copy;

	if ( copy_from_user( &copy, (drm_gamma_copy_t *)arg, sizeof(copy) ) )
		return -EFAULT;

	return gamma_do_copy_dma( dev, &copy );
}

/* =============================================================
 * Per Context SAREA Support
 */

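/* Look up the per-context SAREA map for a context ID and hand its
 * user-space handle back to the caller.
 */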
int gamma_getsareactx(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_priv_map_t request;
	drm_map_t *map;

	if (copy_from_user(&request,
			   (drm_ctx_priv_map_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);
	if ((int)request.ctx_id >= dev->max_context) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	map = dev->context_sareas[request.ctx_id];
	up(&dev->struct_sem);

	if (!map)	/* No SAREA has been bound to this context yet */
		return -EINVAL;

	request.handle = map->handle;
	if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
		return -EFAULT;
	return 0;
}

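/* Bind an existing map, found by its user-space handle, as the SAREA for
 * the given context ID.
 */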
int gamma_setsareactx(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_priv_map_t request;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	struct list_head *list;

	if (copy_from_user(&request,
			   (drm_ctx_priv_map_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);
	r_list = NULL;
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		if (r_list->map &&
		    r_list->map->handle == request.handle)
			break;
	}
	if (list == &(dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	map = r_list->map;
	up(&dev->struct_sem);

	if (!map)
		return -EINVAL;

	down(&dev->struct_sem);
	if ((int)request.ctx_id >= dev->max_context) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	dev->context_sareas[request.ctx_id] = map;
	up(&dev->struct_sem);
	return 0;
}

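/* IRQ installation hooks: quiesce the command engine before the handler
 * goes in, enable the delay-timer and command interrupts once it is
 * installed, and mask everything again on uninstall.
 */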
void DRM(driver_irq_preinstall)( drm_device_t *dev )
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		;

	GAMMA_WRITE( GAMMA_GCOMMANDMODE,	0x00000004 );
	GAMMA_WRITE( GAMMA_GDMACONTROL,		0x00000000 );
}

void DRM(driver_irq_postinstall)( drm_device_t *dev )
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		;

	GAMMA_WRITE( GAMMA_GINTENABLE,		0x00002001 );
	GAMMA_WRITE( GAMMA_COMMANDINTENABLE,	0x00000008 );
	GAMMA_WRITE( GAMMA_GDELAYTIMER,		0x00039090 );
}

void DRM(driver_irq_uninstall)( drm_device_t *dev )
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;
	if (!dev_priv)
		return;

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		;

	GAMMA_WRITE( GAMMA_GDELAYTIMER,		0x00000000 );
	GAMMA_WRITE( GAMMA_COMMANDINTENABLE,	0x00000000 );
	GAMMA_WRITE( GAMMA_GINTENABLE,		0x00000000 );
}