/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

#include <linux/vmalloc.h>
#include "drmP.h"

#ifndef __HAVE_PCI_DMA
#define __HAVE_PCI_DMA          0
#endif

#ifndef __HAVE_SG
#define __HAVE_SG               0
#endif

#ifndef DRIVER_BUF_PRIV_T
#define DRIVER_BUF_PRIV_T               u32
#endif
#ifndef DRIVER_AGP_BUFFERS_MAP
#if __HAVE_AGP && __HAVE_DMA
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
#else
#define DRIVER_AGP_BUFFERS_MAP( dev )   NULL
#endif
#endif

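/* This file is a template, not a standalone translation unit: each driver
 * includes it after defining the DRM() name-mangling macro and the
 * __HAVE_* feature flags, so the functions below are compiled once per
 * driver under driver-specific names.  A hypothetical driver would do,
 * roughly (the "foo" prefix here is a placeholder, not a real driver):
 *
 *   #define DRM(x)          foo_##x
 *   #define __HAVE_PCI_DMA  1
 *   #include "drm_bufs.h"
 */
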
/*
 * Compute order.  Can be made faster.
 */
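/* DRM(order)(size) returns the smallest order such that (1 << order) is at
 * least size, i.e. the ceiling of log2(size); for example, 4096 maps to 12
 * and 4097 maps to 13.  The result is used to index dma->bufs[] below.
 */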
int DRM(order)( unsigned long size )
{
        int order;
        unsigned long tmp;

        for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

        if ( size & ~(1 << order) )
                ++order;

        return order;
}

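/* DRM_IOCTL_ADD_MAP ioctl handler: validate the drm_map_t request from
 * userspace, perform the type-specific setup (ioremap for register and
 * framebuffer ranges, vmalloc_32 for shared memory, offset fixups for AGP
 * and scatter/gather maps), and link the result into dev->maplist.
 */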
int DRM(addmap)( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_map_t *map;
        drm_map_list_t *list;

        if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read or write */

        map = DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        if ( copy_from_user( map, (drm_map_t *)arg, sizeof(*map) ) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EFAULT;
        }

        /* Only allow shared memory to be removable since we only keep
         * enough bookkeeping information about shared memory to allow
         * for removal when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = NULL;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
#if __REALLY_HAVE_MTRR
                if ( map->type == _DRM_FRAME_BUFFER ||
                     (map->flags & _DRM_WRITE_COMBINING) ) {
                        map->mtrr = mtrr_add( map->offset, map->size,
                                              MTRR_TYPE_WRCOMB, 1 );
                }
#endif
                map->handle = DRM(ioremap)( map->offset, map->size );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, DRM(order)( map->size ), map->handle );
                if ( !map->handle ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        dev->sigdata.lock =
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
#if __REALLY_HAVE_AGP
        case _DRM_AGP:
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                map->offset += dev->agp->base;
                map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                break;
#endif
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += dev->sg->handle;
                break;

        default:
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
        if(!list) {
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
        up(&dev->struct_sem);

        if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
                return -EFAULT;
        if ( map->type != _DRM_SHM ) {
                if ( copy_to_user( &((drm_map_t *)arg)->handle,
                                   &map->offset,
                                   sizeof(map->offset) ) )
                        return -EFAULT;
        }
        return 0;
}


/* Remove a map from dev->maplist and deallocate its resources if the
 * mapping is no longer referenced by any of the VMAs this driver tracks.
 */

int DRM(rmmap)(struct inode *inode, struct file *filp,
               unsigned int cmd, unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_vma_entry_t *pt, *prev;
        drm_map_t *map;
        drm_map_t request;
        int found_maps = 0;

        if (copy_from_user(&request, (drm_map_t *)arg,
                           sizeof(request))) {
                return -EFAULT;
        }

        down(&dev->struct_sem);
        list = &dev->maplist->head;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if(r_list->map &&
                   r_list->map->handle == request.handle &&
                   r_list->map->flags & _DRM_REMOVABLE) break;
        }

        /* The search wrapped around to the head pointer: either the list
         * is empty or no matching removable map was found.
         */
        if(list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        list_del(list);
        DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);

        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma->vm_private_data == map) found_maps++;
        }

        if(!found_maps) {
                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
                        if (map->mtrr >= 0) {
                                int retcode;
                                retcode = mtrr_del(map->mtrr,
                                                   map->offset,
                                                   map->size);
                                DRM_DEBUG("mtrr_del = %d\n", retcode);
                        }
#endif
                        DRM(ioremapfree)(map->handle, map->size);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                }
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
        }
        up(&dev->struct_sem);
        return 0;
}

#if __HAVE_DMA


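/* Undo a partially completed addbufs: release every segment and buffer the
 * entry still holds.  On the failure paths below, callers first set
 * entry->buf_count (and seg_count) to the full allocation count so these
 * loops walk the zero-initialized tail harmlessly and free exactly what was
 * actually allocated.
 */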
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                DRM(free_pages)(entry->seglist[i],
                                                entry->page_order,
                                                DRM_MEM_DMA);
                        }
                }
                DRM(free)(entry->seglist,
                          entry->seg_count *
                          sizeof(*entry->seglist),
                          DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                DRM(free)(entry->buflist[i].dev_private,
                                          entry->buflist[i].dev_priv_size,
                                          DRM_MEM_BUFS);
                        }
                }
                DRM(free)(entry->buflist,
                          entry->buf_count *
                          sizeof(*entry->buflist),
                          DRM_MEM_BUFS);

#if __HAVE_DMA_FREELIST
                DRM(freelist_destroy)(&entry->freelist);
#endif

                entry->buf_count = 0;
        }
}

#if __REALLY_HAVE_AGP
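/* Add AGP-backed DMA buffers (DRM_IOCTL_ADD_BUFS with _DRM_AGP_BUFFER).
 * The buffers live in previously bound AGP space at request.agp_start, so
 * no pages are allocated here; each buffer just gets an offset, a bus
 * address, and a zeroed driver-private DRIVER_BUF_PRIV_T structure.
 */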
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                     DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __REALLY_HAVE_AGP */

#if __HAVE_PCI_DMA
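/* Add PCI DMA buffers.  Pages come from DRM(alloc_pages) in chunks of
 * 2^page_order pages and are carved into buffers of `alignment' bytes.
 * A staging copy of the pagelist is built first so that if any allocation
 * fails, dma->pagelist is left untouched; only after every allocation has
 * succeeded is the old pagelist freed and replaced.
 */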
int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request.count, request.size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                     DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
                                     DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                DRM(free)( entry->buflist,
                           count * sizeof(*entry->buflist),
                           DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = DRM(alloc)( (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES );
        if (!temp_pagelist) {
                DRM(free)( entry->buflist,
                           count * sizeof(*entry->buflist),
                           DRM_MEM_BUFS );
                DRM(free)( entry->seglist,
                           count * sizeof(*entry->seglist),
                           DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        DRM(cleanup_buf_error)(entry);
                        DRM(free)( temp_pagelist,
                                   (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp    = NULL;

                        buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                        buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                                       DRM_MEM_BUFS );
                        if(!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                DRM(cleanup_buf_error)(entry);
                                DRM(free)( temp_pagelist,
                                           (dma->page_count + (count << page_order))
                                           * sizeof(*dma->pagelist),
                                           DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                DRM(free)( temp_pagelist,
                           (dma->page_count + (count << page_order))
                           * sizeof(*dma->pagelist),
                           DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                DRM(free)(dma->pagelist,
                          dma->page_count * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __HAVE_PCI_DMA */

#if __HAVE_SG
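/* Add buffers backed by a scatter/gather memory area (DRM_IOCTL_ADD_BUFS
 * with _DRM_SG_BUFFER).  Like the AGP case this allocates no pages: offsets
 * are relative to request.agp_start within the dev->sg region, and the
 * kernel virtual address is formed by adding dev->sg->handle.
 */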
int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                        ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                     DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __HAVE_SG */

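/* Top-level DRM_IOCTL_ADD_BUFS entry point: peek at request.flags and
 * dispatch to the AGP, scatter/gather, or PCI variant compiled into this
 * driver.
 */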
int DRM(addbufs)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __REALLY_HAVE_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                return DRM(addbufs_agp)( inode, filp, cmd, arg );
        else
#endif
#if __HAVE_SG
        if ( request.flags & _DRM_SG_BUFFER )
                return DRM(addbufs_sg)( inode, filp, cmd, arg );
        else
#endif
#if __HAVE_PCI_DMA
                return DRM(addbufs_pci)( inode, filp, cmd, arg );
#else
                return -EINVAL;
#endif
}

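/* DRM_IOCTL_INFO_BUFS: report the buffer count, size, and freelist
 * watermarks for each populated order.  Bumping dev->buf_use here (under
 * count_lock, with no allocation in flight) freezes buffer allocation for
 * the rest of the device's lifetime, so the data returned stays valid.
 */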
int DRM(infobufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        int i;
        int count;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request,
                             (drm_buf_info_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( (drm_buf_info_t *)arg,
                           &request,
                           sizeof(request) ) )
                return -EFAULT;

        return 0;
}

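/* DRM_IOCTL_MARK_BUFS: set the freelist low and high watermarks for the
 * order matching request.size.
 */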
int DRM(markbufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = DRM(order)( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

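/* DRM_IOCTL_FREE_BUFS: take an array of buffer indices from userspace and
 * return each buffer to the free pool, refusing indices out of range or
 * buffers owned by a different file handle.
 */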
int DRM(freebufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                DRM(free_buffer)( dev, buf );
        }

        return 0;
}

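/* DRM_IOCTL_MAP_BUFS: mmap the buffer region (the AGP/SG map or the PCI
 * buffer space) into the client and copy out an (idx, total, used, address)
 * record per buffer.  do_mmap returns either a mapped address or a negative
 * errno cast to unsigned long, which is why failure is detected below with
 * `virtual > -1024UL'.
 */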
int DRM(mapbufs)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, (drm_buf_map_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
                     (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
                        drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                } else {
#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                }
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( (drm_buf_map_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}

#endif /* __HAVE_DMA */