Added some SUSE-specific patches to the kernel.
[linux-flexiantxendom0-3.2.10.git] / drivers / char / drm / drm_drv.h
1 /* drm_drv.h -- Generic driver template -*- linux-c -*-
2  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
3  *
4  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25  * OTHER DEALINGS IN THE SOFTWARE.
26  *
27  * Authors:
28  *    Rickard E. (Rik) Faith <faith@valinux.com>
29  *    Gareth Hughes <gareth@valinux.com>
30  */
31
32 /*
33  * To use this template, you must at least define the following (samples
34  * given for the MGA driver):
35  *
36  * #define DRIVER_AUTHOR        "VA Linux Systems, Inc."
37  *
38  * #define DRIVER_NAME          "mga"
39  * #define DRIVER_DESC          "Matrox G200/G400"
40  * #define DRIVER_DATE          "20001127"
41  *
42  * #define DRIVER_MAJOR         2
43  * #define DRIVER_MINOR         0
44  * #define DRIVER_PATCHLEVEL    2
45  *
46  * #define DRIVER_IOCTL_COUNT   DRM_ARRAY_SIZE( mga_ioctls )
47  *
48  * #define DRM(x)               mga_##x
49  */
50
/* Default every optional feature flag to "off" (0) so that a driver
 * using this template only needs to #define the capabilities it
 * actually supports before including this file. */
#ifndef __MUST_HAVE_AGP
#define __MUST_HAVE_AGP                 0
#endif
#ifndef __HAVE_CTX_BITMAP
#define __HAVE_CTX_BITMAP               0
#endif
#ifndef __HAVE_DMA_IRQ
#define __HAVE_DMA_IRQ                  0
#endif
#ifndef __HAVE_DMA_QUEUE
#define __HAVE_DMA_QUEUE                0
#endif
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
#define __HAVE_MULTIPLE_DMA_QUEUES      0
#endif
#ifndef __HAVE_DMA_SCHEDULE
#define __HAVE_DMA_SCHEDULE             0
#endif
#ifndef __HAVE_DMA_FLUSH
#define __HAVE_DMA_FLUSH                0
#endif
#ifndef __HAVE_DMA_READY
#define __HAVE_DMA_READY                0
#endif
#ifndef __HAVE_DMA_QUIESCENT
#define __HAVE_DMA_QUIESCENT            0
#endif
#ifndef __HAVE_RELEASE
#define __HAVE_RELEASE                  0
#endif
#ifndef __HAVE_COUNTERS
#define __HAVE_COUNTERS                 0
#endif
#ifndef __HAVE_SG
#define __HAVE_SG                       0
#endif
#ifndef __HAVE_KERNEL_CTX_SWITCH
#define __HAVE_KERNEL_CTX_SWITCH        0
#endif
#ifndef __HAVE_DRIVER_FOPS_READ
#define __HAVE_DRIVER_FOPS_READ         0
#endif
#ifndef __HAVE_DRIVER_FOPS_POLL
#define __HAVE_DRIVER_FOPS_POLL         0
#endif
96
/* Driver lifecycle hooks: each defaults to a no-op statement, so a
 * driver only overrides the stages it needs. */
#ifndef DRIVER_PREINIT
#define DRIVER_PREINIT()
#endif
#ifndef DRIVER_POSTINIT
#define DRIVER_POSTINIT()
#endif
#ifndef DRIVER_PRERELEASE
#define DRIVER_PRERELEASE()
#endif
#ifndef DRIVER_PRETAKEDOWN
#define DRIVER_PRETAKEDOWN()
#endif
#ifndef DRIVER_POSTCLEANUP
#define DRIVER_POSTCLEANUP()
#endif
#ifndef DRIVER_PRESETUP
#define DRIVER_PRESETUP()
#endif
#ifndef DRIVER_POSTSETUP
#define DRIVER_POSTSETUP()
#endif
/* Driver-specific entries appended to the end of DRM(ioctls)[] below. */
#ifndef DRIVER_IOCTLS
#define DRIVER_IOCTLS
#endif
/* Default file_operations table for the device node; a driver may
 * define DRIVER_FOPS itself to substitute its own entry points. */
#ifndef DRIVER_FOPS
#define DRIVER_FOPS                             \
static struct file_operations   DRM(fops) = {   \
        .owner   = THIS_MODULE,                 \
        .open    = DRM(open),                   \
        .flush   = DRM(flush),                  \
        .release = DRM(release),                \
        .ioctl   = DRM(ioctl),                  \
        .mmap    = DRM(mmap),                   \
        .fasync  = DRM(fasync),                 \
        .poll    = DRM(poll),                   \
        .read    = DRM(read),                   \
}
#endif
135
#ifndef MODULE
/* DRM(options) is called by the kernel to parse command-line options
 * passed via the boot-loader (e.g., LILO).  It calls the insmod option
 * routine, drm_parse_drm.
 */
/* Use an additional macro to avoid preprocessor troubles */
#define DRM_OPTIONS_FUNC DRM(options)
static int __init DRM(options)( char *str )
{
        DRM(parse_options)( str );
        return 1;       /* 1 == option consumed, per __setup() convention */
}

/* Register "DRIVER_NAME=..." as a kernel boot parameter. */
__setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC );
#undef DRM_OPTIONS_FUNC
#endif
152
/*
 * The default number of instances (minor numbers) to initialize.
 */
#ifndef DRIVER_NUM_CARDS
#define DRIVER_NUM_CARDS 1
#endif

/* Per-module instance tables: one drm_device_t and one minor number per
 * detected card.  Both arrays are kmalloc'd in drm_init() once
 * DRM(numdevs) is known, and freed in drm_cleanup(). */
static drm_device_t     *DRM(device);
static int              *DRM(minor);
static int              DRM(numdevs) = 0;

/* Expands to the definition of DRM(fops) (the default above, or the
 * driver's override). */
DRIVER_FOPS;
165
/* Master ioctl dispatch table, indexed by ioctl number via designated
 * initializers.  Each entry is { handler, flag, flag }; judging from
 * the pattern of use (e.g. privileged calls carry 1,1 and queries 0,0)
 * the flags appear to be (authentication required, root-only) --
 * NOTE(review): confirm against the drm_ioctl_desc_t definition. */
static drm_ioctl_desc_t           DRM(ioctls)[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION)]       = { DRM(version),     0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]    = { DRM(getunique),   0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]     = { DRM(getmagic),    0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]     = { DRM(irq_busid),   0, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)]       = { DRM(getmap),      0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)]    = { DRM(getclient),   0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)]     = { DRM(getstats),    0, 0 },

        [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]    = { DRM(setunique),   1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]         = { DRM(noop),        1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]       = { DRM(noop),        1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]    = { DRM(authmagic),   1, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { DRM(addmap),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { DRM(rmmap),       1, 0 },

#if __HAVE_CTX_BITMAP
        [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
#endif

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]       = { DRM(addctx),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]        = { DRM(rmctx),       1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]       = { DRM(modctx),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]       = { DRM(getctx),      1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]    = { DRM(switchctx),   1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]       = { DRM(newctx),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]       = { DRM(resctx),      1, 0 },

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]      = { DRM(adddraw),     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]       = { DRM(rmdraw),      1, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_LOCK)]          = { DRM(lock),        1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]        = { DRM(unlock),      1, 0 },

#if __HAVE_DMA_FLUSH
        /* Gamma only, really */
        [DRM_IOCTL_NR(DRM_IOCTL_FINISH)]        = { DRM(finish),      1, 0 },
#else
        /* FINISH still accepted but a no-op when the driver can't flush. */
        [DRM_IOCTL_NR(DRM_IOCTL_FINISH)]        = { DRM(noop),      1, 0 },
#endif

#if __HAVE_DMA
        [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]      = { DRM(addbufs),     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]     = { DRM(markbufs),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]     = { DRM(infobufs),    1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]      = { DRM(mapbufs),     1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]     = { DRM(freebufs),    1, 0 },

        /* The DRM_IOCTL_DMA ioctl should be defined by the driver.
         */
        [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]       = { DRM(control),     1, 1 },
#endif

#if __REALLY_HAVE_AGP
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { DRM(agp_acquire), 1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { DRM(agp_release), 1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { DRM(agp_enable),  1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { DRM(agp_info),    1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = { DRM(agp_alloc),   1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = { DRM(agp_free),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = { DRM(agp_bind),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]    = { DRM(agp_unbind),  1, 1 },
#endif

#if __HAVE_SG
        [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)]      = { DRM(sg_alloc),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)]       = { DRM(sg_free),     1, 1 },
#endif

#if __HAVE_VBL_IRQ
        [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)]   = { DRM(wait_vblank), 0, 0 },
#endif

        DRIVER_IOCTLS
};

/* Number of entries in the dispatch table (array size is fixed by the
 * highest designated index plus any DRIVER_IOCTLS additions). */
#define DRIVER_IOCTL_COUNT      DRM_ARRAY_SIZE( DRM(ioctls) )
245
#ifdef MODULE
/* Option string supplied at insmod time ("drm_opts=..."); parsed by
 * DRM(parse_options) in drm_init(). */
static char *drm_opts = NULL;
#endif

MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_PARM( drm_opts, "s" );
MODULE_LICENSE("GPL and additional rights");
254
255 static int DRM(setup)( drm_device_t *dev )
256 {
257         int i;
258
259         DRIVER_PRESETUP();
260         atomic_set( &dev->ioctl_count, 0 );
261         atomic_set( &dev->vma_count, 0 );
262         dev->buf_use = 0;
263         atomic_set( &dev->buf_alloc, 0 );
264
265 #if __HAVE_DMA
266         i = DRM(dma_setup)( dev );
267         if ( i < 0 )
268                 return i;
269 #endif
270
271         dev->counters  = 6 + __HAVE_COUNTERS;
272         dev->types[0]  = _DRM_STAT_LOCK;
273         dev->types[1]  = _DRM_STAT_OPENS;
274         dev->types[2]  = _DRM_STAT_CLOSES;
275         dev->types[3]  = _DRM_STAT_IOCTLS;
276         dev->types[4]  = _DRM_STAT_LOCKS;
277         dev->types[5]  = _DRM_STAT_UNLOCKS;
278 #ifdef __HAVE_COUNTER6
279         dev->types[6]  = __HAVE_COUNTER6;
280 #endif
281 #ifdef __HAVE_COUNTER7
282         dev->types[7]  = __HAVE_COUNTER7;
283 #endif
284 #ifdef __HAVE_COUNTER8
285         dev->types[8]  = __HAVE_COUNTER8;
286 #endif
287 #ifdef __HAVE_COUNTER9
288         dev->types[9]  = __HAVE_COUNTER9;
289 #endif
290 #ifdef __HAVE_COUNTER10
291         dev->types[10] = __HAVE_COUNTER10;
292 #endif
293 #ifdef __HAVE_COUNTER11
294         dev->types[11] = __HAVE_COUNTER11;
295 #endif
296 #ifdef __HAVE_COUNTER12
297         dev->types[12] = __HAVE_COUNTER12;
298 #endif
299 #ifdef __HAVE_COUNTER13
300         dev->types[13] = __HAVE_COUNTER13;
301 #endif
302 #ifdef __HAVE_COUNTER14
303         dev->types[14] = __HAVE_COUNTER14;
304 #endif
305 #ifdef __HAVE_COUNTER15
306         dev->types[14] = __HAVE_COUNTER14;
307 #endif
308
309         for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
310                 atomic_set( &dev->counts[i], 0 );
311
312         for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
313                 dev->magiclist[i].head = NULL;
314                 dev->magiclist[i].tail = NULL;
315         }
316
317         dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
318                                   DRM_MEM_MAPS);
319         if(dev->maplist == NULL) return -ENOMEM;
320         memset(dev->maplist, 0, sizeof(*dev->maplist));
321         INIT_LIST_HEAD(&dev->maplist->head);
322
323         dev->vmalist = NULL;
324         dev->sigdata.lock = dev->lock.hw_lock = NULL;
325         init_waitqueue_head( &dev->lock.lock_queue );
326         dev->queue_count = 0;
327         dev->queue_reserved = 0;
328         dev->queue_slots = 0;
329         dev->queuelist = NULL;
330         dev->irq = 0;
331         dev->context_flag = 0;
332         dev->interrupt_flag = 0;
333         dev->dma_flag = 0;
334         dev->last_context = 0;
335         dev->last_switch = 0;
336         dev->last_checked = 0;
337         init_timer( &dev->timer );
338         init_waitqueue_head( &dev->context_wait );
339
340         dev->ctx_start = 0;
341         dev->lck_start = 0;
342
343         dev->buf_rp = dev->buf;
344         dev->buf_wp = dev->buf;
345         dev->buf_end = dev->buf + DRM_BSZ;
346         dev->buf_async = NULL;
347         init_waitqueue_head( &dev->buf_readers );
348         init_waitqueue_head( &dev->buf_writers );
349
350         DRM_DEBUG( "\n" );
351
352         /* The kernel's context could be created here, but is now created
353          * in drm_dma_enqueue.  This is more resource-efficient for
354          * hardware that does not do DMA, but may mean that
355          * drm_select_queue fails between the time the interrupt is
356          * initialized and the time the queues are initialized.
357          */
358         DRIVER_POSTSETUP();
359         return 0;
360 }
361
362
363 static int DRM(takedown)( drm_device_t *dev )
364 {
365         drm_magic_entry_t *pt, *next;
366         drm_map_t *map;
367         drm_map_list_t *r_list;
368         struct list_head *list, *list_next;
369         drm_vma_entry_t *vma, *vma_next;
370         int i;
371
372         DRM_DEBUG( "\n" );
373
374         DRIVER_PRETAKEDOWN();
375 #if __HAVE_DMA_IRQ
376         if ( dev->irq ) DRM(irq_uninstall)( dev );
377 #endif
378
379         down( &dev->struct_sem );
380         del_timer( &dev->timer );
381
382         if ( dev->devname ) {
383                 DRM(free)( dev->devname, strlen( dev->devname ) + 1,
384                            DRM_MEM_DRIVER );
385                 dev->devname = NULL;
386         }
387
388         if ( dev->unique ) {
389                 DRM(free)( dev->unique, strlen( dev->unique ) + 1,
390                            DRM_MEM_DRIVER );
391                 dev->unique = NULL;
392                 dev->unique_len = 0;
393         }
394                                 /* Clear pid list */
395         for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
396                 for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
397                         next = pt->next;
398                         DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
399                 }
400                 dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
401         }
402
403 #if __REALLY_HAVE_AGP
404                                 /* Clear AGP information */
405         if ( dev->agp ) {
406                 drm_agp_mem_t *entry;
407                 drm_agp_mem_t *nexte;
408
409                                 /* Remove AGP resources, but leave dev->agp
410                                    intact until drv_cleanup is called. */
411                 for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
412                         nexte = entry->next;
413                         if ( entry->bound ) DRM(unbind_agp)( entry->memory );
414                         DRM(free_agp)( entry->memory, entry->pages );
415                         DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
416                 }
417                 dev->agp->memory = NULL;
418
419                 if ( dev->agp->acquired ) DRM(agp_do_release)();
420
421                 dev->agp->acquired = 0;
422                 dev->agp->enabled  = 0;
423         }
424 #endif
425
426                                 /* Clear vma list (only built for debugging) */
427         if ( dev->vmalist ) {
428                 for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
429                         vma_next = vma->next;
430                         DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
431                 }
432                 dev->vmalist = NULL;
433         }
434
435         if( dev->maplist ) {
436                 for(list = dev->maplist->head.next;
437                     list != &dev->maplist->head;
438                     list = list_next) {
439                         list_next = list->next;
440                         r_list = (drm_map_list_t *)list;
441                         map = r_list->map;
442                         DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
443                         if(!map) continue;
444
445                         switch ( map->type ) {
446                         case _DRM_REGISTERS:
447                         case _DRM_FRAME_BUFFER:
448 #if __REALLY_HAVE_MTRR
449                                 if ( map->mtrr >= 0 ) {
450                                         int retcode;
451                                         retcode = mtrr_del( map->mtrr,
452                                                             map->offset,
453                                                             map->size );
454                                         DRM_DEBUG( "mtrr_del=%d\n", retcode );
455                                 }
456 #endif
457                                 DRM(ioremapfree)( map->handle, map->size );
458                                 break;
459                         case _DRM_SHM:
460                                 vfree(map->handle);
461                                 break;
462
463                         case _DRM_AGP:
464                                 /* Do nothing here, because this is all
465                                  * handled in the AGP/GART driver.
466                                  */
467                                 break;
468                        case _DRM_SCATTER_GATHER:
469                                 /* Handle it, but do nothing, if HAVE_SG
470                                  * isn't defined.
471                                  */
472 #if __HAVE_SG
473                                 if(dev->sg) {
474                                         DRM(sg_cleanup)(dev->sg);
475                                         dev->sg = NULL;
476                                 }
477 #endif
478                                 break;
479                         }
480                         DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
481                 }
482                 DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
483                 dev->maplist = NULL;
484         }
485
486 #if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
487         if ( dev->queuelist ) {
488                 for ( i = 0 ; i < dev->queue_count ; i++ ) {
489 #if __HAVE_DMA_WAITLIST
490                         DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
491 #endif
492                         if ( dev->queuelist[i] ) {
493                                 DRM(free)( dev->queuelist[i],
494                                           sizeof(*dev->queuelist[0]),
495                                           DRM_MEM_QUEUES );
496                                 dev->queuelist[i] = NULL;
497                         }
498                 }
499                 DRM(free)( dev->queuelist,
500                           dev->queue_slots * sizeof(*dev->queuelist),
501                           DRM_MEM_QUEUES );
502                 dev->queuelist = NULL;
503         }
504         dev->queue_count = 0;
505 #endif
506
507 #if __HAVE_DMA
508         DRM(dma_takedown)( dev );
509 #endif
510         if ( dev->lock.hw_lock ) {
511                 dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
512                 dev->lock.filp = 0;
513                 wake_up_interruptible( &dev->lock.lock_queue );
514         }
515         up( &dev->struct_sem );
516
517         return 0;
518 }
519
520 /*
521  * Figure out how many instances to initialize.
522  */
523 static int drm_count_cards(void)
524 {
525         int num = 0;
526 #if defined(DRIVER_CARD_LIST)
527         int i;
528         drm_pci_list_t *l;
529         u16 device, vendor;
530         struct pci_dev *pdev = NULL;
531 #endif
532
533         DRM_DEBUG( "\n" );
534
535 #if defined(DRIVER_COUNT_CARDS)
536         num = DRIVER_COUNT_CARDS();
537 #elif defined(DRIVER_CARD_LIST)
538         for (i = 0, l = DRIVER_CARD_LIST; l[i].vendor != 0; i++) {
539                 pdev = NULL;
540                 vendor = l[i].vendor;
541                 device = l[i].device;
542                 if(device == 0xffff) device = PCI_ANY_ID;
543                 if(vendor == 0xffff) vendor = PCI_ANY_ID;
544                 while ((pdev = pci_find_device(vendor, device, pdev))) {
545                         num++;
546                 }
547         }
548 #else
549         num = DRIVER_NUM_CARDS;
550 #endif
551         DRM_DEBUG("numdevs = %d\n", num);
552         return num;
553 }
554
555 /* drm_init is called via init_module at module load time, or via
556  * linux/init/main.c (this is not currently supported).
557  */
558 static int __init drm_init( void )
559 {
560
561         drm_device_t *dev;
562         int i;
563         int retcode;
564         DRM_DEBUG( "\n" );
565
566 #ifdef MODULE
567         DRM(parse_options)( drm_opts );
568 #endif
569
570         DRM(numdevs) = drm_count_cards();
571         /* Force at least one instance. */
572         if (DRM(numdevs) <= 0)
573                 DRM(numdevs) = 1;
574
575         DRM(device) = kmalloc(sizeof(*DRM(device)) * DRM(numdevs), GFP_KERNEL);
576         if (!DRM(device)) {
577                 return -ENOMEM;
578         }
579         DRM(minor) = kmalloc(sizeof(*DRM(minor)) * DRM(numdevs), GFP_KERNEL);
580         if (!DRM(minor)) {
581                 kfree(DRM(device));
582                 return -ENOMEM;
583         }
584
585         DRIVER_PREINIT();
586
587         DRM(mem_init)();
588
589         for (i = 0; i < DRM(numdevs); i++) {
590                 dev = &(DRM(device)[i]);
591                 memset( (void *)dev, 0, sizeof(*dev) );
592                 dev->count_lock = SPIN_LOCK_UNLOCKED;
593                 sema_init( &dev->struct_sem, 1 );
594
595                 if ((DRM(minor)[i] = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0)
596                         return -EPERM;
597                 dev->device = MKDEV(DRM_MAJOR, DRM(minor)[i] );
598                 dev->name   = DRIVER_NAME;
599
600 #if __REALLY_HAVE_AGP
601                 dev->agp = DRM(agp_init)();
602 #if __MUST_HAVE_AGP
603                 if ( dev->agp == NULL ) {
604                         DRM_ERROR( "Cannot initialize the agpgart module.\n" );
605                         DRM(stub_unregister)(DRM(minor)[i]);
606                         DRM(takedown)( dev );
607                         return -ENOMEM;
608                 }
609 #endif
610 #if __REALLY_HAVE_MTRR
611                 if (dev->agp)
612                         dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
613                                        dev->agp->agp_info.aper_size*1024*1024,
614                                        MTRR_TYPE_WRCOMB,
615                                        1 );
616 #endif
617 #endif
618
619 #if __HAVE_CTX_BITMAP
620                 retcode = DRM(ctxbitmap_init)( dev );
621                 if( retcode ) {
622                         DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
623                         DRM(stub_unregister)(DRM(minor)[i]);
624                         DRM(takedown)( dev );
625                         return retcode;
626                 }
627 #endif
628                 DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
629                         DRIVER_NAME,
630                         DRIVER_MAJOR,
631                         DRIVER_MINOR,
632                         DRIVER_PATCHLEVEL,
633                         DRIVER_DATE,
634                         DRM(minor)[i] );
635         }
636
637         DRIVER_POSTINIT();
638
639         return 0;
640 }
641
/* drm_cleanup is called via cleanup_module at module unload time.
 */
static void __exit drm_cleanup( void )
{
        drm_device_t *dev;
        int i;

        DRM_DEBUG( "\n" );

        /* Tear instances down in reverse registration order. */
        for (i = DRM(numdevs) - 1; i >= 0; i--) {
                dev = &(DRM(device)[i]);
                if ( DRM(stub_unregister)(DRM(minor)[i]) ) {
                        DRM_ERROR( "Cannot unload module\n" );
                } else {
                        DRM_DEBUG("minor %d unregistered\n", DRM(minor)[i]);
                        if (i == 0) {
                                DRM_INFO( "Module unloaded\n" );
                        }
                }
#if __HAVE_CTX_BITMAP
                DRM(ctxbitmap_cleanup)( dev );
#endif

#if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
                /* Remove the write-combining MTRR added in drm_init(). */
                if ( dev->agp && dev->agp->agp_mtrr >= 0) {
                        int retval;
                        retval = mtrr_del( dev->agp->agp_mtrr,
                                   dev->agp->agp_info.aper_base,
                                   dev->agp->agp_info.aper_size*1024*1024 );
                        DRM_DEBUG( "mtrr_del=%d\n", retval );
                }
#endif

                DRM(takedown)( dev );

#if __REALLY_HAVE_AGP
                /* DRM(takedown) leaves dev->agp allocated; free it now. */
                if ( dev->agp ) {
                        DRM(agp_uninit)();
                        DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
                        dev->agp = NULL;
                }
#endif
        }
        DRIVER_POSTCLEANUP();
        kfree(DRM(minor));
        kfree(DRM(device));
        DRM(numdevs) = 0;
}
690
/* Standard module entry and exit points. */
module_init( drm_init );
module_exit( drm_cleanup );
693
694
695 int DRM(version)( struct inode *inode, struct file *filp,
696                   unsigned int cmd, unsigned long arg )
697 {
698         drm_version_t version;
699         int len;
700
701         if ( copy_from_user( &version,
702                              (drm_version_t *)arg,
703                              sizeof(version) ) )
704                 return -EFAULT;
705
706 #define DRM_COPY( name, value )                                         \
707         len = strlen( value );                                          \
708         if ( len > name##_len ) len = name##_len;                       \
709         name##_len = strlen( value );                                   \
710         if ( len && name ) {                                            \
711                 if ( copy_to_user( name, value, len ) )                 \
712                         return -EFAULT;                                 \
713         }
714
715         version.version_major = DRIVER_MAJOR;
716         version.version_minor = DRIVER_MINOR;
717         version.version_patchlevel = DRIVER_PATCHLEVEL;
718
719         DRM_COPY( version.name, DRIVER_NAME );
720         DRM_COPY( version.date, DRIVER_DATE );
721         DRM_COPY( version.desc, DRIVER_DESC );
722
723         if ( copy_to_user( (drm_version_t *)arg,
724                            &version,
725                            sizeof(version) ) )
726                 return -EFAULT;
727         return 0;
728 }
729
730 int DRM(open)( struct inode *inode, struct file *filp )
731 {
732         drm_device_t *dev = NULL;
733         int retcode = 0;
734         int i;
735
736         for (i = 0; i < DRM(numdevs); i++) {
737                 if (minor(inode->i_rdev) == DRM(minor)[i]) {
738                         dev = &(DRM(device)[i]);
739                         break;
740                 }
741         }
742         if (!dev) {
743                 return -ENODEV;
744         }
745
746         retcode = DRM(open_helper)( inode, filp, dev );
747         if ( !retcode ) {
748                 atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
749                 spin_lock( &dev->count_lock );
750                 if ( !dev->open_count++ ) {
751                         spin_unlock( &dev->count_lock );
752                         return DRM(setup)( dev );
753                 }
754                 spin_unlock( &dev->count_lock );
755         }
756
757         return retcode;
758 }
759
/* Release entry point: drop (or reclaim under) the hardware lock on
 * behalf of the closing file, detach the file from the per-device
 * client list, and tear the device down on the last close.  Runs under
 * the BKL for the whole body.
 */
int DRM(release)( struct inode *inode, struct file *filp )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        int retcode = 0;

        lock_kernel();
        dev = priv->dev;

        DRM_DEBUG( "open_count = %d\n", dev->open_count );

        DRIVER_PRERELEASE();

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
                   current->pid, (long)dev->device, dev->open_count );

        /* If this file currently holds the hardware lock, free it. */
        if ( priv->lock_count && dev->lock.hw_lock &&
             _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
             dev->lock.filp == filp ) {
                DRM_DEBUG( "File %p released, freeing lock for context %d\n",
                        filp,
                        _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
#if __HAVE_RELEASE
                DRIVER_RELEASE();
#endif
                DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
                                _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );

                                /* FIXME: may require heavy-handed reset of
                                   hardware at this point, possibly
                                   processed via a callback to the X
                                   server. */
        }
#if __HAVE_RELEASE
        /* Otherwise take the lock ourselves (waiting out any current
         * holder) so DRIVER_RELEASE can reclaim buffers safely. */
        else if ( priv->lock_count && dev->lock.hw_lock ) {
                /* The lock is required to reclaim buffers */
                DECLARE_WAITQUEUE( entry, current );

                add_wait_queue( &dev->lock.lock_queue, &entry );
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if ( !dev->lock.hw_lock ) {
                                /* Device has been unregistered */
                                retcode = -EINTR;
                                break;
                        }
                        if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
                                             DRM_KERNEL_CONTEXT ) ) {
                                dev->lock.filp      = filp;
                                dev->lock.lock_time = jiffies;
                                atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
                                break;  /* Got lock */
                        }
                                /* Contention */
                        schedule();
                        if ( signal_pending( current ) ) {
                                retcode = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue( &dev->lock.lock_queue, &entry );
                if( !retcode ) {
                        DRIVER_RELEASE();
                        DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
                                        DRM_KERNEL_CONTEXT );
                }
        }
#elif __HAVE_DMA
        DRM(reclaim_buffers)( filp );
#endif

        DRM(fasync)( -1, filp, 0 );

        /* Unlink this file from the device's doubly-linked client list;
         * optionally de-authenticate all remaining clients first. */
        down( &dev->struct_sem );
        if ( priv->remove_auth_on_close == 1 ) {
                drm_file_t *temp = dev->file_first;
                while ( temp ) {
                        temp->authenticated = 0;
                        temp = temp->next;
                }
        }
        if ( priv->prev ) {
                priv->prev->next = priv->next;
        } else {
                dev->file_first  = priv->next;
        }
        if ( priv->next ) {
                priv->next->prev = priv->prev;
        } else {
                dev->file_last   = priv->prev;
        }
        up( &dev->struct_sem );

        DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );

        /* ========================================================
         * End inline drm_release
         */

        atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
        spin_lock( &dev->count_lock );
        /* Last closer tears the device down -- unless ioctls are still
         * in flight, in which case report busy. */
        if ( !--dev->open_count ) {
                if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
                        DRM_ERROR( "Device busy: %d %d\n",
                                   atomic_read( &dev->ioctl_count ),
                                   dev->blocked );
                        spin_unlock( &dev->count_lock );
                        unlock_kernel();
                        return -EBUSY;
                }
                spin_unlock( &dev->count_lock );
                unlock_kernel();
                return DRM(takedown)( dev );
        }
        spin_unlock( &dev->count_lock );

        unlock_kernel();

        return retcode;
}
885
886 /* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm.
887  */
888 int DRM(ioctl)( struct inode *inode, struct file *filp,
889                 unsigned int cmd, unsigned long arg )
890 {
891         drm_file_t *priv = filp->private_data;
892         drm_device_t *dev = priv->dev;
893         drm_ioctl_desc_t *ioctl;
894         drm_ioctl_t *func;
895         int nr = DRM_IOCTL_NR(cmd);
896         int retcode = 0;
897
898         atomic_inc( &dev->ioctl_count );
899         atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
900         ++priv->ioctl_count;
901
902         DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
903                    current->pid, cmd, nr, (long)dev->device, 
904                    priv->authenticated );
905
906         if ( nr >= DRIVER_IOCTL_COUNT ) {
907                 retcode = -EINVAL;
908         } else {
909                 ioctl = &DRM(ioctls)[nr];
910                 func = ioctl->func;
911
912                 if ( !func ) {
913                         DRM_DEBUG( "no function\n" );
914                         retcode = -EINVAL;
915                 } else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )||
916                             ( ioctl->auth_needed && !priv->authenticated ) ) {
917                         retcode = -EACCES;
918                 } else {
919                         retcode = func( inode, filp, cmd, arg );
920                 }
921         }
922
923         atomic_dec( &dev->ioctl_count );
924         return retcode;
925 }
926
/* DRM(lock) implements the lock ioctl: block (interruptibly) until the
 * hardware lock word can be taken for the caller's context, then mask
 * job-control signals so the process cannot be stopped while it holds
 * the lock.  Returns 0 on success, -EFAULT on a bad user pointer,
 * -EINVAL for the kernel context or an out-of-range queue context,
 * -EINTR if the device vanishes while waiting, -ERESTARTSYS on signal.
 */
int DRM(lock)( struct inode *inode, struct file *filp,
               unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	DECLARE_WAITQUEUE( entry, current );
	drm_lock_t lock;
	int ret = 0;
#if __HAVE_MULTIPLE_DMA_QUEUES
	drm_queue_t *q;
#endif

	/* Remember that this file has tried to take the lock; the
	 * release path uses lock_count to decide whether buffers may
	 * need to be reclaimed under the lock. */
	++priv->lock_count;

	if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
		return -EFAULT;

	/* DRM_KERNEL_CONTEXT is reserved for in-kernel use; refuse it. */
	if ( lock.context == DRM_KERNEL_CONTEXT ) {
		DRM_ERROR( "Process %d using kernel context %d\n",
			   current->pid, lock.context );
		return -EINVAL;
	}

	DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		   lock.context, current->pid,
		   dev->lock.hw_lock->lock, lock.flags );

	/* Validate the context against the driver's queue model. */
#if __HAVE_DMA_QUEUE
	if ( lock.context < 0 )
		return -EINVAL;
#elif __HAVE_MULTIPLE_DMA_QUEUES
	if ( lock.context < 0 || lock.context >= dev->queue_count )
		return -EINVAL;
	q = dev->queuelist[lock.context];
#endif

#if __HAVE_DMA_FLUSH
	ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
#endif
	if ( !ret ) {
		/* Classic open-coded interruptible wait: mark ourselves
		 * TASK_INTERRUPTIBLE *before* testing the condition so a
		 * wakeup between the test and schedule() is not lost. */
		add_wait_queue( &dev->lock.lock_queue, &entry );
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if ( !dev->lock.hw_lock ) {
				/* Device has been unregistered */
				ret = -EINTR;
				break;
			}
			if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
					     lock.context ) ) {
				/* Record the owner and the time taken for
				 * later diagnostics / forced release. */
				dev->lock.filp      = filp;
				dev->lock.lock_time = jiffies;
				atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
				break;	/* Got lock */
			}

				/* Contention */
			schedule();
			if ( signal_pending( current ) ) {
				ret = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue( &dev->lock.lock_queue, &entry );
	}

#if __HAVE_DMA_FLUSH
	DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
#endif

	if ( !ret ) {
		/* Lock acquired: block the job-control signals (STOP,
		 * TSTP, TTIN, TTOU) so the holder cannot be suspended
		 * while it owns the hardware lock; DRM(notifier) is the
		 * callback consulted when such a signal arrives. */
		sigemptyset( &dev->sigmask );
		sigaddset( &dev->sigmask, SIGSTOP );
		sigaddset( &dev->sigmask, SIGTSTP );
		sigaddset( &dev->sigmask, SIGTTIN );
		sigaddset( &dev->sigmask, SIGTTOU );
		dev->sigdata.context = lock.context;
		dev->sigdata.lock    = dev->lock.hw_lock;
		block_all_signals( DRM(notifier),
				   &dev->sigdata, &dev->sigmask );

		/* Optional driver hooks honoured once the lock is held. */
#if __HAVE_DMA_READY
		if ( lock.flags & _DRM_LOCK_READY ) {
			DRIVER_DMA_READY();
		}
#endif
#if __HAVE_DMA_QUIESCENT
		if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
			DRIVER_DMA_QUIESCENT();
		}
#endif
#if __HAVE_KERNEL_CTX_SWITCH
		/* Drivers that context-switch in the kernel swap to the
		 * new context here rather than in user space. */
		if ( dev->last_context != lock.context ) {
			DRM(context_switch)(dev, dev->last_context,
					    lock.context);
		}
#endif
	}

	DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );

	return ret;
}
1031
1032
/* DRM(unlock) implements the unlock ioctl: the caller gives up the
 * hardware lock taken by DRM(lock), and any blocked job-control
 * signals are re-enabled.  Returns 0 on success, -EFAULT on a bad
 * user pointer, -EINVAL for the kernel context.
 */
int DRM(unlock)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_lock_t lock;

	if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
		return -EFAULT;

	/* The kernel context may not be unlocked from user space. */
	if ( lock.context == DRM_KERNEL_CONTEXT ) {
		DRM_ERROR( "Process %d using kernel context %d\n",
			   current->pid, lock.context );
		return -EINVAL;
	}

	atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );

#if __HAVE_KERNEL_CTX_SWITCH
	/* We no longer really hold it, but if we are the next
	 * agent to request it then we should just be able to
	 * take it immediately and not eat the ioctl.
	 */
	dev->lock.filp = 0;
	{
		/* Atomically replace the whole lock word with the bare
		 * context number: cmpxchg is retried until it observes
		 * an unchanged old value, so concurrent updates to the
		 * word are never silently overwritten. */
		__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
		unsigned int old, new, prev, ctx;

		ctx = lock.context;
		do {
			old  = *plock;
			new  = ctx;
			prev = cmpxchg(plock, old, new);
		} while (prev != old);
	}
	/* Let any sleeper in DRM(lock) retry the acquisition. */
	wake_up_interruptible(&dev->lock.lock_queue);
#else
	/* Hand the lock to the kernel context so queued DMA can be
	 * dispatched before the lock is finally freed. */
	DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
			    DRM_KERNEL_CONTEXT );
#if __HAVE_DMA_SCHEDULE
	DRM(dma_schedule)( dev, 1 );
#endif

	/* FIXME: Do we ever really need to check this???
	 */
	if ( 1 /* !dev->context_flag */ ) {
		if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
				     DRM_KERNEL_CONTEXT ) ) {
			DRM_ERROR( "\n" );
		}
	}
#endif /* !__HAVE_KERNEL_CTX_SWITCH */

	/* Undo the block_all_signals() done when the lock was taken. */
	unblock_all_signals();
	return 0;
}