1 /* drm_drv.h -- Generic driver template -*- linux-c -*-
2 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
33 * To use this template, you must at least define the following (samples
34 * given for the MGA driver):
36 * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
38 * #define DRIVER_NAME "mga"
39 * #define DRIVER_DESC "Matrox G200/G400"
40 * #define DRIVER_DATE "20001127"
42 * #define DRIVER_MAJOR 2
43 * #define DRIVER_MINOR 0
44 * #define DRIVER_PATCHLEVEL 2
46 * #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
48 * #define DRM(x) mga_##x
/* Conservative defaults for every feature/hook macro the driver template
 * consumes.  A driver that includes this template overrides any of these
 * before inclusion; anything left undefined becomes a no-op (hooks) or 0
 * (feature flags).  Each #ifndef is balanced with its own #endif so the
 * block is well-formed regardless of what the driver defines.
 */
#ifndef __MUST_HAVE_AGP
#define __MUST_HAVE_AGP			0
#endif
#ifndef __HAVE_CTX_BITMAP
#define __HAVE_CTX_BITMAP		0
#endif
#ifndef __HAVE_DMA_IRQ
#define __HAVE_DMA_IRQ			0
#endif
#ifndef __HAVE_DMA_QUEUE
#define __HAVE_DMA_QUEUE		0
#endif
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
#define __HAVE_MULTIPLE_DMA_QUEUES	0
#endif
#ifndef __HAVE_DMA_SCHEDULE
#define __HAVE_DMA_SCHEDULE		0
#endif
#ifndef __HAVE_DMA_FLUSH
#define __HAVE_DMA_FLUSH		0
#endif
#ifndef __HAVE_DMA_READY
#define __HAVE_DMA_READY		0
#endif
#ifndef __HAVE_DMA_QUIESCENT
#define __HAVE_DMA_QUIESCENT		0
#endif
#ifndef __HAVE_RELEASE
#define __HAVE_RELEASE			0
#endif
#ifndef __HAVE_COUNTERS
#define __HAVE_COUNTERS			0
#endif
#ifndef __HAVE_KERNEL_CTX_SWITCH
#define __HAVE_KERNEL_CTX_SWITCH	0
#endif
#ifndef __HAVE_DRIVER_FOPS_READ
#define __HAVE_DRIVER_FOPS_READ		0
#endif
#ifndef __HAVE_DRIVER_FOPS_POLL
#define __HAVE_DRIVER_FOPS_POLL		0
#endif

/* Optional driver callbacks: default to empty statements so the core can
 * invoke them unconditionally at the corresponding lifecycle points.
 */
#ifndef DRIVER_PREINIT
#define DRIVER_PREINIT()
#endif
#ifndef DRIVER_POSTINIT
#define DRIVER_POSTINIT()
#endif
#ifndef DRIVER_PRERELEASE
#define DRIVER_PRERELEASE()
#endif
#ifndef DRIVER_PRETAKEDOWN
#define DRIVER_PRETAKEDOWN()
#endif
#ifndef DRIVER_POSTCLEANUP
#define DRIVER_POSTCLEANUP()
#endif
#ifndef DRIVER_PRESETUP
#define DRIVER_PRESETUP()
#endif
#ifndef DRIVER_POSTSETUP
#define DRIVER_POSTSETUP()
#endif
/* Extra driver-private ioctl table entries, spliced into DRM(ioctls). */
#ifndef DRIVER_IOCTLS
#define DRIVER_IOCTLS
#endif
/* Expands to the driver's struct file_operations definition, wiring the
 * VFS entry points to the DRM core implementations.  The extracted text
 * had lost the .open/.mmap members and the closing brace; restored here.
 * NOTE(review): the original template conditionally added .read/.poll
 * based on __HAVE_DRIVER_FOPS_READ/__HAVE_DRIVER_FOPS_POLL — confirm
 * against the upstream file before relying on the unconditional form.
 */
#define DRIVER_FOPS				\
static struct file_operations	DRM(fops) = {	\
	.owner   = THIS_MODULE,			\
	.open	 = DRM(open),			\
	.flush	 = DRM(flush),			\
	.release = DRM(release),		\
	.ioctl	 = DRM(ioctl),			\
	.mmap	 = DRM(mmap),			\
	.fasync  = DRM(fasync),			\
}
137 /* DRM(options) is called by the kernel to parse command-line options
138 * passed via the boot-loader (e.g., LILO). It calls the insmod option
139 * routine, drm_parse_drm.
141 /* Use an additional macro to avoid preprocessor troubles */
142 #define DRM_OPTIONS_FUNC DRM(options)
143 static int __init DRM(options)( char *str )
145 DRM(parse_options)( str );
149 __setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC );
150 #undef DRM_OPTIONS_FUNC
154 * The default number of instances (minor numbers) to initialize.
156 #ifndef DRIVER_NUM_CARDS
157 #define DRIVER_NUM_CARDS 1
160 static drm_device_t *DRM(device);
161 static int *DRM(minor);
162 static int DRM(numdevs) = 0;
/* Master ioctl dispatch table, indexed by DRM_IOCTL_NR(cmd).
 * Each entry appears to be { handler, auth_needed, root_only } — field
 * roles inferred from the checks in DRM(ioctl) below (root_only gates on
 * CAP_SYS_ADMIN, auth_needed gates on priv->authenticated); confirm
 * against the drm_ioctl_desc_t declaration.
 * NOTE(review): gaps in the embedded numbering indicate several
 * #if/#else/#endif feature guards were lost in extraction.
 */
166 static drm_ioctl_desc_t DRM(ioctls)[] = {
167 [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
168 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 },
169 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 },
170 [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { DRM(irq_busid), 0, 1 },
171 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { DRM(getmap), 0, 0 },
172 [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { DRM(getclient), 0, 0 },
173 [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
175 [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
176 [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(noop), 1, 1 },
177 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(noop), 1, 1 },
178 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
180 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
181 [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 },
183 #if __HAVE_CTX_BITMAP
184 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
185 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
/* NOTE(review): matching #endif for __HAVE_CTX_BITMAP lost in extraction. */
188 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 },
189 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 },
190 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { DRM(modctx), 1, 1 },
191 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { DRM(getctx), 1, 0 },
192 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { DRM(switchctx), 1, 1 },
193 [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { DRM(newctx), 1, 1 },
194 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { DRM(resctx), 1, 0 },
196 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { DRM(adddraw), 1, 1 },
197 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { DRM(rmdraw), 1, 1 },
199 [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
200 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
203 /* Gamma only, really */
204 [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
/* NOTE(review): two entries use the same DRM_IOCTL_FINISH designator —
 * presumably the alternate arms of a lost #if/#else.  As written, the
 * later designated initializer silently overrides the earlier one. */
206 [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(noop), 1, 0 },
210 [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
211 [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { DRM(markbufs), 1, 1 },
212 [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { DRM(infobufs), 1, 0 },
213 [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { DRM(mapbufs), 1, 0 },
214 [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { DRM(freebufs), 1, 0 },
216 /* The DRM_IOCTL_DMA ioctl should be defined by the driver.
 */
218 [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 },
221 #if __REALLY_HAVE_AGP
222 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 },
223 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { DRM(agp_release), 1, 1 },
224 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { DRM(agp_enable), 1, 1 },
225 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { DRM(agp_info), 1, 0 },
226 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { DRM(agp_alloc), 1, 1 },
227 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { DRM(agp_free), 1, 1 },
228 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { DRM(agp_bind), 1, 1 },
229 [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { DRM(agp_unbind), 1, 1 },
/* NOTE(review): scatter/gather entries were presumably guarded by an SG
 * feature #if that was lost in extraction. */
233 [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { DRM(sg_alloc), 1, 1 },
234 [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 },
238 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 },
/* Number of entries in the table; used by DRM(ioctl) for bounds checks. */
244 #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( DRM(ioctls) )
/* Module option string, filled in by insmod ("drm_opts=...") and handed
 * to DRM(parse_options) in drm_init(). */
247 static char *drm_opts = NULL;
/* Standard module metadata; DRIVER_AUTHOR/DRIVER_DESC are supplied by the
 * including driver. */
250 MODULE_AUTHOR( DRIVER_AUTHOR );
251 MODULE_DESCRIPTION( DRIVER_DESC );
252 MODULE_PARM( drm_opts, "s" );
253 MODULE_LICENSE("GPL and additional rights");
255 static int DRM(setup)( drm_device_t *dev )
260 atomic_set( &dev->ioctl_count, 0 );
261 atomic_set( &dev->vma_count, 0 );
263 atomic_set( &dev->buf_alloc, 0 );
266 i = DRM(dma_setup)( dev );
271 dev->counters = 6 + __HAVE_COUNTERS;
272 dev->types[0] = _DRM_STAT_LOCK;
273 dev->types[1] = _DRM_STAT_OPENS;
274 dev->types[2] = _DRM_STAT_CLOSES;
275 dev->types[3] = _DRM_STAT_IOCTLS;
276 dev->types[4] = _DRM_STAT_LOCKS;
277 dev->types[5] = _DRM_STAT_UNLOCKS;
278 #ifdef __HAVE_COUNTER6
279 dev->types[6] = __HAVE_COUNTER6;
281 #ifdef __HAVE_COUNTER7
282 dev->types[7] = __HAVE_COUNTER7;
284 #ifdef __HAVE_COUNTER8
285 dev->types[8] = __HAVE_COUNTER8;
287 #ifdef __HAVE_COUNTER9
288 dev->types[9] = __HAVE_COUNTER9;
290 #ifdef __HAVE_COUNTER10
291 dev->types[10] = __HAVE_COUNTER10;
293 #ifdef __HAVE_COUNTER11
294 dev->types[11] = __HAVE_COUNTER11;
296 #ifdef __HAVE_COUNTER12
297 dev->types[12] = __HAVE_COUNTER12;
299 #ifdef __HAVE_COUNTER13
300 dev->types[13] = __HAVE_COUNTER13;
302 #ifdef __HAVE_COUNTER14
303 dev->types[14] = __HAVE_COUNTER14;
305 #ifdef __HAVE_COUNTER15
306 dev->types[14] = __HAVE_COUNTER14;
309 for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
310 atomic_set( &dev->counts[i], 0 );
312 for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
313 dev->magiclist[i].head = NULL;
314 dev->magiclist[i].tail = NULL;
317 dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
319 if(dev->maplist == NULL) return -ENOMEM;
320 memset(dev->maplist, 0, sizeof(*dev->maplist));
321 INIT_LIST_HEAD(&dev->maplist->head);
324 dev->sigdata.lock = dev->lock.hw_lock = NULL;
325 init_waitqueue_head( &dev->lock.lock_queue );
326 dev->queue_count = 0;
327 dev->queue_reserved = 0;
328 dev->queue_slots = 0;
329 dev->queuelist = NULL;
331 dev->context_flag = 0;
332 dev->interrupt_flag = 0;
334 dev->last_context = 0;
335 dev->last_switch = 0;
336 dev->last_checked = 0;
337 init_timer( &dev->timer );
338 init_waitqueue_head( &dev->context_wait );
343 dev->buf_rp = dev->buf;
344 dev->buf_wp = dev->buf;
345 dev->buf_end = dev->buf + DRM_BSZ;
346 dev->buf_async = NULL;
347 init_waitqueue_head( &dev->buf_readers );
348 init_waitqueue_head( &dev->buf_writers );
352 /* The kernel's context could be created here, but is now created
353 * in drm_dma_enqueue. This is more resource-efficient for
354 * hardware that does not do DMA, but may mean that
355 * drm_select_queue fails between the time the interrupt is
356 * initialized and the time the queues are initialized.
/* DRM(takedown) — tear down per-open-cycle state of a device instance:
 * uninstall the IRQ, free the device/unique names, flush the magic hash,
 * release AGP memory, free the vma and map lists, destroy DMA queues, and
 * drop the hardware lock.  Called under dev->struct_sem (taken below).
 * NOTE(review): gaps in the embedded numbering mean several closing
 * braces and #endif directives were lost in extraction.
 */
363 static int DRM(takedown)( drm_device_t *dev )
365 drm_magic_entry_t *pt, *next;
367 drm_map_list_t *r_list;
368 struct list_head *list, *list_next;
369 drm_vma_entry_t *vma, *vma_next;
374 DRIVER_PRETAKEDOWN();
376 if ( dev->irq ) DRM(irq_uninstall)( dev );
379 down( &dev->struct_sem );
380 del_timer( &dev->timer );
382 if ( dev->devname ) {
383 DRM(free)( dev->devname, strlen( dev->devname ) + 1,
389 DRM(free)( dev->unique, strlen( dev->unique ) + 1,
/* Free every pending authentication-magic entry in each hash bucket. */
395 for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
396 for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
398 DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
400 dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
403 #if __REALLY_HAVE_AGP
404 /* Clear AGP information */
406 drm_agp_mem_t *entry;
407 drm_agp_mem_t *nexte;
409 /* Remove AGP resources, but leave dev->agp
410 intact until drv_cleanup is called. */
411 for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
413 if ( entry->bound ) DRM(unbind_agp)( entry->memory );
414 DRM(free_agp)( entry->memory, entry->pages );
415 DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
417 dev->agp->memory = NULL;
419 if ( dev->agp->acquired ) DRM(agp_do_release)();
421 dev->agp->acquired = 0;
422 dev->agp->enabled = 0;
426 /* Clear vma list (only built for debugging) */
427 if ( dev->vmalist ) {
428 for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
429 vma_next = vma->next;
430 DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
/* Walk the map list, freeing each map according to its type. */
436 for(list = dev->maplist->head.next;
437 list != &dev->maplist->head;
439 list_next = list->next;
440 r_list = (drm_map_list_t *)list;
442 DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
445 switch ( map->type ) {
447 case _DRM_FRAME_BUFFER:
448 #if __REALLY_HAVE_MTRR
/* Undo the MTRR added when the frame buffer / register map was created. */
449 if ( map->mtrr >= 0 ) {
451 retcode = mtrr_del( map->mtrr,
454 DRM_DEBUG( "mtrr_del=%d\n", retcode );
457 DRM(ioremapfree)( map->handle, map->size );
464 /* Do nothing here, because this is all
465 * handled in the AGP/GART driver.
 */
468 case _DRM_SCATTER_GATHER:
469 /* Handle it, but do nothing, if HAVE_SG
 */
474 DRM(sg_cleanup)(dev->sg);
480 DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
482 DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
486 #if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
487 if ( dev->queuelist ) {
488 for ( i = 0 ; i < dev->queue_count ; i++ ) {
489 #if __HAVE_DMA_WAITLIST
490 DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
492 if ( dev->queuelist[i] ) {
493 DRM(free)( dev->queuelist[i],
494 sizeof(*dev->queuelist[0]),
496 dev->queuelist[i] = NULL;
499 DRM(free)( dev->queuelist,
500 dev->queue_slots * sizeof(*dev->queuelist),
502 dev->queuelist = NULL;
504 dev->queue_count = 0;
508 DRM(dma_takedown)( dev );
/* Drop the hardware lock pointer and wake anyone sleeping on it. */
510 if ( dev->lock.hw_lock ) {
511 dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
513 wake_up_interruptible( &dev->lock.lock_queue );
515 up( &dev->struct_sem );
521 * Figure out how many instances to initialize.
523 static int drm_count_cards(void)
526 #if defined(DRIVER_CARD_LIST)
530 struct pci_dev *pdev = NULL;
535 #if defined(DRIVER_COUNT_CARDS)
536 num = DRIVER_COUNT_CARDS();
537 #elif defined(DRIVER_CARD_LIST)
538 for (i = 0, l = DRIVER_CARD_LIST; l[i].vendor != 0; i++) {
540 vendor = l[i].vendor;
541 device = l[i].device;
542 if(device == 0xffff) device = PCI_ANY_ID;
543 if(vendor == 0xffff) vendor = PCI_ANY_ID;
544 while ((pdev = pci_find_device(vendor, device, pdev))) {
549 num = DRIVER_NUM_CARDS;
551 DRM_DEBUG("numdevs = %d\n", num);
555 /* drm_init is called via init_module at module load time, or via
556 * linux/init/main.c (this is not currently supported).
 */
558 static int __init drm_init( void )
/* Parse any insmod-supplied option string before sizing the arrays. */
567 DRM(parse_options)( drm_opts );
570 DRM(numdevs) = drm_count_cards();
571 /* Force at least one instance. */
572 if (DRM(numdevs) <= 0)
/* Per-instance device and minor-number arrays, sized by numdevs.
 * NOTE(review): kmalloc failure checks presumably sat on the lines lost
 * in extraction (numbering gap after each kmalloc) — verify upstream. */
575 DRM(device) = kmalloc(sizeof(*DRM(device)) * DRM(numdevs), GFP_KERNEL);
579 DRM(minor) = kmalloc(sizeof(*DRM(minor)) * DRM(numdevs), GFP_KERNEL);
/* Register and initialize each instance in turn. */
589 for (i = 0; i < DRM(numdevs); i++) {
590 dev = &(DRM(device)[i]);
591 memset( (void *)dev, 0, sizeof(*dev) );
592 dev->count_lock = SPIN_LOCK_UNLOCKED;
593 sema_init( &dev->struct_sem, 1 );
595 if ((DRM(minor)[i] = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0)
597 dev->device = MKDEV(DRM_MAJOR, DRM(minor)[i] );
598 dev->name = DRIVER_NAME;
600 #if __REALLY_HAVE_AGP
601 dev->agp = DRM(agp_init)();
/* AGP failure unwinds this instance: unregister the minor, take the
 * device down.  (The __MUST_HAVE_AGP guard sat in the lost lines.) */
603 if ( dev->agp == NULL ) {
604 DRM_ERROR( "Cannot initialize the agpgart module.\n" );
605 DRM(stub_unregister)(DRM(minor)[i]);
606 DRM(takedown)( dev );
610 #if __REALLY_HAVE_MTRR
/* Cover the whole AGP aperture with a write-combining MTRR. */
612 dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
613 dev->agp->agp_info.aper_size*1024*1024,
619 #if __HAVE_CTX_BITMAP
620 retcode = DRM(ctxbitmap_init)( dev );
/* Context-bitmap failure unwinds the same way as the AGP path above. */
622 DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
623 DRM(stub_unregister)(DRM(minor)[i]);
624 DRM(takedown)( dev );
628 DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
642 /* drm_cleanup is called via cleanup_module at module unload time.
 */
644 static void __exit drm_cleanup( void )
/* Unwind instances in reverse registration order. */
651 for (i = DRM(numdevs) - 1; i >= 0; i--) {
652 dev = &(DRM(device)[i]);
653 if ( DRM(stub_unregister)(DRM(minor)[i]) ) {
654 DRM_ERROR( "Cannot unload module\n" );
656 DRM_DEBUG("minor %d unregistered\n", DRM(minor)[i]);
658 DRM_INFO( "Module unloaded\n" );
661 #if __HAVE_CTX_BITMAP
662 DRM(ctxbitmap_cleanup)( dev );
665 #if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
/* Remove the write-combining MTRR added over the AGP aperture in
 * drm_init; agp_mtrr < 0 means none was installed. */
666 if ( dev->agp && dev->agp->agp_mtrr >= 0) {
668 retval = mtrr_del( dev->agp->agp_mtrr,
669 dev->agp->agp_info.aper_base,
670 dev->agp->agp_info.aper_size*1024*1024 );
671 DRM_DEBUG( "mtrr_del=%d\n", retval );
675 DRM(takedown)( dev );
677 #if __REALLY_HAVE_AGP
/* dev->agp was deliberately kept alive through takedown (see the note in
 * DRM(takedown)); it is finally freed here. */
680 DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
685 DRIVER_POSTCLEANUP();
/* Register the module entry/exit points with the kernel. */
691 module_init( drm_init );
692 module_exit( drm_cleanup );
/* DRM_IOCTL_VERSION handler: copies the driver's version numbers and the
 * name/date/description strings into the user-supplied drm_version_t. */
695 int DRM(version)( struct inode *inode, struct file *filp,
696 unsigned int cmd, unsigned long arg )
698 drm_version_t version;
701 if ( copy_from_user( &version,
702 (drm_version_t *)arg,
/* Bounded string copy-out: copies min(strlen(value), name##_len) bytes
 * into the user buffer `name`.
 * NOTE(review): the copy omits the trailing NUL (len = strlen(value)),
 * and name##_len is reset to the full strlen(value) even when the copy
 * was truncated — callers use the returned length, but verify user-space
 * expectations before changing. */
706 #define DRM_COPY( name, value ) \
707 len = strlen( value ); \
708 if ( len > name##_len ) len = name##_len; \
709 name##_len = strlen( value ); \
710 if ( len && name ) { \
711 if ( copy_to_user( name, value, len ) ) \
715 version.version_major = DRIVER_MAJOR;
716 version.version_minor = DRIVER_MINOR;
717 version.version_patchlevel = DRIVER_PATCHLEVEL;
719 DRM_COPY( version.name, DRIVER_NAME );
720 DRM_COPY( version.date, DRIVER_DATE );
721 DRM_COPY( version.desc, DRIVER_DESC );
/* Write the filled-in structure back to user space. */
723 if ( copy_to_user( (drm_version_t *)arg,
/* VFS open handler: resolve the minor number to a device instance, run
 * the common open path, and perform full device setup on first open. */
730 int DRM(open)( struct inode *inode, struct file *filp )
732 drm_device_t *dev = NULL;
/* Linear search of registered minors for this inode's minor number. */
736 for (i = 0; i < DRM(numdevs); i++) {
737 if (minor(inode->i_rdev) == DRM(minor)[i]) {
738 dev = &(DRM(device)[i]);
746 retcode = DRM(open_helper)( inode, filp, dev );
748 atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
749 spin_lock( &dev->count_lock );
/* First opener initializes the device; count_lock guards open_count. */
750 if ( !dev->open_count++ ) {
751 spin_unlock( &dev->count_lock );
752 return DRM(setup)( dev );
754 spin_unlock( &dev->count_lock );
/* VFS release handler: free the hardware lock if this file holds it
 * (re-taking it in the kernel context if needed to reclaim buffers),
 * unlink the per-file state, and take the whole device down when the
 * last reference goes away. */
760 int DRM(release)( struct inode *inode, struct file *filp )
762 drm_file_t *priv = filp->private_data;
769 DRM_DEBUG( "open_count = %d\n", dev->open_count );
773 /* ========================================================
774 * Begin inline drm_release
 */
777 DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
778 current->pid, (long)dev->device, dev->open_count );
/* Case 1: this file currently holds the hardware lock — free it. */
780 if ( priv->lock_count && dev->lock.hw_lock &&
781 _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
782 dev->lock.filp == filp ) {
783 DRM_DEBUG( "File %p released, freeing lock for context %d\n",
785 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
789 DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
790 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
792 /* FIXME: may require heavy-handed reset of
793 hardware at this point, possibly
794 processed via a callback to the X
 */
/* Case 2: file ever took the lock — acquire it in the kernel context so
 * buffers can be reclaimed safely, sleeping interruptibly until free. */
798 else if ( priv->lock_count && dev->lock.hw_lock ) {
799 /* The lock is required to reclaim buffers */
800 DECLARE_WAITQUEUE( entry, current );
802 add_wait_queue( &dev->lock.lock_queue, &entry );
804 current->state = TASK_INTERRUPTIBLE;
805 if ( !dev->lock.hw_lock ) {
806 /* Device has been unregistered */
810 if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
811 DRM_KERNEL_CONTEXT ) ) {
812 dev->lock.filp = filp;
813 dev->lock.lock_time = jiffies;
814 atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
815 break; /* Got lock */
819 if ( signal_pending( current ) ) {
820 retcode = -ERESTARTSYS;
824 current->state = TASK_RUNNING;
825 remove_wait_queue( &dev->lock.lock_queue, &entry );
828 DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
829 DRM_KERNEL_CONTEXT );
833 DRM(reclaim_buffers)( filp );
/* Detach any fasync notification for this file. */
836 DRM(fasync)( -1, filp, 0 );
838 down( &dev->struct_sem );
/* De-authenticate every client this master had authenticated. */
839 if ( priv->remove_auth_on_close == 1 ) {
840 drm_file_t *temp = dev->file_first;
842 temp->authenticated = 0;
/* Unlink priv from the device's doubly-linked file list. */
847 priv->prev->next = priv->next;
849 dev->file_first = priv->next;
852 priv->next->prev = priv->prev;
854 dev->file_last = priv->prev;
856 up( &dev->struct_sem );
858 DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );
860 /* ========================================================
861 * End inline drm_release
 */
864 atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
865 spin_lock( &dev->count_lock );
/* Last close: refuse takedown while ioctls are in flight or the device
 * is blocked; otherwise tear the device down. */
866 if ( !--dev->open_count ) {
867 if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
868 DRM_ERROR( "Device busy: %d %d\n",
869 atomic_read( &dev->ioctl_count ),
871 spin_unlock( &dev->count_lock );
875 spin_unlock( &dev->count_lock );
877 return DRM(takedown)( dev );
879 spin_unlock( &dev->count_lock );
886 /* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm.
 */
888 int DRM(ioctl)( struct inode *inode, struct file *filp,
889 unsigned int cmd, unsigned long arg )
891 drm_file_t *priv = filp->private_data;
892 drm_device_t *dev = priv->dev;
893 drm_ioctl_desc_t *ioctl;
895 int nr = DRM_IOCTL_NR(cmd);
/* Track in-flight ioctls (checked by DRM(release) before takedown) and
 * the ioctl statistics counter. */
898 atomic_inc( &dev->ioctl_count );
899 atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
902 DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
903 current->pid, cmd, nr, (long)dev->device,
904 priv->authenticated );
/* Bounds-check against the dispatch table before indexing it. */
906 if ( nr >= DRIVER_IOCTL_COUNT ) {
909 ioctl = &DRM(ioctls)[nr];
913 DRM_DEBUG( "no function\n" );
/* Permission gates: root_only requires CAP_SYS_ADMIN, auth_needed
 * requires an authenticated client. */
915 } else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )||
916 ( ioctl->auth_needed && !priv->authenticated ) ) {
919 retcode = func( inode, filp, cmd, arg );
923 atomic_dec( &dev->ioctl_count );
/* DRM_IOCTL_LOCK handler: sleep interruptibly until the hardware lock is
 * acquired for lock.context, then perform the requested flush/ready/
 * quiescent actions and block job-control signals while the lock is held
 * (so the holder is not stopped mid-critical-section). */
927 int DRM(lock)( struct inode *inode, struct file *filp,
928 unsigned int cmd, unsigned long arg )
930 drm_file_t *priv = filp->private_data;
931 drm_device_t *dev = priv->dev;
932 DECLARE_WAITQUEUE( entry, current );
935 #if __HAVE_MULTIPLE_DMA_QUEUES
941 if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
/* User space must never lock with the kernel's reserved context. */
944 if ( lock.context == DRM_KERNEL_CONTEXT ) {
945 DRM_ERROR( "Process %d using kernel context %d\n",
946 current->pid, lock.context );
950 DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
951 lock.context, current->pid,
952 dev->lock.hw_lock->lock, lock.flags );
955 if ( lock.context < 0 )
957 #elif __HAVE_MULTIPLE_DMA_QUEUES
958 if ( lock.context < 0 || lock.context >= dev->queue_count )
960 q = dev->queuelist[lock.context];
964 ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
/* Interruptible wait loop: retry DRM(lock_take) until acquired, the
 * device disappears, or a signal arrives. */
967 add_wait_queue( &dev->lock.lock_queue, &entry );
969 current->state = TASK_INTERRUPTIBLE;
970 if ( !dev->lock.hw_lock ) {
971 /* Device has been unregistered */
975 if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
977 dev->lock.filp = filp;
978 dev->lock.lock_time = jiffies;
979 atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
980 break; /* Got lock */
985 if ( signal_pending( current ) ) {
990 current->state = TASK_RUNNING;
991 remove_wait_queue( &dev->lock.lock_queue, &entry );
995 DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
/* Block job-control signals for the duration of the lock; DRM(notifier)
 * consults sigdata to decide whether delivery must wait. */
999 sigemptyset( &dev->sigmask );
1000 sigaddset( &dev->sigmask, SIGSTOP );
1001 sigaddset( &dev->sigmask, SIGTSTP );
1002 sigaddset( &dev->sigmask, SIGTTIN );
1003 sigaddset( &dev->sigmask, SIGTTOU );
1004 dev->sigdata.context = lock.context;
1005 dev->sigdata.lock = dev->lock.hw_lock;
1006 block_all_signals( DRM(notifier),
1007 &dev->sigdata, &dev->sigmask );
1009 #if __HAVE_DMA_READY
1010 if ( lock.flags & _DRM_LOCK_READY ) {
1014 #if __HAVE_DMA_QUIESCENT
1015 if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
1016 DRIVER_DMA_QUIESCENT();
1019 #if __HAVE_KERNEL_CTX_SWITCH
/* Kernel-managed context switching: switch hardware context if the lock
 * was taken for a context other than the last active one. */
1020 if ( dev->last_context != lock.context ) {
1021 DRM(context_switch)(dev, dev->last_context,
1027 DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
/* DRM_IOCTL_UNLOCK handler: release (or hand off) the hardware lock and
 * re-enable signal delivery blocked by DRM(lock).
 * NOTE(review): the function continues past the end of this extract. */
1033 int DRM(unlock)( struct inode *inode, struct file *filp,
1034 unsigned int cmd, unsigned long arg )
1036 drm_file_t *priv = filp->private_data;
1037 drm_device_t *dev = priv->dev;
1040 if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
/* User space must never unlock the kernel's reserved context. */
1043 if ( lock.context == DRM_KERNEL_CONTEXT ) {
1044 DRM_ERROR( "Process %d using kernel context %d\n",
1045 current->pid, lock.context );
1049 atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
1051 #if __HAVE_KERNEL_CTX_SWITCH
1052 /* We no longer really hold it, but if we are the next
1053 * agent to request it then we should just be able to
1054 * take it immediately and not eat the ioctl.
 */
/* Lock-free handoff: atomically swap the lock word via cmpxchg, then
 * wake any sleepers on the lock queue. */
1058 __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
1059 unsigned int old, new, prev, ctx;
1065 prev = cmpxchg(plock, old, new);
1066 } while (prev != old);
1068 wake_up_interruptible(&dev->lock.lock_queue);
/* Non-kernel-ctx-switch path: transfer the lock to the kernel context,
 * kick the DMA scheduler, then free the lock. */
1070 DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
1071 DRM_KERNEL_CONTEXT );
1072 #if __HAVE_DMA_SCHEDULE
1073 DRM(dma_schedule)( dev, 1 );
1076 /* FIXME: Do we ever really need to check this???
 */
1078 if ( 1 /* !dev->context_flag */ ) {
1079 if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
1080 DRM_KERNEL_CONTEXT ) ) {
1084 #endif /* !__HAVE_KERNEL_CTX_SWITCH */
/* Undo the block_all_signals() performed in DRM(lock). */
1086 unblock_all_signals();