1 /* $Id: shmiq.c,v 1.19 2000/02/23 00:41:21 ralf Exp $
3 * shmiq.c: shared memory input queue driver
4 * written 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * We implement /dev/shmiq, /dev/qcntlN here
7 * this is different from IRIX that has shmiq as a misc
8 * streams device and the qcntl devices as a major device.
10 * minor number 0 implements /dev/shmiq,
11 * any other number implements /dev/qcntl${minor-1}
13 * /dev/shmiq is used by the X server for two things:
15 * 1. for I_LINK()ing through ioctl the file handle of a
18 * 2. To send STREAMS-commands to the devices with the
19 * QIO ioctl interface.
21 * I have not yet figured how to make multiple X servers share
22 * /dev/shmiq for having different servers running. So, for now
23 * I keep a kernel-global array of inodes that are pushed into
26 * /dev/qcntlN is used by the X server for two things:
28 * 1. Issuing the QIOCATTACH for mapping the shared input
29 * queue into the address space of the X server (yeah, yeah,
30 * I did not invent this interface).
32 * 2. used by select. I bet it is used for checking for events on
35 * Now the problem is that there does not seem to be anything that
36 * establishes a connection between /dev/shmiq and the qcntlN file. I
37 * need a strace from an X server that runs on a machine with more
38 * than one keyboard. And this is a problem since the file handles
39 * are pushed in /dev/shmiq, while the events should be dispatched to
40 * the /dev/qcntlN device.
42 * Until then, I just allow for 1 qcntl device.
47 #include <linux/miscdevice.h>
48 #include <linux/sched.h>
49 #include <linux/file.h>
50 #include <linux/interrupt.h>
51 #include <linux/poll.h>
52 #include <linux/vmalloc.h>
53 #include <linux/wait.h>
54 #include <linux/major.h>
55 #include <linux/smp_lock.h>
56 #include <linux/devfs_fs_kernel.h>
58 #include <asm/shmiq.h>
61 #include <asm/uaccess.h>
65 /* we are not really getting any more than a few files in the shmiq */
66 #define MAX_SHMIQ_DEVS 10
69 * One per X server running, not going to get very big.
70 * Even if we have this we now assume just 1 /dev/qcntl can be
71 * active, I need to find how this works on multi-headed machines.
73 #define MAX_SHMI_QUEUES 4
78 struct shmiqsetcpos cpos;
79 } shmiq_pushed_devices [MAX_SHMIQ_DEVS];
81 /* /dev/qcntlN attached memory regions, location and size of the event queue */
83 int opened; /* if this device has been opened */
84 void *shmiq_vaddr; /* mapping in kernel-land */
85 int tail; /* our copy of the shmiq->tail */
89 wait_queue_head_t proc_list;
90 struct fasync_struct *fasync;
91 } shmiqs [MAX_SHMI_QUEUES];
94 shmiq_push_event (struct shmqevent *e)
96 struct sharedMemoryInputQueue *s;
97 int device = 0; /* FIXME: here is the assumption /dev/shmiq == /dev/qcntl0 */
100 if (!shmiqs [device].mapped)
102 s = shmiqs [device].shmiq_vaddr;
105 if (s->tail != shmiqs [device].tail){
106 s->flags |= SHMIQ_CORRUPTED;
109 tail_next = (s->tail + 1) % (shmiqs [device].events);
111 if (tail_next == s->head){
112 s->flags |= SHMIQ_OVERFLOW;
116 e->un.time = jiffies;
117 s->events [s->tail] = *e;
118 printk ("KERNEL: dev=%d which=%d type=%d flags=%d\n",
119 e->data.device, e->data.which, e->data.type, e->data.flags);
121 shmiqs [device].tail = tail_next;
122 kill_fasync (&shmiqs [device].fasync, SIGIO, POLL_IN);
123 wake_up_interruptible (&shmiqs [device].proc_list);
127 shmiq_manage_file (struct file *filp)
131 if (!filp->f_op || !filp->f_op->ioctl)
134 for (i = 0; i < MAX_SHMIQ_DEVS; i++){
135 if (shmiq_pushed_devices [i].used)
137 if ((*filp->f_op->ioctl)(filp->f_dentry->d_inode, filp, SHMIQ_ON, i) != 0)
139 shmiq_pushed_devices [i].used = 1;
140 shmiq_pushed_devices [i].filp = filp;
141 shmiq_pushed_devices [i].cpos.x = 0;
142 shmiq_pushed_devices [i].cpos.y = 0;
149 shmiq_forget_file (unsigned long fdes)
153 if (fdes > MAX_SHMIQ_DEVS)
156 if (!shmiq_pushed_devices [fdes].used)
159 filp = shmiq_pushed_devices [fdes].filp;
161 (*filp->f_op->ioctl)(filp->f_dentry->d_inode, filp, SHMIQ_OFF, 0);
162 shmiq_pushed_devices [fdes].filp = 0;
165 shmiq_pushed_devices [fdes].used = 0;
171 shmiq_sioc (int device, int cmd, struct strioctl *s)
176 * Ok, we just return the index they are providing us
178 printk ("QIOCGETINDX: returning %d\n", *(int *)s->ic_dp);
182 struct muxioctl *mux = (struct muxioctl *) s->ic_dp;
184 printk ("Double indirect ioctl: [%d, %x\n", mux->index, mux->realcmd);
189 if (copy_from_user (&shmiq_pushed_devices [device].cpos, s->ic_dp,
190 sizeof (struct shmiqsetcpos)))
195 printk ("Unknown I_STR request for shmiq device: 0x%x\n", cmd);
200 shmiq_ioctl (struct inode *inode, struct file *f, unsigned int cmd, unsigned long arg)
203 struct strioctl sioc;
208 * They are giving us the file descriptor for one
209 * of their streams devices
217 v = shmiq_manage_file (file);
223 * Remove a device from our list of managed
227 v = shmiq_forget_file (arg);
231 v = get_sioc (&sioc, arg);
235 /* FIXME: This forces device = 0 */
236 return shmiq_sioc (0, sioc.ic_cmd, &sioc);
245 extern long sys_munmap(unsigned long addr, size_t len);
/*
 * QIOCATTACH handler for /dev/qcntlN: validates the user's shmiqreq,
 * replaces the caller's existing mapping at req.user_vaddr with a
 * mapping of this device sized for req.arg events, and marks the
 * queue attached.
 *
 * Fix in this revision: "&current" had been mangled into the "¤t"
 * mojibake (HTML entity &curren;) on the mmap_sem down()/up() lines;
 * restored so the code compiles and locks the right semaphore.
 *
 * NOTE(review): braces, returns and some declarations between the
 * numbered lines are missing from this listing.
 */
248 qcntl_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg, int minor)
251 struct vm_area_struct *vma;
256 * The address space is already mapped as a /dev/zero
257 * mapping. FIXME: check that /dev/zero is what the user
258 * had mapped before :-)
/* Make sure the whole shmiqreq is readable before copying it in. */
264 v = verify_area (VERIFY_READ, (void *) arg,
265 sizeof (struct shmiqreq));
268 if (copy_from_user(&req, (void *) arg, sizeof (req)))
271 * Do not allow to attach to another region if it has
272 * already been attached
274 if (shmiqs [minor].mapped) {
275 printk("SHMIQ:The thingie is already mapped\n");
279 vaddr = (unsigned long) req.user_vaddr;
/* The requested address must fall inside an existing vma. */
280 vma = find_vma (current->mm, vaddr);
282 printk ("SHMIQ: could not find %lx the vma\n",
/* Queue size: header plus req.arg fixed-size event slots. */
286 s = req.arg * sizeof (struct shmqevent) +
287 sizeof (struct sharedMemoryInputQueue);
288 v = sys_munmap (vaddr, s);
/* Swap the user's placeholder mapping for a mapping of this device;
 * mmap_sem must be held around do_munmap()/do_mmap(). */
289 down(&current->mm->mmap_sem);
290 do_munmap(current->mm, vaddr, s);
291 do_mmap(filp, vaddr, s, PROT_READ | PROT_WRITE,
292 MAP_PRIVATE|MAP_FIXED, 0);
293 up(&current->mm->mmap_sem);
294 shmiqs[minor].events = req.arg;
295 shmiqs[minor].mapped = 1;
305 shmiq_nopage (struct vm_area_struct *vma, unsigned long address,
308 /* Do not allow for mremap to expand us */
312 static struct vm_operations_struct qcntl_mmap = {
313 nopage: shmiq_nopage, /* our magic no-page fault handler */
317 shmiq_qcntl_mmap (struct file *file, struct vm_area_struct *vma)
319 int minor = MINOR (file->f_dentry->d_inode->i_rdev), error;
321 unsigned long mem, start;
323 /* mmap is only supported on the qcntl devices */
327 if (vma->vm_pgoff != 0)
330 size = vma->vm_end - vma->vm_start;
331 start = vma->vm_start;
333 mem = (unsigned long) shmiqs [minor].shmiq_vaddr = vmalloc_uncached (size);
339 /* Prevent the swapper from considering these pages for swap and touching them */
340 vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
341 vma->vm_ops = &qcntl_mmap;
343 /* Uncache the pages */
344 vma->vm_page_prot = PAGE_USERIO;
346 error = vmap_page_range (vma->vm_start, size, mem);
348 shmiqs [minor].tail = 0;
349 /* Init the shared memory input queue */
350 memset (shmiqs [minor].shmiq_vaddr, 0, size);
357 shmiq_qcntl_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
359 int minor = MINOR (inode->i_rdev);
362 return shmiq_ioctl (inode, filp, cmd, arg);
364 return qcntl_ioctl (inode, filp, cmd, arg, minor);
368 shmiq_qcntl_poll (struct file *filp, poll_table *wait)
370 struct sharedMemoryInputQueue *s;
371 int minor = MINOR (filp->f_dentry->d_inode->i_rdev);
376 if (!shmiqs [minor].mapped)
379 poll_wait (filp, &shmiqs [minor].proc_list, wait);
380 s = shmiqs [minor].shmiq_vaddr;
381 if (s->head != s->tail)
382 return POLLIN | POLLRDNORM;
387 shmiq_qcntl_open (struct inode *inode, struct file *filp)
389 int minor = MINOR (inode->i_rdev);
395 if (minor > MAX_SHMI_QUEUES)
397 if (shmiqs [minor].opened)
401 shmiqs [minor].opened = 1;
402 shmiqs [minor].shmiq_vaddr = 0;
409 shmiq_qcntl_fasync (int fd, struct file *file, int on)
412 int minor = MINOR (file->f_dentry->d_inode->i_rdev);
414 retval = fasync_helper (fd, file, on, &shmiqs [minor].fasync);
421 shmiq_qcntl_close (struct inode *inode, struct file *filp)
423 int minor = MINOR (inode->i_rdev);
427 for (j = 0; j < MAX_SHMIQ_DEVS; j++)
428 shmiq_forget_file (j);
431 if (minor > MAX_SHMI_QUEUES)
433 if (shmiqs [minor].opened == 0)
437 shmiq_qcntl_fasync (-1, filp, 0);
438 shmiqs [minor].opened = 0;
439 shmiqs [minor].mapped = 0;
440 shmiqs [minor].events = 0;
441 shmiqs [minor].fasync = 0;
442 vfree (shmiqs [minor].shmiq_vaddr);
443 shmiqs [minor].shmiq_vaddr = 0;
450 static struct file_operations shmiq_fops =
452 poll: shmiq_qcntl_poll,
453 ioctl: shmiq_qcntl_ioctl,
454 mmap: shmiq_qcntl_mmap,
455 open: shmiq_qcntl_open,
456 release: shmiq_qcntl_close,
457 fasync: shmiq_qcntl_fasync,
463 printk ("SHMIQ setup\n");
464 devfs_register_chrdev(SHMIQ_MAJOR, "shmiq", &shmiq_fops);
465 devfs_register (NULL, "shmiq", DEVFS_FL_DEFAULT,
466 SHMIQ_MAJOR, 0, S_IFCHR | S_IRUSR | S_IWUSR,
468 devfs_register_series (NULL, "qcntl%u", 2, DEVFS_FL_DEFAULT,
470 S_IFCHR | S_IRUSR | S_IWUSR,