- Updated to 2.6.22-rc2-git7:
[linux-flexiantxendom0-3.2.10.git] / drivers / xen / fbfront / xenfb.c
1 /*
2  * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
3  *
4  * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
5  * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
6  *
7  *  Based on linux/drivers/video/q40fb.c
8  *
9  *  This file is subject to the terms and conditions of the GNU General Public
10  *  License. See the file COPYING in the main directory of this archive for
11  *  more details.
12  */
13
14 /*
15  * TODO:
16  *
17  * Switch to grant tables when they become capable of dealing with the
18  * frame buffer.
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/fb.h>
24 #include <linux/module.h>
25 #include <linux/vmalloc.h>
26 #include <linux/mm.h>
27 #include <linux/mutex.h>
28 #include <linux/freezer.h>
29 #include <asm/hypervisor.h>
30 #include <xen/evtchn.h>
31 #include <xen/interface/io/fbif.h>
32 #include <xen/interface/io/protocols.h>
33 #include <xen/xenbus.h>
34 #include <linux/kthread.h>
35
/*
 * One userspace mmap() of the framebuffer.  Instances are chained on
 * xenfb_info->mappings (protected by mm_lock) so the update thread
 * can zap faulted pages; see the locking comment below.
 */
struct xenfb_mapping
{
	struct list_head	link;		/* entry in xenfb_info->mappings */
	struct vm_area_struct	*vma;		/* the VMA this mapping backs */
	atomic_t		map_refs;	/* VMA open count (vm_open/vm_close) */
	int			faults;		/* pages faulted in since last zap */
	struct xenfb_info	*info;		/* owning device instance */
};
44
/* Per-device state of the virtual framebuffer frontend. */
struct xenfb_info
{
	struct task_struct	*kthread;	/* update thread, see xenfb_thread() */
	wait_queue_head_t	wq;		/* wakes kthread when dirty is set */

	unsigned char		*fb;		/* vmalloc'ed framebuffer memory */
	struct fb_info		*fb_info;	/* registered fbdev, NULL until probe done */
	struct timer_list	refresh;	/* rate-limits updates to xenfb_fps */
	int			dirty;		/* set by timer, consumed by kthread */
	int			x1, y1, x2, y2; /* dirty rectangle,
						   protected by dirty_lock */
	spinlock_t		dirty_lock;
	struct mutex		mm_lock;
	int			nr_pages;	/* number of framebuffer pages */
	struct page		**pages;	/* page per framebuffer page */
	struct list_head	mappings; /* protected by mm_lock */

	int			irq;		/* event channel irq, -1 if unbound */
	struct xenfb_page	*page;		/* page shared with the backend */
	unsigned long		*mfns;		/* mfn per framebuffer page */
	int			update_wanted; /* XENFB_TYPE_UPDATE wanted */

	struct xenbus_device	*xbdev;		/* our xenbus device */
};
69
70 /*
71  * How the locks work together
72  *
73  * There are two locks: spinlock dirty_lock protecting the dirty
74  * rectangle, and mutex mm_lock protecting mappings.
75  *
76  * The problem is that dirty rectangle and mappings aren't
77  * independent: the dirty rectangle must cover all faulted pages in
78  * mappings.  We need to prove that our locking maintains this
79  * invariant.
80  *
81  * There are several kinds of critical regions:
82  *
83  * 1. Holding only dirty_lock: xenfb_refresh().  May run in
84  *    interrupts.  Extends the dirty rectangle.  Trivially preserves
85  *    invariant.
86  *
87  * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close().  Touch
88  *    only mappings.  The former creates unfaulted pages.  Preserves
89  *    invariant.  The latter removes pages.  Preserves invariant.
90  *
91  * 3. Holding both locks: xenfb_vm_nopage().  Extends the dirty
92  *    rectangle and updates mappings consistently.  Preserves
93  *    invariant.
94  *
95  * 4. The ugliest one: xenfb_update_screen().  Clear the dirty
96  *    rectangle and update mappings consistently.
97  *
98  *    We can't simply hold both locks, because zap_page_range() cannot
99  *    be called with a spinlock held.
100  *
101  *    Therefore, we first clear the dirty rectangle with both locks
102  *    held.  Then we unlock dirty_lock and update the mappings.
103  *    Critical regions that hold only dirty_lock may interfere with
104  *    that.  This can only be region 1: xenfb_refresh().  But that
105  *    just extends the dirty rectangle, which can't harm the
106  *    invariant.
107  *
108  * But FIXME: the invariant is too weak.  It misses that the fault
109  * record in mappings must be consistent with the mapping of pages in
110  * the associated address space!  do_no_page() updates the PTE after
111  * xenfb_vm_nopage() returns, i.e. outside the critical region.  This
112  * allows the following race:
113  *
114  * X writes to some address in the Xen frame buffer
115  * Fault - call do_no_page()
116  *     call xenfb_vm_nopage()
117  *         grab mm_lock
118  *         map->faults++;
119  *         release mm_lock
120  *     return back to do_no_page()
121  * (preempted, or SMP)
122  * Xen worker thread runs.
123  *      grab mm_lock
124  *      look at mappings
125  *          find this mapping, zaps its pages (but page not in pte yet)
126  *          clear map->faults
127  *      releases mm_lock
128  * (back to X process)
129  *     put page in X's pte
130  *
 * Oh well, we won't be updating the writes to this page anytime soon.
132  */
133
/* Maximum update rate sent to the backend, in frames per second. */
static int xenfb_fps = 20;
/* Framebuffer size in bytes for the default geometry and depth. */
static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;

/* Forward declarations; definitions below. */
static int xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);
141
/*
 * Queue one XENFB_TYPE_UPDATE event describing the rectangle
 * (x, y, w, h) on the out ring and notify the backend.
 *
 * The caller must have checked !xenfb_queue_full() first; this
 * function assumes a free slot exists.  The barriers order the slot
 * write against the producer-index update as the ring protocol
 * requires.
 */
static void xenfb_do_update(struct xenfb_info *info,
			    int x, int y, int w, int h)
{
	union xenfb_out_event event;
	__u32 prod;

	event.type = XENFB_TYPE_UPDATE;
	event.update.x = x;
	event.update.y = y;
	event.update.width = w;
	event.update.height = h;

	prod = info->page->out_prod;
	/* caller ensures !xenfb_queue_full() */
	mb();			/* ensure ring space available */
	XENFB_OUT_RING_REF(info->page, prod) = event;
	wmb();			/* ensure ring contents visible */
	info->page->out_prod = prod + 1;

	notify_remote_via_irq(info->irq);
}
163
164 static int xenfb_queue_full(struct xenfb_info *info)
165 {
166         __u32 cons, prod;
167
168         prod = info->page->out_prod;
169         cons = info->page->out_cons;
170         return prod - cons == XENFB_OUT_RING_LEN;
171 }
172
/*
 * Push the accumulated dirty rectangle to the backend (region 4 of
 * the locking scheme described above).
 *
 * The rectangle is snapshotted and reset under both locks; dirty_lock
 * is then dropped before zapping user mappings because
 * zap_page_range() cannot run under a spinlock.
 */
static void xenfb_update_screen(struct xenfb_info *info)
{
	unsigned long flags;
	int y1, y2, x1, x2;
	struct xenfb_mapping *map;

	/* Backend never asked for updates ("request-update" unset). */
	if (!info->update_wanted)
		return;
	/* Ring full: skip this round; the rectangle stays dirty. */
	if (xenfb_queue_full(info))
		return;

	mutex_lock(&info->mm_lock);

	/* Snapshot and reset the dirty rectangle. */
	spin_lock_irqsave(&info->dirty_lock, flags);
	y1 = info->y1;
	y2 = info->y2;
	x1 = info->x1;
	x2 = info->x2;
	info->x1 = info->y1 = INT_MAX;	/* empty rectangle */
	info->x2 = info->y2 = 0;
	spin_unlock_irqrestore(&info->dirty_lock, flags);

	/* Zap faulted pages so future writes fault and re-dirty. */
	list_for_each_entry(map, &info->mappings, link) {
		if (!map->faults)
			continue;
		zap_page_range(map->vma, map->vma->vm_start,
			       map->vma->vm_end - map->vma->vm_start, NULL);
		map->faults = 0;
	}

	mutex_unlock(&info->mm_lock);

	xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
}
207
/*
 * Update worker thread: sleeps until xenfb_timer() marks the screen
 * dirty (or kthread_stop() is called), then sends one update to the
 * backend.  Stopped from xenfb_remove().
 */
static int xenfb_thread(void *data)
{
	struct xenfb_info *info = data;

	while (!kthread_should_stop()) {
		if (info->dirty) {
			info->dirty = 0;	/* consume flag before updating */
			xenfb_update_screen(info);
		}
		/* Woken by xenfb_timer() or kthread_stop(). */
		wait_event_interruptible(info->wq,
			kthread_should_stop() || info->dirty);
		try_to_freeze();
	}
	return 0;
}
223
224 static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
225                            unsigned blue, unsigned transp,
226                            struct fb_info *info)
227 {
228         u32 v;
229
230         if (regno > info->cmap.len)
231                 return 1;
232
233         red   >>= (16 - info->var.red.length);
234         green >>= (16 - info->var.green.length);
235         blue  >>= (16 - info->var.blue.length);
236
237         v = (red << info->var.red.offset) |
238             (green << info->var.green.offset) |
239             (blue << info->var.blue.offset);
240
241         /* FIXME is this sane?  check against xxxfb_setcolreg()!  */
242         switch (info->var.bits_per_pixel) {
243         case 16:
244         case 24:
245         case 32:
246                 ((u32 *)info->pseudo_palette)[regno] = v;
247                 break;
248         }
249         
250         return 0;
251 }
252
253 static void xenfb_timer(unsigned long data)
254 {
255         struct xenfb_info *info = (struct xenfb_info *)data;
256         info->dirty = 1;
257         wake_up(&info->wq);
258 }
259
260 static void __xenfb_refresh(struct xenfb_info *info,
261                             int x1, int y1, int w, int h)
262 {
263         int y2, x2;
264
265         y2 = y1 + h;
266         x2 = x1 + w;
267
268         if (info->y1 > y1)
269                 info->y1 = y1;
270         if (info->y2 < y2)
271                 info->y2 = y2;
272         if (info->x1 > x1)
273                 info->x1 = x1;
274         if (info->x2 < x2)
275                 info->x2 = x2;
276
277         if (timer_pending(&info->refresh))
278                 return;
279
280         mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
281 }
282
283 static void xenfb_refresh(struct xenfb_info *info,
284                           int x1, int y1, int w, int h)
285 {
286         unsigned long flags;
287
288         spin_lock_irqsave(&info->dirty_lock, flags);
289         __xenfb_refresh(info, x1, y1, w, h);
290         spin_unlock_irqrestore(&info->dirty_lock, flags);
291 }
292
293 static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
294 {
295         struct xenfb_info *info = p->par;
296
297         cfb_fillrect(p, rect);
298         xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
299 }
300
301 static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
302 {
303         struct xenfb_info *info = p->par;
304
305         cfb_imageblit(p, image);
306         xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
307 }
308
309 static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
310 {
311         struct xenfb_info *info = p->par;
312
313         cfb_copyarea(p, area);
314         xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
315 }
316
317 static void xenfb_vm_open(struct vm_area_struct *vma)
318 {
319         struct xenfb_mapping *map = vma->vm_private_data;
320         atomic_inc(&map->map_refs);
321 }
322
/*
 * VMA close callback: drop one reference on the mapping and free it
 * when the last reference goes away.  The unlink happens under
 * mm_lock so the update thread never walks a half-freed entry
 * (region 2 of the locking scheme above).
 */
static void xenfb_vm_close(struct vm_area_struct *vma)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;

	mutex_lock(&info->mm_lock);
	if (atomic_dec_and_test(&map->map_refs)) {
		list_del(&map->link);
		kfree(map);
	}
	mutex_unlock(&info->mm_lock);
}
335
/*
 * Page fault handler: hand out the framebuffer page backing vaddr,
 * record the fault, and mark the scanlines covered by the page dirty
 * so the backend redraws them.  Holds both mm_lock and dirty_lock to
 * keep the dirty rectangle consistent with the fault counts (region 3
 * of the locking scheme above; see also the FIXME race described
 * there).
 */
static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
				    unsigned long vaddr, int *type)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;
	int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	if (pgnr >= info->nr_pages)
		return NOPAGE_SIGBUS;	/* fault beyond the framebuffer */

	mutex_lock(&info->mm_lock);
	spin_lock_irqsave(&info->dirty_lock, flags);
	page = info->pages[pgnr];
	get_page(page);
	map->faults++;

	/* Scanline range this page covers, clipped to the screen height. */
	y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
	y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
	if (y2 > info->fb_info->var.yres)
		y2 = info->fb_info->var.yres;
	__xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
	mutex_unlock(&info->mm_lock);

	if (type)
		*type = VM_FAULT_MINOR;

	return page;
}
368
/* VMA operations for userspace framebuffer mappings (see xenfb_mmap). */
static struct vm_operations_struct xenfb_vm_ops = {
	.open	= xenfb_vm_open,
	.close	= xenfb_vm_close,
	.nopage = xenfb_vm_nopage,
};
374
/*
 * fb_mmap hook: set up a fault-driven mapping of the framebuffer.
 *
 * Only writable shared mappings starting at offset 0 and no larger
 * than the framebuffer are accepted.  No pages are populated here;
 * xenfb_vm_nopage() fills them in on demand so that faults can be
 * tracked for the update protocol.
 *
 * Returns 0 on success, -EINVAL for an unacceptable mapping,
 * -ENOMEM on allocation failure.
 */
static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
{
	struct xenfb_info *info = fb_info->par;
	struct xenfb_mapping *map;
	int map_pages;

	if (!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	/* Round the requested length up to whole pages. */
	map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
	if (map_pages > info->nr_pages)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->vma = vma;
	map->faults = 0;
	map->info = info;
	atomic_set(&map->map_refs, 1);

	/* Publish the mapping (region 2 of the locking scheme above). */
	mutex_lock(&info->mm_lock);
	list_add(&map->link, &info->mappings);
	mutex_unlock(&info->mm_lock);

	vma->vm_ops = &xenfb_vm_ops;
	vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
	vma->vm_private_data = map;

	return 0;
}
411
/* fbdev operations: cfb drawing helpers wrapped with dirty-rect tracking. */
static struct fb_ops xenfb_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_setcolreg	= xenfb_setcolreg,
	.fb_fillrect	= xenfb_fillrect,
	.fb_copyarea	= xenfb_copyarea,
	.fb_imageblit	= xenfb_imageblit,
	.fb_mmap	= xenfb_mmap,
};
420
421 static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
422 {
423         /*
424          * No in events recognized, simply ignore them all.
425          * If you need to recognize some, see xenbkd's input_handler()
426          * for how to do that.
427          */
428         struct xenfb_info *info = dev_id;
429         struct xenfb_page *page = info->page;
430
431         if (page->in_cons != page->in_prod) {
432                 info->page->in_cons = info->page->in_prod;
433                 notify_remote_via_irq(info->irq);
434         }
435         return IRQ_HANDLED;
436 }
437
/* Translate a vmalloc'ed kernel address to its machine frame number. */
static unsigned long vmalloc_to_mfn(void *address)
{
	return pfn_to_mfn(vmalloc_to_pfn(address));
}
442
443 static int __devinit xenfb_probe(struct xenbus_device *dev,
444                                  const struct xenbus_device_id *id)
445 {
446         struct xenfb_info *info;
447         struct fb_info *fb_info;
448         int ret;
449
450         info = kzalloc(sizeof(*info), GFP_KERNEL);
451         if (info == NULL) {
452                 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
453                 return -ENOMEM;
454         }
455         dev->dev.driver_data = info;
456         info->xbdev = dev;
457         info->irq = -1;
458         info->x1 = info->y1 = INT_MAX;
459         spin_lock_init(&info->dirty_lock);
460         mutex_init(&info->mm_lock);
461         init_waitqueue_head(&info->wq);
462         init_timer(&info->refresh);
463         info->refresh.function = xenfb_timer;
464         info->refresh.data = (unsigned long)info;
465         INIT_LIST_HEAD(&info->mappings);
466
467         info->fb = vmalloc(xenfb_mem_len);
468         if (info->fb == NULL)
469                 goto error_nomem;
470         memset(info->fb, 0, xenfb_mem_len);
471
472         info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
473
474         info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
475                               GFP_KERNEL);
476         if (info->pages == NULL)
477                 goto error_nomem;
478
479         info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
480         if (!info->mfns)
481                 goto error_nomem;
482
483         /* set up shared page */
484         info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
485         if (!info->page)
486                 goto error_nomem;
487
488         xenfb_init_shared_page(info);
489
490         fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
491                                 /* see fishy hackery below */
492         if (fb_info == NULL)
493                 goto error_nomem;
494
495         /* FIXME fishy hackery */
496         fb_info->pseudo_palette = fb_info->par;
497         fb_info->par = info;
498         /* /FIXME */
499         fb_info->screen_base = info->fb;
500
501         fb_info->fbops = &xenfb_fb_ops;
502         fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
503         fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
504         fb_info->var.bits_per_pixel = info->page->depth;
505
506         fb_info->var.red = (struct fb_bitfield){16, 8, 0};
507         fb_info->var.green = (struct fb_bitfield){8, 8, 0};
508         fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
509
510         fb_info->var.activate = FB_ACTIVATE_NOW;
511         fb_info->var.height = -1;
512         fb_info->var.width = -1;
513         fb_info->var.vmode = FB_VMODE_NONINTERLACED;
514
515         fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
516         fb_info->fix.line_length = info->page->line_length;
517         fb_info->fix.smem_start = 0;
518         fb_info->fix.smem_len = xenfb_mem_len;
519         strcpy(fb_info->fix.id, "xen");
520         fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
521         fb_info->fix.accel = FB_ACCEL_NONE;
522
523         fb_info->flags = FBINFO_FLAG_DEFAULT;
524
525         ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
526         if (ret < 0) {
527                 framebuffer_release(fb_info);
528                 xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
529                 goto error;
530         }
531
532         ret = register_framebuffer(fb_info);
533         if (ret) {
534                 fb_dealloc_cmap(&info->fb_info->cmap);
535                 framebuffer_release(fb_info);
536                 xenbus_dev_fatal(dev, ret, "register_framebuffer");
537                 goto error;
538         }
539         info->fb_info = fb_info;
540
541         /* FIXME should this be delayed until backend XenbusStateConnected? */
542         info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
543         if (IS_ERR(info->kthread)) {
544                 ret = PTR_ERR(info->kthread);
545                 info->kthread = NULL;
546                 xenbus_dev_fatal(dev, ret, "register_framebuffer");
547                 goto error;
548         }
549
550         ret = xenfb_connect_backend(dev, info);
551         if (ret < 0)
552                 goto error;
553
554         return 0;
555
556  error_nomem:
557         ret = -ENOMEM;
558         xenbus_dev_fatal(dev, ret, "allocating device memory");
559  error:
560         xenfb_remove(dev);
561         return ret;
562 }
563
/*
 * Resume after save/restore or migration: the old event channel is
 * gone, so disconnect, reinitialize the shared page contents, and
 * reconnect to the (new) backend.
 */
static int xenfb_resume(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	xenfb_disconnect_backend(info);
	xenfb_init_shared_page(info);
	return xenfb_connect_backend(dev, info);
}
572
573 static int xenfb_remove(struct xenbus_device *dev)
574 {
575         struct xenfb_info *info = dev->dev.driver_data;
576
577         del_timer(&info->refresh);
578         if (info->kthread)
579                 kthread_stop(info->kthread);
580         xenfb_disconnect_backend(info);
581         if (info->fb_info) {
582                 unregister_framebuffer(info->fb_info);
583                 fb_dealloc_cmap(&info->fb_info->cmap);
584                 framebuffer_release(info->fb_info);
585         }
586         free_page((unsigned long)info->page);
587         vfree(info->mfns);
588         kfree(info->pages);
589         vfree(info->fb);
590         kfree(info);
591
592         return 0;
593 }
594
595 static void xenfb_init_shared_page(struct xenfb_info *info)
596 {
597         int i;
598
599         for (i = 0; i < info->nr_pages; i++)
600                 info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
601
602         for (i = 0; i < info->nr_pages; i++)
603                 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
604
605         info->page->pd[0] = vmalloc_to_mfn(info->mfns);
606         info->page->pd[1] = 0;
607         info->page->width = XENFB_WIDTH;
608         info->page->height = XENFB_HEIGHT;
609         info->page->depth = XENFB_DEPTH;
610         info->page->line_length = (info->page->depth / 8) * info->page->width;
611         info->page->mem_length = xenfb_mem_len;
612         info->page->in_cons = info->page->in_prod = 0;
613         info->page->out_cons = info->page->out_prod = 0;
614 }
615
/*
 * Bind an event channel and advertise the shared page, event channel,
 * protocol ABI and update capability to the backend via xenstore,
 * then switch to Initialised.  The transaction is retried on -EAGAIN
 * as usual for xenbus.
 *
 * Returns 0 on success (info->irq then holds the bound irq), a
 * negative errno otherwise.
 */
static int xenfb_connect_backend(struct xenbus_device *dev,
				 struct xenfb_info *info)
{
	int ret;
	struct xenbus_transaction xbt;

	ret = bind_listening_port_to_irqhandler(
		dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
	if (ret < 0) {
		xenbus_dev_fatal(dev, ret,
				 "bind_listening_port_to_irqhandler");
		return ret;
	}
	info->irq = ret;

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		return ret;
	}
	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
			    virt_to_mfn(info->page));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    irq_to_evtchn_port(info->irq));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (ret)
		goto error_xenbus;
	/* Advertise that we can send XENFB_TYPE_UPDATE events. */
	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;	/* transaction conflict: retry */
		xenbus_dev_fatal(dev, ret, "completing transaction");
		return ret;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);	/* abort the transaction */
	xenbus_dev_fatal(dev, ret, "writing xenstore");
	return ret;
}
668
/*
 * Drop the event-channel binding, if any, and mark the device
 * disconnected (irq == -1).  Safe to call when never connected.
 */
static void xenfb_disconnect_backend(struct xenfb_info *info)
{
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
}
675
/*
 * xenbus otherend_changed callback: follow the backend state machine.
 *
 * On InitWait we answer with Connected.  On Connected we read the
 * backend's "request-update" node to learn whether it wants explicit
 * XENFB_TYPE_UPDATE events (consumed by xenfb_update_screen()).  On
 * Closing we complete the frontend close handshake.
 */
static void xenfb_backend_changed(struct xenbus_device *dev,
				  enum xenbus_state backend_state)
{
	struct xenfb_info *info = dev->dev.driver_data;
	int val;

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
	InitWait:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		/*
		 * Work around xenbus race condition: If backend goes
		 * through InitWait to Connected fast enough, we can
		 * get Connected twice here.
		 */
		if (dev->state != XenbusStateConnected)
			goto InitWait; /* no InitWait seen yet, fudge it */

		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				 "request-update", "%d", &val) < 0)
			val = 0;	/* node missing: assume no updates wanted */
		if (val)
			info->update_wanted = 1;
		break;

	case XenbusStateClosing:
		// FIXME is this safe in any dev->state?
		xenbus_frontend_closed(dev);
		break;
	}
}
716
/* xenbus device type this driver binds to. */
static struct xenbus_device_id xenfb_ids[] = {
	{ "vfb" },
	{ "" }		/* list terminator */
};

/* xenbus driver glue for the virtual framebuffer frontend. */
static struct xenbus_driver xenfb = {
	.name = "vfb",
	.owner = THIS_MODULE,
	.ids = xenfb_ids,
	.probe = xenfb_probe,
	.remove = xenfb_remove,
	.resume = xenfb_resume,
	.otherend_changed = xenfb_backend_changed,
};
731
732 static int __init xenfb_init(void)
733 {
734         if (!is_running_on_xen())
735                 return -ENODEV;
736
737         /* Nothing to do if running in dom0. */
738         if (is_initial_xendomain())
739                 return -ENODEV;
740
741         return xenbus_register_frontend(&xenfb);
742 }
743
744 static void __exit xenfb_cleanup(void)
745 {
746         return xenbus_unregister_driver(&xenfb);
747 }
748
module_init(xenfb_init);
module_exit(xenfb_cleanup);

/* GPL, per the license notice in the file header. */
MODULE_LICENSE("GPL");