Update to 3.4-final.
[linux-flexiantxendom0-3.2.10.git] / drivers / xen / fbfront / xenfb.c
1 /*
2  * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
3  *
4  * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
5  * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
6  *
7  *  Based on linux/drivers/video/q40fb.c
8  *
9  *  This file is subject to the terms and conditions of the GNU General Public
10  *  License. See the file COPYING in the main directory of this archive for
11  *  more details.
12  */
13
14 /*
15  * TODO:
16  *
17  * Switch to grant tables when they become capable of dealing with the
18  * frame buffer.
19  */
20
21 #include <linux/console.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/fb.h>
25 #include <linux/module.h>
26 #include <linux/vmalloc.h>
27 #include <linux/mm.h>
28 #include <linux/slab.h>
29 #include <linux/mutex.h>
30 #include <linux/freezer.h>
31 #include <asm/hypervisor.h>
32 #include <xen/evtchn.h>
33 #include <xen/interface/io/fbif.h>
34 #include <xen/interface/io/protocols.h>
35 #include <xen/xenbus.h>
36 #include <linux/kthread.h>
37
/*
 * Tracks one mmap()ed user mapping of the frame buffer.  One instance
 * is created per xenfb_mmap() call and linked into xenfb_info.mappings
 * (list protected by xenfb_info.mm_lock).
 */
struct xenfb_mapping
{
	struct list_head	link;		/* entry in xenfb_info.mappings */
	struct vm_area_struct	*vma;		/* user VMA backing this mapping */
	atomic_t		map_refs;	/* VMA open/close reference count */
	int			faults;		/* pages faulted in since last zap */
	struct xenfb_info	*info;		/* owning device instance */
};
46
/* Per-device frontend state; one instance per "vfb" xenbus device. */
struct xenfb_info
{
	struct task_struct	*kthread;	/* refresh thread, see xenfb_thread() */
	wait_queue_head_t	wq;		/* wakes kthread on damage/timer */

	unsigned char		*fb;		/* vmalloc()ed frame buffer memory */
	struct fb_info		*fb_info;	/* registered fbdev instance */
	struct timer_list	refresh;	/* rate-limits updates to xenfb_fps */
	int			dirty;		/* non-zero if dirty rectangle valid */
	int			x1, y1, x2, y2; /* dirty rectangle,
						   protected by dirty_lock */
	spinlock_t		dirty_lock;
	struct mutex		mm_lock;
	int			nr_pages;	/* number of pages in fb */
	struct page		**pages;	/* struct page of each fb page */
	struct list_head	mappings; /* protected by mm_lock */

	int			irq;		/* event channel irq, -1 when unbound */
	struct xenfb_page	*page;		/* shared page seen by the backend */
	unsigned long		*mfns;		/* MFN of each fb page, referenced by page->pd */
	int			feature_resize; /* Backend has resize feature */
	struct xenfb_resize	resize;		/* cached pending resize request */
	int			resize_dpy;	/* non-zero if a resize is pending */
	spinlock_t		resize_lock;	/* protects resize and resize_dpy */

	struct xenbus_device	*xbdev;		/* associated xenbus device */
};
74
75 /*
76  * There are three locks:
77  *    spinlock resize_lock protecting resize_dpy and resize
78  *    spinlock dirty_lock protecting the dirty rectangle
79  *    mutex mm_lock protecting mappings.
80  *
81  * How the dirty and mapping locks work together
82  *
83  * The problem is that dirty rectangle and mappings aren't
84  * independent: the dirty rectangle must cover all faulted pages in
85  * mappings.  We need to prove that our locking maintains this
86  * invariant.
87  *
88  * There are several kinds of critical regions:
89  *
90  * 1. Holding only dirty_lock: xenfb_refresh().  May run in
91  *    interrupts.  Extends the dirty rectangle.  Trivially preserves
92  *    invariant.
93  *
94  * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close().  Touch
95  *    only mappings.  The former creates unfaulted pages.  Preserves
96  *    invariant.  The latter removes pages.  Preserves invariant.
97  *
98  * 3. Holding both locks: xenfb_vm_fault().  Extends the dirty
99  *    rectangle and updates mappings consistently.  Preserves
100  *    invariant.
101  *
102  * 4. The ugliest one: xenfb_update_screen().  Clear the dirty
103  *    rectangle and update mappings consistently.
104  *
105  *    We can't simply hold both locks, because zap_page_range() cannot
106  *    be called with a spinlock held.
107  *
108  *    Therefore, we first clear the dirty rectangle with both locks
109  *    held.  Then we unlock dirty_lock and update the mappings.
110  *    Critical regions that hold only dirty_lock may interfere with
111  *    that.  This can only be region 1: xenfb_refresh().  But that
112  *    just extends the dirty rectangle, which can't harm the
113  *    invariant.
114  *
115  * But FIXME: the invariant is too weak.  It misses that the fault
116  * record in mappings must be consistent with the mapping of pages in
117  * the associated address space!  __do_fault() updates the PTE after
118  * xenfb_vm_fault() returns, i.e. outside the critical region.  This
119  * allows the following race:
120  *
121  * X writes to some address in the Xen frame buffer
122  * Fault - call __do_fault()
123  *     call xenfb_vm_fault()
124  *         grab mm_lock
125  *         map->faults++;
126  *         release mm_lock
127  *     return back to do_no_page()
128  * (preempted, or SMP)
129  * Xen worker thread runs.
130  *      grab mm_lock
131  *      look at mappings
132  *          find this mapping, zaps its pages (but page not in pte yet)
133  *          clear map->faults
134  *      releases mm_lock
135  * (back to X process)
136  *     put page in X's pte
137  *
 * Oh well, we won't be updating the writes to this page anytime soon.
139  */
#define MB_ (1024*1024)		/* bytes per megabyte */
#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)

/* Indices into the video[] module parameter array. */
enum {KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT};
static int video[KPARAM_CNT] = {2, XENFB_WIDTH, XENFB_HEIGHT};
module_param_array(video, int, NULL, 0);
MODULE_PARM_DESC(video,
		"Size of video memory in MB and width,height in pixels, default = (2,800,600)");

/* Maximum rate of update events sent to the backend, in frames/second. */
static int xenfb_fps = 20;

/* Forward declarations. */
static int xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);
155
/*
 * Push one event onto the out ring of the shared page and kick the
 * backend.  The caller must have checked !xenfb_queue_full(); this
 * function does not verify ring space itself.  The barrier ordering
 * (read prod, write slot, publish prod) must be preserved.
 */
static void xenfb_send_event(struct xenfb_info *info,
		union xenfb_out_event *event)
{
	__u32 prod;

	prod = info->page->out_prod;
	/* caller ensures !xenfb_queue_full() */
	mb();			/* ensure ring space available */
	XENFB_OUT_RING_REF(info->page, prod) = *event;
	wmb();			/* ensure ring contents visible */
	info->page->out_prod = prod + 1;

	notify_remote_via_irq(info->irq);
}
170
171 static void xenfb_do_update(struct xenfb_info *info,
172                             int x, int y, int w, int h)
173 {
174         union xenfb_out_event event;
175
176         memset(&event, 0, sizeof(event));
177         event.type = XENFB_TYPE_UPDATE;
178         event.update.x = x;
179         event.update.y = y;
180         event.update.width = w;
181         event.update.height = h;
182
183         /* caller ensures !xenfb_queue_full() */
184         xenfb_send_event(info, &event);
185 }
186
187 static void xenfb_do_resize(struct xenfb_info *info)
188 {
189         union xenfb_out_event event;
190
191         memset(&event, 0, sizeof(event));
192         event.resize = info->resize;
193
194         /* caller ensures !xenfb_queue_full() */
195         xenfb_send_event(info, &event);
196 }
197
198 static int xenfb_queue_full(struct xenfb_info *info)
199 {
200         __u32 cons, prod;
201
202         prod = info->page->out_prod;
203         cons = info->page->out_cons;
204         return prod - cons == XENFB_OUT_RING_LEN;
205 }
206
/*
 * Flush the dirty rectangle to the backend: snapshot and clear the
 * rectangle, zap all faulted user mappings (so later writes fault
 * again and re-dirty), then send an update event.  Runs from
 * xenfb_thread().  See the long locking comment above for why
 * dirty_lock is dropped before the zap loop.
 */
static void xenfb_update_screen(struct xenfb_info *info)
{
	unsigned long flags;
	int y1, y2, x1, x2;
	struct xenfb_mapping *map;

	/* No room for an update event; retry on the next wakeup. */
	if (xenfb_queue_full(info))
		return;

	mutex_lock(&info->mm_lock);

	spin_lock_irqsave(&info->dirty_lock, flags);
	if (info->dirty){
		info->dirty = 0;
		y1 = info->y1;
		y2 = info->y2;
		x1 = info->x1;
		x2 = info->x2;
		/* Reset to "empty" so new damage restarts the rectangle. */
		info->x1 = info->y1 = INT_MAX;
		info->x2 = info->y2 = 0;
	} else {
		/* Nothing dirty: drop both locks and bail out. */
		spin_unlock_irqrestore(&info->dirty_lock, flags);
		mutex_unlock(&info->mm_lock);
		return;
	}
	spin_unlock_irqrestore(&info->dirty_lock, flags);

	/*
	 * Unmap every faulted page so the next user write faults again
	 * and extends the dirty rectangle.  zap_page_range() sleeps,
	 * hence only mm_lock (a mutex) is held here.
	 */
	list_for_each_entry(map, &info->mappings, link) {
		if (!map->faults)
			continue;
		zap_page_range(map->vma, map->vma->vm_start,
			       map->vma->vm_end - map->vma->vm_start, NULL);
		map->faults = 0;
	}

	mutex_unlock(&info->mm_lock);

	if (x2 < x1 || y2 < y1) {
		pr_warning("xenfb_update_screen bogus rect %d %d %d %d\n",
			   x1, x2, y1, y2);
		WARN_ON(1);
	}
	xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
}
251
252 static void xenfb_handle_resize_dpy(struct xenfb_info *info)
253 {
254         unsigned long flags;
255
256         spin_lock_irqsave(&info->resize_lock, flags);
257         if (info->resize_dpy) {
258                 if (!xenfb_queue_full(info)) {
259                         info->resize_dpy = 0;
260                         xenfb_do_resize(info);
261                 }
262         }
263         spin_unlock_irqrestore(&info->resize_lock, flags);
264 }
265
266 static int xenfb_thread(void *data)
267 {
268         struct xenfb_info *info = data;
269
270         while (!kthread_should_stop()) {
271                 xenfb_handle_resize_dpy(info);
272                 xenfb_update_screen(info);
273                 wait_event_interruptible(info->wq,
274                         kthread_should_stop() || info->dirty);
275                 try_to_freeze();
276         }
277         return 0;
278 }
279
280 static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
281                            unsigned blue, unsigned transp,
282                            struct fb_info *info)
283 {
284         u32 v;
285
286         if (regno > info->cmap.len)
287                 return 1;
288
289         red   >>= (16 - info->var.red.length);
290         green >>= (16 - info->var.green.length);
291         blue  >>= (16 - info->var.blue.length);
292
293         v = (red << info->var.red.offset) |
294             (green << info->var.green.offset) |
295             (blue << info->var.blue.offset);
296
297         /* FIXME is this sane?  check against xxxfb_setcolreg()!  */
298         switch (info->var.bits_per_pixel) {
299         case 16:
300         case 24:
301         case 32:
302                 ((u32 *)info->pseudo_palette)[regno] = v;
303                 break;
304         }
305         
306         return 0;
307 }
308
309 static void xenfb_timer(unsigned long data)
310 {
311         struct xenfb_info *info = (struct xenfb_info *)data;
312         wake_up(&info->wq);
313 }
314
315 static void __xenfb_refresh(struct xenfb_info *info,
316                             int x1, int y1, int w, int h)
317 {
318         int y2, x2;
319
320         y2 = y1 + h;
321         x2 = x1 + w;
322
323         if (info->y1 > y1)
324                 info->y1 = y1;
325         if (info->y2 < y2)
326                 info->y2 = y2;
327         if (info->x1 > x1)
328                 info->x1 = x1;
329         if (info->x2 < x2)
330                 info->x2 = x2;
331         info->dirty = 1;
332
333         if (timer_pending(&info->refresh))
334                 return;
335
336         mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
337 }
338
339 static void xenfb_refresh(struct xenfb_info *info,
340                           int x1, int y1, int w, int h)
341 {
342         unsigned long flags;
343
344         spin_lock_irqsave(&info->dirty_lock, flags);
345         __xenfb_refresh(info, x1, y1, w, h);
346         spin_unlock_irqrestore(&info->dirty_lock, flags);
347 }
348
349 static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
350 {
351         struct xenfb_info *info = p->par;
352
353         cfb_fillrect(p, rect);
354         xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
355 }
356
357 static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
358 {
359         struct xenfb_info *info = p->par;
360
361         cfb_imageblit(p, image);
362         xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
363 }
364
365 static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
366 {
367         struct xenfb_info *info = p->par;
368
369         cfb_copyarea(p, area);
370         xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
371 }
372
373 static void xenfb_vm_open(struct vm_area_struct *vma)
374 {
375         struct xenfb_mapping *map = vma->vm_private_data;
376         atomic_inc(&map->map_refs);
377 }
378
/*
 * VMA torn down: drop one reference; on the last one, unlink the
 * mapping from info->mappings (under mm_lock) and free it.
 */
static void xenfb_vm_close(struct vm_area_struct *vma)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;

	mutex_lock(&info->mm_lock);
	if (atomic_dec_and_test(&map->map_refs)) {
		list_del(&map->link);
		kfree(map);
	}
	mutex_unlock(&info->mm_lock);
}
391
/*
 * Fault handler for user mappings of the frame buffer.  Hands the
 * corresponding vmalloc page back to the VM, records the fault in the
 * mapping, and dirties the scanlines backed by that page so the
 * refresh thread picks them up.
 */
static int xenfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;
	int pgnr = ((long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	if (pgnr >= info->nr_pages)
		return VM_FAULT_SIGBUS;

	/* Hold both locks: fault count and dirty rect must stay consistent. */
	mutex_lock(&info->mm_lock);
	spin_lock_irqsave(&info->dirty_lock, flags);
	page = info->pages[pgnr];
	get_page(page);		/* reference handed to the VM via vmf->page */
	map->faults++;

	/* Dirty the scanline range that this page covers. */
	y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
	y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
	if (y2 > info->fb_info->var.yres)
		y2 = info->fb_info->var.yres;
	__xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
	mutex_unlock(&info->mm_lock);

	vmf->page = page;

	return VM_FAULT_MINOR;
}
422
/* VM operations for user mmap()s of the frame buffer. */
static struct vm_operations_struct xenfb_vm_ops = {
	.open	= xenfb_vm_open,
	.close	= xenfb_vm_close,
	.fault	= xenfb_vm_fault,
};
428
429 static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
430 {
431         struct xenfb_info *info = fb_info->par;
432         struct xenfb_mapping *map;
433         int map_pages;
434
435         if (!(vma->vm_flags & VM_WRITE))
436                 return -EINVAL;
437         if (!(vma->vm_flags & VM_SHARED))
438                 return -EINVAL;
439         if (vma->vm_pgoff != 0)
440                 return -EINVAL;
441
442         map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
443         if (map_pages > info->nr_pages)
444                 return -EINVAL;
445
446         map = kzalloc(sizeof(*map), GFP_KERNEL);
447         if (map == NULL)
448                 return -ENOMEM;
449
450         map->vma = vma;
451         map->faults = 0;
452         map->info = info;
453         atomic_set(&map->map_refs, 1);
454
455         mutex_lock(&info->mm_lock);
456         list_add(&map->link, &info->mappings);
457         mutex_unlock(&info->mm_lock);
458
459         vma->vm_ops = &xenfb_vm_ops;
460         vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
461         vma->vm_private_data = map;
462
463         return 0;
464 }
465
/*
 * fb_ops.fb_check_var: validate a requested video mode.  Without the
 * backend resize feature only the initial mode is accepted.  With it,
 * any mode is accepted that keeps the initial depth, does not exceed
 * the initial width/height, and fits in the allocated memory.
 */
static int
xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct xenfb_info *xenfb_info;
	int required_mem_len;

	xenfb_info = info->par;

	if (!xenfb_info->feature_resize) {
		/* Backend cannot resize: only the startup mode is valid. */
		if (var->xres == video[KPARAM_WIDTH] &&
			var->yres == video[KPARAM_HEIGHT] &&
			var->bits_per_pixel == xenfb_info->page->depth) {
			return 0;
		}
		return -EINVAL;
	}

	/* Can't resize past initial width and height */
	if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
		return -EINVAL;

	required_mem_len = var->xres * var->yres * (xenfb_info->page->depth / 8);
	if (var->bits_per_pixel == xenfb_info->page->depth &&
		var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
		required_mem_len <= info->fix.smem_len) {
		var->xres_virtual = var->xres;
		var->yres_virtual = var->yres;
		return 0;
	}
	return -EINVAL;
}
497
498 static int xenfb_set_par(struct fb_info *info)
499 {
500         struct xenfb_info *xenfb_info;
501         unsigned long flags;
502
503         xenfb_info = info->par;
504
505         spin_lock_irqsave(&xenfb_info->resize_lock, flags);
506         xenfb_info->resize.type = XENFB_TYPE_RESIZE;
507         xenfb_info->resize.width = info->var.xres;
508         xenfb_info->resize.height = info->var.yres;
509         xenfb_info->resize.stride = info->fix.line_length;
510         xenfb_info->resize.depth = info->var.bits_per_pixel;
511         xenfb_info->resize.offset = 0;
512         xenfb_info->resize_dpy = 1;
513         spin_unlock_irqrestore(&xenfb_info->resize_lock, flags);
514         return 0;
515 }
516
/* fbdev operations: unaccelerated cfb_* drawing plus dirty tracking. */
static struct fb_ops xenfb_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_setcolreg	= xenfb_setcolreg,
	.fb_fillrect	= xenfb_fillrect,
	.fb_copyarea	= xenfb_copyarea,
	.fb_imageblit	= xenfb_imageblit,
	.fb_mmap	= xenfb_mmap,
	.fb_check_var	= xenfb_check_var,
	.fb_set_par	= xenfb_set_par,
};
527
528 static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
529 {
530         /*
531          * No in events recognized, simply ignore them all.
532          * If you need to recognize some, see xenbkd's input_handler()
533          * for how to do that.
534          */
535         struct xenfb_info *info = dev_id;
536         struct xenfb_page *page = info->page;
537
538         if (page->in_cons != page->in_prod) {
539                 info->page->in_cons = info->page->in_prod;
540                 notify_remote_via_irq(info->irq);
541         }
542         return IRQ_HANDLED;
543 }
544
/* Translate a vmalloc()ed kernel address to its machine frame number. */
static unsigned long vmalloc_to_mfn(void *address)
{
	return pfn_to_mfn(vmalloc_to_pfn(address));
}
549
/*
 * Make the tty0 console preferred unless the user picked a console on
 * the kernel command line.  Re-registering with CON_CONSDEV set makes
 * it the preferred console; CON_PRINTBUFFER is cleared so the log
 * buffer is not replayed a second time.
 */
static __devinit void
xenfb_make_preferred_console(void)
{
	struct console *c;

	if (console_set_on_cmdline)
		return;

	console_lock();
	for_each_console(c) {
		if (!strcmp(c->name, "tty") && c->index == 0)
			break;
	}
	console_unlock();
	/* c is NULL when no tty0 console was found. */
	if (c) {
		unregister_console(c);
		c->flags |= CON_CONSDEV;
		c->flags &= ~CON_PRINTBUFFER; /* don't print again */
		register_console(c);
	}
}
571
572 static int __devinit xenfb_probe(struct xenbus_device *dev,
573                                  const struct xenbus_device_id *id)
574 {
575         struct xenfb_info *info;
576         struct fb_info *fb_info;
577         int fb_size;
578         int val;
579         int ret;
580
581         info = kzalloc(sizeof(*info), GFP_KERNEL);
582         if (info == NULL) {
583                 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
584                 return -ENOMEM;
585         }
586
587         /* Limit kernel param videoram amount to what is in xenstore */
588         if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
589                 if (val < video[KPARAM_MEM])
590                         video[KPARAM_MEM] = val;
591         }
592
593         /* If requested res does not fit in available memory, use default */
594         fb_size = video[KPARAM_MEM] * MB_;
595         if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH/8 > fb_size) {
596                 video[KPARAM_WIDTH] = XENFB_WIDTH;
597                 video[KPARAM_HEIGHT] = XENFB_HEIGHT;
598                 fb_size = XENFB_DEFAULT_FB_LEN;
599         }
600
601         dev_set_drvdata(&dev->dev, info);
602         info->xbdev = dev;
603         info->irq = -1;
604         info->x1 = info->y1 = INT_MAX;
605         spin_lock_init(&info->dirty_lock);
606         spin_lock_init(&info->resize_lock);
607         mutex_init(&info->mm_lock);
608         init_waitqueue_head(&info->wq);
609         init_timer(&info->refresh);
610         info->refresh.function = xenfb_timer;
611         info->refresh.data = (unsigned long)info;
612         INIT_LIST_HEAD(&info->mappings);
613
614         info->fb = vzalloc(fb_size);
615         if (info->fb == NULL)
616                 goto error_nomem;
617
618         info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
619
620         info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
621                               GFP_KERNEL);
622         if (info->pages == NULL)
623                 goto error_nomem;
624
625         info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
626         if (!info->mfns)
627                 goto error_nomem;
628
629         /* set up shared page */
630         info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
631         if (!info->page)
632                 goto error_nomem;
633
634         fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
635                                 /* see fishy hackery below */
636         if (fb_info == NULL)
637                 goto error_nomem;
638
639         /* FIXME fishy hackery */
640         fb_info->pseudo_palette = fb_info->par;
641         fb_info->par = info;
642         /* /FIXME */
643         fb_info->screen_base = info->fb;
644
645         fb_info->fbops = &xenfb_fb_ops;
646         fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
647         fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
648         fb_info->var.bits_per_pixel = XENFB_DEPTH;
649
650         fb_info->var.red = (struct fb_bitfield){16, 8, 0};
651         fb_info->var.green = (struct fb_bitfield){8, 8, 0};
652         fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
653
654         fb_info->var.activate = FB_ACTIVATE_NOW;
655         fb_info->var.height = -1;
656         fb_info->var.width = -1;
657         fb_info->var.vmode = FB_VMODE_NONINTERLACED;
658
659         fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
660         fb_info->fix.line_length = fb_info->var.xres * (XENFB_DEPTH / 8);
661         fb_info->fix.smem_start = 0;
662         fb_info->fix.smem_len = fb_size;
663         strcpy(fb_info->fix.id, "xen");
664         fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
665         fb_info->fix.accel = FB_ACCEL_NONE;
666
667         fb_info->flags = FBINFO_FLAG_DEFAULT;
668
669         ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
670         if (ret < 0) {
671                 framebuffer_release(fb_info);
672                 xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
673                 goto error;
674         }
675
676         xenfb_init_shared_page(info, fb_info);
677
678         ret = register_framebuffer(fb_info);
679         if (ret) {
680                 fb_dealloc_cmap(&info->fb_info->cmap);
681                 framebuffer_release(fb_info);
682                 xenbus_dev_fatal(dev, ret, "register_framebuffer");
683                 goto error;
684         }
685         info->fb_info = fb_info;
686
687         ret = xenfb_connect_backend(dev, info);
688         if (ret < 0)
689                 goto error;
690
691         xenfb_make_preferred_console();
692         return 0;
693
694  error_nomem:
695         ret = -ENOMEM;
696         xenbus_dev_fatal(dev, ret, "allocating device memory");
697  error:
698         xenfb_remove(dev);
699         return ret;
700 }
701
/*
 * Resume after save/restore or migration: drop the old event channel,
 * rebuild the shared page contents and reconnect to the backend.
 */
static int xenfb_resume(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev_get_drvdata(&dev->dev);

	xenfb_disconnect_backend(info);
	xenfb_init_shared_page(info, info->fb_info);
	return xenfb_connect_backend(dev, info);
}
710
/*
 * Tear the device down: stop the refresh timer and thread, disconnect
 * from the backend, unregister the framebuffer and free all memory.
 * Also serves as the error-path cleanup for xenfb_probe(), so members
 * may still be NULL; each free below is NULL-safe or guarded.
 */
static int xenfb_remove(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev_get_drvdata(&dev->dev);

	del_timer(&info->refresh);
	if (info->kthread)
		kthread_stop(info->kthread);
	xenfb_disconnect_backend(info);
	if (info->fb_info) {
		unregister_framebuffer(info->fb_info);
		fb_dealloc_cmap(&info->fb_info->cmap);
		framebuffer_release(info->fb_info);
	}
	free_page((unsigned long)info->page);
	vfree(info->mfns);
	kfree(info->pages);
	vfree(info->fb);
	kfree(info);

	return 0;
}
732
/*
 * (Re)build the shared page: record the MFN of every frame buffer
 * page in mfns[], point the shared page's page directory (pd[]) at
 * the pages holding mfns[], publish the video mode, and reset both
 * event rings.
 */
static void xenfb_init_shared_page(struct xenfb_info *info,
				   struct fb_info * fb_info)
{
	int i;
	int epd = PAGE_SIZE / sizeof(info->mfns[0]); /* MFN entries per pd page */

	for (i = 0; i < info->nr_pages; i++)
		info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);

	for (i = 0; i < info->nr_pages; i++)
		info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);

	/* pd[] holds the MFNs of the pages that make up mfns[] itself. */
	for (i = 0; i * epd < info->nr_pages; i++)
		info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);

	info->page->width = fb_info->var.xres;
	info->page->height = fb_info->var.yres;
	info->page->depth = fb_info->var.bits_per_pixel;
	info->page->line_length = fb_info->fix.line_length;
	info->page->mem_length = fb_info->fix.smem_len;
	info->page->in_cons = info->page->in_prod = 0;
	info->page->out_cons = info->page->out_prod = 0;
}
756
/*
 * Bind an event channel towards the backend domain and publish the
 * shared page reference, event channel, protocol and feature-update
 * flag in xenstore, retrying the transaction on -EAGAIN.  On success
 * the device is switched to Initialised; on failure the irq is
 * unbound again before returning.
 */
static int xenfb_connect_backend(struct xenbus_device *dev,
				 struct xenfb_info *info)
{
	int ret, irq;
	struct xenbus_transaction xbt;

	irq = bind_listening_port_to_irqhandler(
		dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
	if (irq < 0) {
		xenbus_dev_fatal(dev, irq,
				 "bind_listening_port_to_irqhandler");
		return irq;
	}

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		goto unbind_irq;
	}
	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
			    virt_to_mfn(info->page));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    irq_to_evtchn_port(irq));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;	/* transaction conflict: retry */
		xenbus_dev_fatal(dev, ret, "completing transaction");
		goto unbind_irq;
	}

	/* Only record the irq once the connection is fully published. */
	info->irq = irq;
	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);	/* abort the transaction */
	xenbus_dev_fatal(dev, ret, "writing xenstore");
 unbind_irq:
	unbind_from_irqhandler(irq, info);
	return ret;
}
811
/* Unbind the backend event channel irq, if any, and invalidate it. */
static void xenfb_disconnect_backend(struct xenfb_info *info)
{
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
}
818
/*
 * Xenbus callback: react to backend state changes.  Drives the
 * frontend to Connected, reads the backend's "feature-resize" and
 * "request-update" nodes once connected, and starts the refresh
 * thread when the backend requested explicit update events.
 */
static void xenfb_backend_changed(struct xenbus_device *dev,
				  enum xenbus_state backend_state)
{
	struct xenfb_info *info = dev_get_drvdata(&dev->dev);
	int val;

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
	InitWait:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		/*
		 * Work around xenbus race condition: If backend goes
		 * through InitWait to Connected fast enough, we can
		 * get Connected twice here.
		 */
		if (dev->state != XenbusStateConnected)
			goto InitWait; /* no InitWait seen yet, fudge it */


		if (xenbus_scanf(XBT_NIL, dev->otherend,
					"feature-resize", "%d", &val) < 0)
			val = 0;	/* node absent: backend can't resize */
		info->feature_resize = val;

		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				 "request-update", "%d", &val) < 0)
			val = 0;	/* node absent: no update events wanted */

		/* Start the refresh thread once, if the backend wants updates. */
		if (val && !info->kthread) {
			info->kthread = kthread_run(xenfb_thread, info,
						    "xenfb thread");
			if (IS_ERR(info->kthread)) {
				info->kthread = NULL;
				xenbus_dev_fatal(dev, PTR_ERR(info->kthread),
						"xenfb_thread");
			}
		}
		break;

	case XenbusStateClosing:
		// FIXME is this safe in any dev->state?
		xenbus_frontend_closed(dev);
		break;
	}
}
875
/* Devices handled by this frontend: xenstore "vfb" nodes. */
static const struct xenbus_device_id xenfb_ids[] = {
	{ "vfb" },
	{ "" }
};
MODULE_ALIAS("xen:vfb");
881
/* Xenbus driver glue binding the callbacks above to the "vfb" device. */
static DEFINE_XENBUS_DRIVER(xenfb, ,
	.probe = xenfb_probe,
	.remove = xenfb_remove,
	.resume = xenfb_resume,
	.otherend_changed = xenfb_backend_changed,
);
888
889 static int __init xenfb_init(void)
890 {
891         if (!is_running_on_xen())
892                 return -ENODEV;
893
894         /* Nothing to do if running in dom0. */
895         if (is_initial_xendomain())
896                 return -ENODEV;
897
898         return xenbus_register_frontend(&xenfb_driver);
899 }
900
901 static void __exit xenfb_cleanup(void)
902 {
903         return xenbus_unregister_driver(&xenfb_driver);
904 }
905
/* Module entry/exit points and metadata. */
module_init(xenfb_init);
module_exit(xenfb_cleanup);

MODULE_DESCRIPTION("Xen virtual framebuffer device frontend");
MODULE_LICENSE("GPL");