/*
 * Copyright (C) 2006 IBM Corporation
 *
 * Authors:
 * Stefan Berger <stefanb@us.ibm.com>
 *
 * Generic device driver part for device drivers in a virtualized
 * environment.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */

#include <asm/uaccess.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include "tpm.h"
#include "tpm_vtpm.h"

/* read status bits */
enum {
        STATUS_BUSY = 0x01,
        STATUS_DATA_AVAIL = 0x02,
        STATUS_READY = 0x04
};

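/*
 * These mirror TIS-style status bits.  The generic TPM layer polls them
 * through the .status callback and matches them against
 * req_complete_mask/req_complete_val/req_canceled in the tpm_vtpm
 * definition at the bottom of this file to decide when a command has
 * completed or was cancelled.
 */
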
struct transmission {
        struct list_head next;

        unsigned char *request;
        size_t  request_len;
        size_t  request_buflen;

        unsigned char *response;
        size_t  response_len;
        size_t  response_buflen;

        unsigned int flags;
};

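/*
 * The request/response buffers are heap allocations that are reused
 * across commands: *_buflen tracks the capacity of each buffer, *_len
 * the number of valid bytes in it.  transmission_free() releases both
 * buffers together with the transmission itself.
 */
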
enum {
        TRANSMISSION_FLAG_WAS_QUEUED = 0x1
};


enum {
        DATAEX_FLAG_QUEUED_ONLY = 0x1
};


/* local variables */

/* local function prototypes */
static int _vtpm_send_queued(struct tpm_chip *chip);


/* =============================================================
 * Some utility functions
 * =============================================================
 */
static void vtpm_state_init(struct vtpm_state *vtpms)
{
        vtpms->current_request = NULL;
        spin_lock_init(&vtpms->req_list_lock);
        init_waitqueue_head(&vtpms->req_wait_queue);
        INIT_LIST_HEAD(&vtpms->queued_requests);

        vtpms->current_response = NULL;
        spin_lock_init(&vtpms->resp_list_lock);
        init_waitqueue_head(&vtpms->resp_wait_queue);

        vtpms->disconnect_time = jiffies;
}


static inline struct transmission *transmission_alloc(void)
{
        return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
}

static unsigned char *
transmission_set_req_buffer(struct transmission *t,
                            unsigned char *buffer, size_t len)
{
        if (t->request_buflen < len) {
                kfree(t->request);
                t->request = kmalloc(len, GFP_KERNEL);
                if (!t->request) {
                        t->request_buflen = 0;
                        return NULL;
                }
                t->request_buflen = len;
        }

        memcpy(t->request, buffer, len);
        t->request_len = len;

        return t->request;
}

static unsigned char *
transmission_set_res_buffer(struct transmission *t,
                            const unsigned char *buffer, size_t len)
{
        if (t->response_buflen < len) {
                kfree(t->response);
                t->response = kmalloc(len, GFP_ATOMIC);
                if (!t->response) {
                        t->response_buflen = 0;
                        return NULL;
                }
                t->response_buflen = len;
        }

        memcpy(t->response, buffer, len);
        t->response_len = len;

        return t->response;
}

static inline void transmission_free(struct transmission *t)
{
        kfree(t->request);
        kfree(t->response);
        kfree(t);
}

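/*
 * Illustrative lifecycle of a transmission (a sketch; the code below
 * does not call the helpers in exactly this sequence):
 *
 *      struct transmission *t = transmission_alloc();
 *      if (t && !transmission_set_req_buffer(t, buf, len))
 *              transmission_free(t);   (copying the request failed)
 *
 * Note the asymmetry in allocation flags: request buffers are filled
 * from process context (GFP_KERNEL), response buffers from the lower
 * layer's callback (GFP_ATOMIC).
 */
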
/* =============================================================
 * Interface with the lower layer driver
 * =============================================================
 */
/*
 * Lower layer uses this function to make a response available.
 */
int vtpm_vd_recv(const struct tpm_chip *chip,
                 const unsigned char *buffer, size_t count,
                 void *ptr)
{
        unsigned long flags;
        int ret_size = 0;
        struct transmission *t;
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        /*
         * Only one request may be outstanding, and it must be the
         * one the lower layer hands back to us in 'ptr'.
         */
        spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        if (vtpms->current_request != ptr) {
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                return 0;
        }

        if ((t = vtpms->current_request)) {
                transmission_free(t);
                vtpms->current_request = NULL;
        }

        t = transmission_alloc();
        if (t) {
                if (!transmission_set_res_buffer(t, buffer, count)) {
                        transmission_free(t);
                        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                        return -ENOMEM;
                }
                ret_size = count;
                vtpms->current_response = t;
                wake_up_interruptible(&vtpms->resp_wait_queue);
        }
        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);

        return ret_size;
}

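/*
 * vtpm_vd_recv() may be invoked from the lower layer's interrupt
 * handler; hence the irqsave locking above and the GFP_ATOMIC
 * allocations in the response path.
 */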

/*
 * Lower layer indicates its status (connected/disconnected)
 */
void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
{
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        vtpms->vd_status = vd_status;
        if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
                vtpms->disconnect_time = jiffies;
        }
}

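/*
 * disconnect_time is consumed by vtpm_send() below: once the back-end
 * has been gone for more than 10 seconds (HZ * 10), new commands fail
 * with -ENOENT instead of being queued.
 */
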
/* =============================================================
 * Interface with the generic TPM driver
 * =============================================================
 */
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int rc = 0;
        unsigned long flags;
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        /*
         * Check whether the previous operation only queued the command.
         * In that case there won't be a response, so just reset the
         * flag and return from here. In any other case a response is
         * expected from the back-end.
         */
        spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
                vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                /*
                 * The first few commands (measurements) must be
                 * queued since it might not be possible to talk to the
                 * TPM, yet.
                 * Return a response of up to 30 '0's.
                 */

                count = min_t(size_t, count, 30);
                memset(buf, 0x0, count);
                return count;
        }
        /*
         * Check whether something is in the response list and if
         * there's nothing in the list wait for something to appear.
         */

        if (!vtpms->current_response) {
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                /*
                 * Wait (timeout is in jiffies) for a response to show
                 * up; unlike the racy interruptible_sleep_on_timeout(),
                 * wait_event_interruptible_timeout() re-checks the
                 * condition, so a wake-up cannot be lost.
                 */
                wait_event_interruptible_timeout(vtpms->resp_wait_queue,
                                                 vtpms->current_response,
                                                 1000);
                spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        }

        if (vtpms->current_response) {
                struct transmission *t = vtpms->current_response;
                vtpms->current_response = NULL;
                rc = min(count, t->response_len);
                memcpy(buf, t->response, rc);
                transmission_free(t);
        }

        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
        return rc;
}

static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int rc = 0;
        unsigned long flags;
        struct transmission *t = transmission_alloc();
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        if (!t)
                return -ENOMEM;
        /*
         * If there's a current request, it must be the
         * previous request that has timed out.
         */
        spin_lock_irqsave(&vtpms->req_list_lock, flags);
        if (vtpms->current_request != NULL) {
                printk(KERN_WARNING
                       "Sending although there is a request outstanding.\n"
                       "         Previous request must have timed out.\n");
                transmission_free(vtpms->current_request);
                vtpms->current_request = NULL;
        }
        spin_unlock_irqrestore(&vtpms->req_list_lock, flags);

        /*
         * Queue the packet if the driver below is not
         * ready, yet, or there is any packet already
         * in the queue.
         * If the driver below is ready, unqueue all
         * packets first before sending our current
         * packet.
         * For each unqueued packet, except for the
         * last (=current) packet, call the function
         * vtpm_recv to wait for the response to come
         * back.
         */
        if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
                if (time_after(jiffies,
                               vtpms->disconnect_time + HZ * 10)) {
                        /* give up; free the unused transmission */
                        transmission_free(t);
                        rc = -ENOENT;
                } else {
                        goto queue_it;
                }
        } else {
                /*
                 * Send all queued packets.
                 */
                if (_vtpm_send_queued(chip) == 0) {

                        vtpms->current_request = t;

                        rc = vtpm_vd_send(vtpms->tpm_private,
                                          buf,
                                          count,
                                          t);
                        /*
                         * The generic TPM driver will call
                         * the function to receive the response.
                         */
                        if (rc < 0) {
                                vtpms->current_request = NULL;
                                goto queue_it;
                        }
                } else {
queue_it:
                        if (!transmission_set_req_buffer(t, buf, count)) {
                                transmission_free(t);
                                rc = -ENOMEM;
                                goto exit;
                        }
                        /*
                         * An error occurred. Don't even try
                         * to send the current request. Just
                         * queue it.
                         */
                        spin_lock_irqsave(&vtpms->req_list_lock, flags);
                        vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
                        list_add_tail(&t->next, &vtpms->queued_requests);
                        spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
                }
        }

exit:
        return rc;
}

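/*
 * Send-path summary for vtpm_send():
 *   back-end connected      - flush the queue, then hand the command to
 *                             vtpm_vd_send(); the response arrives later
 *                             through vtpm_vd_recv().
 *   disconnected < 10 secs  - copy and queue the command; the
 *                             DATAEX_FLAG_QUEUED_ONLY flag makes
 *                             vtpm_recv() fake an all-zero reply.
 *   disconnected >= 10 secs - fail with -ENOENT.
 */
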
/*
 * Send all queued requests.
 */
static int _vtpm_send_queued(struct tpm_chip *chip)
{
        int rc;
        int error = 0;
        unsigned long flags;
        unsigned char buffer[1];
        struct vtpm_state *vtpms;
        vtpms = (struct vtpm_state *)chip_get_private(chip);

        spin_lock_irqsave(&vtpms->req_list_lock, flags);

        while (!list_empty(&vtpms->queued_requests)) {
                /*
                 * Need to dequeue them.
                 * Read the result into a dummy buffer.
                 */
                struct transmission *qt =
                        list_entry(vtpms->queued_requests.next,
                                   struct transmission, next);
                list_del(&qt->next);
                vtpms->current_request = qt;
                spin_unlock_irqrestore(&vtpms->req_list_lock, flags);

                rc = vtpm_vd_send(vtpms->tpm_private,
                                  qt->request,
                                  qt->request_len,
                                  qt);

                if (rc < 0) {
                        spin_lock_irqsave(&vtpms->req_list_lock, flags);
                        if ((qt = vtpms->current_request) != NULL) {
                                /*
                                 * requeue it at the beginning
                                 * of the list
                                 */
                                list_add(&qt->next,
                                         &vtpms->queued_requests);
                        }
                        vtpms->current_request = NULL;
                        error = 1;
                        break;
                }
                /*
                 * After this point qt is not valid anymore!
                 * It is freed when the lower layer delivers the
                 * response by calling vtpm_vd_recv().
                 */
                /*
                 * Receive response into provided dummy buffer
                 */
                rc = vtpm_recv(chip, buffer, sizeof(buffer));
                spin_lock_irqsave(&vtpms->req_list_lock, flags);
        }

        spin_unlock_irqrestore(&vtpms->req_list_lock, flags);

        return error;
}

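/*
 * Note that _vtpm_send_queued() drops the request-list lock around
 * vtpm_vd_send() and vtpm_recv(): vtpm_recv() sleeps waiting for the
 * response, and the lower layer's send may block as well, so neither
 * can be called under a spinlock.
 */
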
static void vtpm_cancel(struct tpm_chip *chip)
{
        unsigned long flags;
        struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);

        spin_lock_irqsave(&vtpms->resp_list_lock, flags);

        if (!vtpms->current_response && vtpms->current_request) {
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                /* wait race-free until the response has come in */
                wait_event_interruptible(vtpms->resp_wait_queue,
                                         vtpms->current_response);
                spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        }

        if (vtpms->current_response) {
                struct transmission *t = vtpms->current_response;
                vtpms->current_response = NULL;
                transmission_free(t);
        }

        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
}

static u8 vtpm_status(struct tpm_chip *chip)
{
        u8 rc = 0;
        unsigned long flags;
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        /*
         * Data are available if:
         *  - there's a current response
         *  - the last packet was queued only (this is fake, but necessary to
         *      get the generic TPM layer to call the receive function.)
         */
        if (vtpms->current_response ||
            0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
                rc = STATUS_DATA_AVAIL;
        } else if (!vtpms->current_response && !vtpms->current_request) {
                rc = STATUS_READY;
        }

        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
        return rc;
}

static const struct file_operations vtpm_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = tpm_open,
        .read = tpm_read,
        .write = tpm_write,
        .release = tpm_release,
};

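/*
 * tpm_open/tpm_read/tpm_write/tpm_release are the generic TPM driver's
 * character-device entry points (declared in tpm.h), so reads and
 * writes on the device node end up in vtpm_send()/vtpm_recv() above.
 */
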
static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);

static struct attribute *vtpm_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
        &dev_attr_active.attr,
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr,
        NULL,
};

static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };

#define TPM_LONG_TIMEOUT   (10 * 60 * HZ)

static struct tpm_vendor_specific tpm_vtpm = {
        .recv = vtpm_recv,
        .send = vtpm_send,
        .cancel = vtpm_cancel,
        .status = vtpm_status,
        .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
        .req_complete_val  = STATUS_DATA_AVAIL,
        .req_canceled = STATUS_READY,
        .attr_group = &vtpm_attr_grp,
        .miscdev = {
                .fops = &vtpm_ops,
        },
        .duration = {
                TPM_LONG_TIMEOUT,
                TPM_LONG_TIMEOUT,
                TPM_LONG_TIMEOUT,
        },
};

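/*
 * All three TPM command duration classes (short, medium and long) use
 * the same generous 10-minute ceiling, presumably because the latency
 * of a virtual TPM depends on the back-end rather than on the command
 * class.
 */
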
struct tpm_chip *init_vtpm(struct device *dev,
                           struct tpm_private *tp)
{
        long rc;
        struct tpm_chip *chip;
        struct vtpm_state *vtpms;

        vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
        if (!vtpms)
                return ERR_PTR(-ENOMEM);

        vtpm_state_init(vtpms);
        vtpms->tpm_private = tp;

        chip = tpm_register_hardware(dev, &tpm_vtpm);
        if (!chip) {
                rc = -ENODEV;
                goto err_free_mem;
        }

        chip_set_private(chip, vtpms);

        return chip;

err_free_mem:
        kfree(vtpms);

        return ERR_PTR(rc);
}

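/*
 * Hypothetical sketch of how a lower-layer driver would use this
 * interface during probe and operation (the real caller is the Xen
 * virtual-TPM front-end, whose details live elsewhere):
 *
 *      chip = init_vtpm(dev, tp);
 *      if (IS_ERR(chip))
 *              return PTR_ERR(chip);
 *      ...
 *      vtpm_vd_status(chip, TPM_VD_STATUS_CONNECTED);
 *      ... on incoming data from the back-end ...
 *      vtpm_vd_recv(chip, buffer, count, ptr);
 */
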
void cleanup_vtpm(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);

        tpm_remove_hardware(dev);
        kfree(vtpms);
}