drivers/char/tpm/tpm_vtpm.c
/*
 * Copyright (C) 2006 IBM Corporation
 *
 * Authors:
 * Stefan Berger <stefanb@us.ibm.com>
 *
 * Generic device driver part for device drivers in a virtualized
 * environment.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */

#include <asm/uaccess.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include "tpm.h"
#include "tpm_vtpm.h"

/* read status bits */
enum {
        STATUS_BUSY = 0x01,
        STATUS_DATA_AVAIL = 0x02,
        STATUS_READY = 0x04
};

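/*
 * One request/response exchange with the lower layer. The request
 * buffer is kept while the transmission sits on the queued_requests
 * list so it can still be sent once the back-end becomes available.
 */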
struct transmission {
        struct list_head next;

        unsigned char *request;
        size_t  request_len;
        size_t  request_buflen;

        unsigned char *response;
        size_t  response_len;
        size_t  response_buflen;

        unsigned int flags;
};

enum {
        TRANSMISSION_FLAG_WAS_QUEUED = 0x1
};

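/*
 * DATAEX_FLAG_QUEUED_ONLY marks that the last command was only
 * queued; vtpm_status() then fakes STATUS_DATA_AVAIL so that the
 * generic layer calls vtpm_recv(), which answers with a dummy
 * response.
 */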
enum {
        DATAEX_FLAG_QUEUED_ONLY = 0x1
};


/* local variables */

/* local function prototypes */
static int _vtpm_send_queued(struct tpm_chip *chip);


/* =============================================================
 * Some utility functions
 * =============================================================
 */
static void vtpm_state_init(struct vtpm_state *vtpms)
{
        vtpms->current_request = NULL;
        spin_lock_init(&vtpms->req_list_lock);
        init_waitqueue_head(&vtpms->req_wait_queue);
        INIT_LIST_HEAD(&vtpms->queued_requests);

        vtpms->current_response = NULL;
        spin_lock_init(&vtpms->resp_list_lock);
        init_waitqueue_head(&vtpms->resp_wait_queue);

        vtpms->disconnect_time = jiffies;
}

static inline struct transmission *transmission_alloc(void)
{
        return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
}

static unsigned char *
transmission_set_req_buffer(struct transmission *t,
                            unsigned char *buffer, size_t len)
{
        if (t->request_buflen < len) {
                kfree(t->request);
                t->request = kmalloc(len, GFP_KERNEL);
                if (!t->request) {
                        t->request_buflen = 0;
                        return NULL;
                }
                t->request_buflen = len;
        }

        memcpy(t->request, buffer, len);
        t->request_len = len;

        return t->request;
}

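/*
 * Note: the response buffer is allocated with GFP_ATOMIC because
 * this runs under resp_list_lock with interrupts disabled (see
 * vtpm_vd_recv()).
 */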
static unsigned char *
transmission_set_res_buffer(struct transmission *t,
                            const unsigned char *buffer, size_t len)
{
        if (t->response_buflen < len) {
                kfree(t->response);
                t->response = kmalloc(len, GFP_ATOMIC);
                if (!t->response) {
                        t->response_buflen = 0;
                        return NULL;
                }
                t->response_buflen = len;
        }

        memcpy(t->response, buffer, len);
        t->response_len = len;

        return t->response;
}

static inline void transmission_free(struct transmission *t)
{
        kfree(t->request);
        kfree(t->response);
        kfree(t);
}

/* =============================================================
 * Interface with the lower layer driver
 * =============================================================
 */
/*
 * Lower layer uses this function to make a response available.
 */
int vtpm_vd_recv(const struct tpm_chip *chip,
                 const unsigned char *buffer, size_t count,
                 void *ptr)
{
        unsigned long flags;
        int ret_size = 0;
        struct transmission *t;
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        /*
         * The request list must contain exactly one request, and
         * that element must be the one passed in from the front-end.
         */
        spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        if (vtpms->current_request != ptr) {
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                return 0;
        }

        if ((t = vtpms->current_request)) {
                transmission_free(t);
                vtpms->current_request = NULL;
        }

        t = transmission_alloc();
        if (t) {
                if (!transmission_set_res_buffer(t, buffer, count)) {
                        transmission_free(t);
                        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                        return -ENOMEM;
                }
                ret_size = count;
                vtpms->current_response = t;
                wake_up_interruptible(&vtpms->resp_wait_queue);
        }
        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);

        return ret_size;
}

/*
 * Lower layer indicates its status (connected/disconnected)
 */
void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
{
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        vtpms->vd_status = vd_status;
        if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
                vtpms->disconnect_time = jiffies;
        }
}

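/*
 * Note: disconnect_time set above feeds the reconnect grace period
 * in vtpm_send(): requests are queued for up to HZ * 10 after a
 * disconnect before -ENOENT is returned.
 */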
/* =============================================================
 * Interface with the generic TPM driver
 * =============================================================
 */
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int rc = 0;
        unsigned long flags;
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        /*
         * Check whether the previous operation only queued the
         * command. In that case there won't be a response, so reset
         * the flag and return. In any other case a response is
         * expected from the back-end.
         */
        spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
                vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                /*
                 * The first few commands (measurements) must be
                 * queued since it might not be possible to talk to the
                 * TPM, yet.
                 * Return a response of up to 30 '0's.
                 */
                count = min_t(size_t, count, 30);
                memset(buf, 0x0, count);
                return count;
        }
        /*
         * Check whether something is in the response list; if the
         * list is empty, wait for a response to appear.
         */
        if (!vtpms->current_response) {
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                /* recheck the condition to avoid a lost wakeup */
                wait_event_interruptible_timeout(vtpms->resp_wait_queue,
                                                 vtpms->current_response != NULL,
                                                 1000);
                spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        }

        if (vtpms->current_response) {
                struct transmission *t = vtpms->current_response;
                vtpms->current_response = NULL;
                rc = min(count, t->response_len);
                memcpy(buf, t->response, rc);
                transmission_free(t);
        }

        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
        return rc;
}

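/*
 * Bound to tpm_vtpm.send below; invoked by the generic TPM driver
 * with a fully marshalled command in buf. If the back-end is not
 * available the command is queued and replayed later by
 * _vtpm_send_queued().
 */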
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int rc = 0;
        unsigned long flags;
        struct transmission *t = transmission_alloc();
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        if (!t)
                return -ENOMEM;
        /*
         * If there's a current request, it must be the
         * previous request that has timed out.
         */
        spin_lock_irqsave(&vtpms->req_list_lock, flags);
        if (vtpms->current_request != NULL) {
                printk(KERN_WARNING
                       "WARNING: Sending although there is a request outstanding.\n"
                       "         Previous request must have timed out.\n");
                transmission_free(vtpms->current_request);
                vtpms->current_request = NULL;
        }
        spin_unlock_irqrestore(&vtpms->req_list_lock, flags);

        /*
         * Queue the packet if the driver below is not yet ready or
         * if there are already packets in the queue.
         * If the driver below is ready, flush all queued packets
         * before sending the current one; for each flushed packet,
         * _vtpm_send_queued() waits for the response via vtpm_recv().
         */
        if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
                if (time_after(jiffies,
                               vtpms->disconnect_time + HZ * 10)) {
                        rc = -ENOENT;
                } else {
                        goto queue_it;
                }
        } else {
                /*
                 * Send all queued packets.
                 */
                if (_vtpm_send_queued(chip) == 0) {

                        vtpms->current_request = t;

                        rc = vtpm_vd_send(vtpms->tpm_private,
                                          buf,
                                          count,
                                          t);
                        /*
                         * The generic TPM driver will call
                         * the function to receive the response.
                         */
                        if (rc < 0) {
                                vtpms->current_request = NULL;
                                goto queue_it;
                        }
                } else {
queue_it:
                        if (!transmission_set_req_buffer(t, buf, count)) {
                                transmission_free(t);
                                rc = -ENOMEM;
                                goto exit;
                        }
                        /*
                         * The back-end is not ready yet or sending
                         * failed; don't even try to send the current
                         * request, just queue it.
                         */
                        spin_lock_irqsave(&vtpms->req_list_lock, flags);
                        vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
                        list_add_tail(&t->next, &vtpms->queued_requests);
                        spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
                }
        }

exit:
        return rc;
}

/*
 * Send all queued requests.
 */
static int _vtpm_send_queued(struct tpm_chip *chip)
{
        int rc;
        int error = 0;
        unsigned long flags;
        unsigned char buffer[1];
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        spin_lock_irqsave(&vtpms->req_list_lock, flags);

        while (!list_empty(&vtpms->queued_requests)) {
                /*
                 * Dequeue each request and read its result into a
                 * dummy buffer.
                 */
                struct transmission *qt =
                        list_entry(vtpms->queued_requests.next,
                                   struct transmission, next);
                list_del(&qt->next);
                vtpms->current_request = qt;
                spin_unlock_irqrestore(&vtpms->req_list_lock, flags);

                rc = vtpm_vd_send(vtpms->tpm_private,
                                  qt->request,
                                  qt->request_len,
                                  qt);

                if (rc < 0) {
                        spin_lock_irqsave(&vtpms->req_list_lock, flags);
                        if ((qt = vtpms->current_request) != NULL) {
                                /*
                                 * Requeue it at the beginning
                                 * of the list.
                                 */
                                list_add(&qt->next,
                                         &vtpms->queued_requests);
                        }
                        vtpms->current_request = NULL;
                        error = 1;
                        break;
                }
                /*
                 * After this point qt is no longer valid: it is freed
                 * when the lower layer delivers the response via
                 * vtpm_vd_recv().
                 */
                /*
                 * Receive the response into the dummy buffer.
                 */
                rc = vtpm_recv(chip, buffer, sizeof(buffer));
                spin_lock_irqsave(&vtpms->req_list_lock, flags);
        }

        spin_unlock_irqrestore(&vtpms->req_list_lock, flags);

        return error;
}

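/*
 * Cancel an outstanding request: wait for the pending response, if
 * any, to arrive and then discard it.
 */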
static void vtpm_cancel(struct tpm_chip *chip)
{
        unsigned long flags;
        struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);

        spin_lock_irqsave(&vtpms->resp_list_lock, flags);

        if (!vtpms->current_response && vtpms->current_request) {
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                /* recheck the condition to avoid a lost wakeup */
                wait_event_interruptible(vtpms->resp_wait_queue,
                                         vtpms->current_response != NULL);
                spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        }

        if (vtpms->current_response) {
                struct transmission *t = vtpms->current_response;
                vtpms->current_response = NULL;
                transmission_free(t);
        }

        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
}

static u8 vtpm_status(struct tpm_chip *chip)
{
        u8 rc = 0;
        unsigned long flags;
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        /*
         * Data are available if:
         *  - there's a current response
         *  - the last packet was queued only (this is fake, but necessary to
         *    get the generic TPM layer to call the receive function.)
         */
        if (vtpms->current_response ||
            (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
                rc = STATUS_DATA_AVAIL;
        } else if (!vtpms->current_response && !vtpms->current_request) {
                rc = STATUS_READY;
        }

        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
        return rc;
}

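/* =============================================================
 * Character device, sysfs attributes and driver registration
 * =============================================================
 */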
static struct file_operations vtpm_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = tpm_open,
        .read = tpm_read,
        .write = tpm_write,
        .release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);

static struct attribute *vtpm_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
        &dev_attr_active.attr,
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr,
        NULL,
};

static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };

#define TPM_LONG_TIMEOUT   (10 * 60 * HZ)

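/*
 * The three duration entries presumably correspond to the generic
 * driver's short, medium and long command classes; this driver uses
 * the same generous timeout for all of them.
 */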
static struct tpm_vendor_specific tpm_vtpm = {
        .recv = vtpm_recv,
        .send = vtpm_send,
        .cancel = vtpm_cancel,
        .status = vtpm_status,
        .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
        .req_complete_val  = STATUS_DATA_AVAIL,
        .req_canceled = STATUS_READY,
        .attr_group = &vtpm_attr_grp,
        .miscdev = {
                .fops = &vtpm_ops,
        },
        .duration = {
                TPM_LONG_TIMEOUT,
                TPM_LONG_TIMEOUT,
                TPM_LONG_TIMEOUT,
        },
};

struct tpm_chip *init_vtpm(struct device *dev,
                           struct tpm_private *tp)
{
        long rc;
        struct tpm_chip *chip;
        struct vtpm_state *vtpms;

        vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
        if (!vtpms)
                return ERR_PTR(-ENOMEM);

        vtpm_state_init(vtpms);
        vtpms->tpm_private = tp;

        chip = tpm_register_hardware(dev, &tpm_vtpm);
        if (!chip) {
                rc = -ENODEV;
                goto err_free_mem;
        }

        chip_set_private(chip, vtpms);

        return chip;

err_free_mem:
        kfree(vtpms);

        return ERR_PTR(rc);
}

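/*
 * Fetch the vtpm state before unregistering the chip, then free it
 * once the generic layer no longer references it.
 */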
void cleanup_vtpm(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);

        tpm_remove_hardware(dev);
        kfree(vtpms);
}