/*
 * Copyright (C) 2006 IBM Corporation
 *
 * Author: Stefan Berger <stefanb@us.ibm.com>
 *
 * Generic device driver part for device drivers in a virtualized
 * environment.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
17 #include <asm/uaccess.h>
18 #include <linux/list.h>
19 #include <linux/slab.h>
20 #include <linux/device.h>
21 #include <linux/interrupt.h>
22 #include <linux/platform_device.h>
/* read status bits reported to the generic TPM layer by vtpm_status() */
enum {
	STATUS_BUSY       = 0x01, /* NOTE(review): value reconstructed — verify */
	STATUS_DATA_AVAIL = 0x02,
	STATUS_READY      = 0x04, /* NOTE(review): value reconstructed — verify */
};
34 struct list_head next;
36 unsigned char *request;
38 size_t request_buflen;
40 unsigned char *response;
42 size_t response_buflen;
/* per-transmission flags */
enum {
	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
};
/* vtpm_state.flags bits */
enum {
	DATAEX_FLAG_QUEUED_ONLY = 0x1	/* last command was only queued; fake a response */
};
/* local function prototypes */
static int _vtpm_send_queued(struct tpm_chip *chip);
/* =============================================================
 * Some utility functions
 * =============================================================
 */
/*
 * Initialize a freshly allocated vtpm_state: empty request/response
 * slots, empty queue, locks and wait queues ready for use.
 */
static void vtpm_state_init(struct vtpm_state *vtpms)
{
	/* Request side: nothing in flight, no deferred commands yet. */
	vtpms->current_request = NULL;
	spin_lock_init(&vtpms->req_list_lock);
	init_waitqueue_head(&vtpms->req_wait_queue);
	INIT_LIST_HEAD(&vtpms->queued_requests);

	/* Response side: nothing received yet. */
	vtpms->current_response = NULL;
	spin_lock_init(&vtpms->resp_list_lock);
	init_waitqueue_head(&vtpms->resp_wait_queue);

	/*
	 * Start the "disconnected since" clock now so vtpm_send()'s
	 * 10-second grace period is measured from driver init.
	 */
	vtpms->disconnect_time = jiffies;
}
/*
 * Allocate a zeroed transmission descriptor.
 * GFP_ATOMIC because callers may hold a spinlock (see vtpm_vd_recv()).
 */
static inline struct transmission *transmission_alloc(void)
{
	return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
}
87 static unsigned char *
88 transmission_set_req_buffer(struct transmission *t,
89 unsigned char *buffer, size_t len)
91 if (t->request_buflen < len) {
93 t->request = kmalloc(len, GFP_KERNEL);
95 t->request_buflen = 0;
98 t->request_buflen = len;
101 memcpy(t->request, buffer, len);
102 t->request_len = len;
107 static unsigned char *
108 transmission_set_res_buffer(struct transmission *t,
109 const unsigned char *buffer, size_t len)
111 if (t->response_buflen < len) {
113 t->response = kmalloc(len, GFP_ATOMIC);
115 t->response_buflen = 0;
118 t->response_buflen = len;
121 memcpy(t->response, buffer, len);
122 t->response_len = len;
127 static inline void transmission_free(struct transmission *t)
/* =============================================================
 * Interface with the lower layer driver
 * =============================================================
 */

/*
 * Lower layer uses this function to make a response available.
 */
141 int vtpm_vd_recv(const struct tpm_chip *chip,
142 const unsigned char *buffer, size_t count,
147 struct transmission *t;
148 struct vtpm_state *vtpms;
150 vtpms = (struct vtpm_state *)chip_get_private(chip);
153 * The list with requests must contain one request
154 * only and the element there must be the one that
155 * was passed to me from the front-end.
157 spin_lock_irqsave(&vtpms->resp_list_lock, flags);
158 if (vtpms->current_request != ptr) {
159 spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
163 if ((t = vtpms->current_request)) {
164 transmission_free(t);
165 vtpms->current_request = NULL;
168 t = transmission_alloc();
170 if (!transmission_set_res_buffer(t, buffer, count)) {
171 transmission_free(t);
172 spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
176 vtpms->current_response = t;
177 wake_up_interruptible(&vtpms->resp_wait_queue);
179 spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
186 * Lower layer indicates its status (connected/disconnected)
188 void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
190 struct vtpm_state *vtpms;
192 vtpms = (struct vtpm_state *)chip_get_private(chip);
194 vtpms->vd_status = vd_status;
195 if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
196 vtpms->disconnect_time = jiffies;
/* =============================================================
 * Interface with the generic TPM driver
 * =============================================================
 */
204 static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
208 struct vtpm_state *vtpms;
210 vtpms = (struct vtpm_state *)chip_get_private(chip);
213 * Check if the previous operation only queued the command
214 * In this case there won't be a response, so I just
215 * return from here and reset that flag. In any other
216 * case I should receive a response from the back-end.
218 spin_lock_irqsave(&vtpms->resp_list_lock, flags);
219 if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
220 vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
221 spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
223 * The first few commands (measurements) must be
224 * queued since it might not be possible to talk to the
226 * Return a response of up to 30 '0's.
229 count = min_t(size_t, count, 30);
230 memset(buf, 0x0, count);
234 * Check whether something is in the responselist and if
235 * there's nothing in the list wait for something to appear.
238 if (!vtpms->current_response) {
239 spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
240 interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
242 spin_lock_irqsave(&vtpms->resp_list_lock ,flags);
245 if (vtpms->current_response) {
246 struct transmission *t = vtpms->current_response;
247 vtpms->current_response = NULL;
248 rc = min(count, t->response_len);
249 memcpy(buf, t->response, rc);
250 transmission_free(t);
253 spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
257 static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
261 struct transmission *t = transmission_alloc();
262 struct vtpm_state *vtpms;
264 vtpms = (struct vtpm_state *)chip_get_private(chip);
269 * If there's a current request, it must be the
270 * previous request that has timed out.
272 spin_lock_irqsave(&vtpms->req_list_lock, flags);
273 if (vtpms->current_request != NULL) {
274 printk("WARNING: Sending although there is a request outstanding.\n"
275 " Previous request must have timed out.\n");
276 transmission_free(vtpms->current_request);
277 vtpms->current_request = NULL;
279 spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
282 * Queue the packet if the driver below is not
283 * ready, yet, or there is any packet already
285 * If the driver below is ready, unqueue all
286 * packets first before sending our current
288 * For each unqueued packet, except for the
289 * last (=current) packet, call the function
290 * tpm_xen_recv to wait for the response to come
293 if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
294 if (time_after(jiffies,
295 vtpms->disconnect_time + HZ * 10)) {
302 * Send all queued packets.
304 if (_vtpm_send_queued(chip) == 0) {
306 vtpms->current_request = t;
308 rc = vtpm_vd_send(vtpms->tpm_private,
313 * The generic TPM driver will call
314 * the function to receive the response.
317 vtpms->current_request = NULL;
322 if (!transmission_set_req_buffer(t, buf, count)) {
323 transmission_free(t);
328 * An error occurred. Don't event try
329 * to send the current request. Just
332 spin_lock_irqsave(&vtpms->req_list_lock, flags);
333 vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
334 list_add_tail(&t->next, &vtpms->queued_requests);
335 spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
345 * Send all queued requests.
347 static int _vtpm_send_queued(struct tpm_chip *chip)
352 unsigned char buffer[1];
353 struct vtpm_state *vtpms;
354 vtpms = (struct vtpm_state *)chip_get_private(chip);
356 spin_lock_irqsave(&vtpms->req_list_lock, flags);
358 while (!list_empty(&vtpms->queued_requests)) {
360 * Need to dequeue them.
361 * Read the result into a dummy buffer.
363 struct transmission *qt = (struct transmission *)
364 vtpms->queued_requests.next;
366 vtpms->current_request = qt;
367 spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
369 rc = vtpm_vd_send(vtpms->tpm_private,
375 spin_lock_irqsave(&vtpms->req_list_lock, flags);
376 if ((qt = vtpms->current_request) != NULL) {
378 * requeue it at the beginning
382 &vtpms->queued_requests);
384 vtpms->current_request = NULL;
389 * After this point qt is not valid anymore!
390 * It is freed when the front-end is delivering
391 * the data by calling tpm_recv
394 * Receive response into provided dummy buffer
396 rc = vtpm_recv(chip, buffer, sizeof(buffer));
397 spin_lock_irqsave(&vtpms->req_list_lock, flags);
400 spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
405 static void vtpm_cancel(struct tpm_chip *chip)
408 struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);
410 spin_lock_irqsave(&vtpms->resp_list_lock,flags);
412 if (!vtpms->current_response && vtpms->current_request) {
413 spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
414 interruptible_sleep_on(&vtpms->resp_wait_queue);
415 spin_lock_irqsave(&vtpms->resp_list_lock,flags);
418 if (vtpms->current_response) {
419 struct transmission *t = vtpms->current_response;
420 vtpms->current_response = NULL;
421 transmission_free(t);
424 spin_unlock_irqrestore(&vtpms->resp_list_lock,flags);
427 static u8 vtpm_status(struct tpm_chip *chip)
431 struct vtpm_state *vtpms;
433 vtpms = (struct vtpm_state *)chip_get_private(chip);
435 spin_lock_irqsave(&vtpms->resp_list_lock, flags);
437 * Data are available if:
438 * - there's a current response
439 * - the last packet was queued only (this is fake, but necessary to
440 * get the generic TPM layer to call the receive function.)
442 if (vtpms->current_response ||
443 0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
444 rc = STATUS_DATA_AVAIL;
445 } else if (!vtpms->current_response && !vtpms->current_request) {
449 spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
453 static struct file_operations vtpm_ops = {
454 .owner = THIS_MODULE,
459 .release = tpm_release,
462 static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
463 static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
464 static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
465 static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
466 static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
467 static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
469 static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
470 static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
472 static struct attribute *vtpm_attrs[] = {
473 &dev_attr_pubek.attr,
475 &dev_attr_enabled.attr,
476 &dev_attr_active.attr,
477 &dev_attr_owned.attr,
478 &dev_attr_temp_deactivated.attr,
480 &dev_attr_cancel.attr,
484 static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
486 #define TPM_LONG_TIMEOUT (10 * 60 * HZ)
488 static struct tpm_vendor_specific tpm_vtpm = {
491 .cancel = vtpm_cancel,
492 .status = vtpm_status,
493 .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
494 .req_complete_val = STATUS_DATA_AVAIL,
495 .req_canceled = STATUS_READY,
496 .attr_group = &vtpm_attr_grp,
507 struct tpm_chip *init_vtpm(struct device *dev,
508 struct tpm_private *tp)
511 struct tpm_chip *chip;
512 struct vtpm_state *vtpms;
514 vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
516 return ERR_PTR(-ENOMEM);
518 vtpm_state_init(vtpms);
519 vtpms->tpm_private = tp;
521 chip = tpm_register_hardware(dev, &tpm_vtpm);
527 chip_set_private(chip, vtpms);
/*
 * Tear down a virtual TPM instance: unregister from the generic TPM
 * layer, then free the vtpm_state allocated in init_vtpm() (the state
 * pointer is fetched before tpm_remove_hardware() invalidates the chip).
 */
void cleanup_vtpm(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);
	tpm_remove_hardware(dev);
	kfree(vtpms);
}