/*
 * Copyright (C) 2006 IBM Corporation
 *
 * Author: Stefan Berger <stefanb@us.ibm.com>
 *
 * Generic device driver part for device drivers in a virtualized
 * environment.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <asm/uaccess.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include "tpm.h"
#include "tpm_vtpm.h"
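/*
 * Note: the two local headers above are assumed from this tree's layout:
 * "tpm.h" is the generic TPM driver interface (tpm_chip, chip_get_private,
 * tpm_register_hardware, ...) and "tpm_vtpm.h" declares the vtpm glue
 * (vtpm_state, tpm_private, vtpm_vd_send, TPM_VD_STATUS_CONNECTED).
 */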
/* read status bits */
enum {
        STATUS_BUSY       = 0x01,
        STATUS_DATA_AVAIL = 0x02,
        STATUS_READY      = 0x04
};
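/*
 * There is no hardware status register here; vtpm_status() synthesizes
 * these bits, and the generic TPM layer matches them against
 * req_complete_mask/req_complete_val (see struct tpm_vtpm below) to
 * decide when a command has completed.
 */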
struct transmission {
        struct list_head next;

        unsigned char *request;
        size_t request_len;
        size_t request_buflen;

        unsigned char *response;
        size_t response_len;
        size_t response_buflen;

        unsigned int flags;
};
enum {
        TRANSMISSION_FLAG_WAS_QUEUED = 0x1
};

enum {
        DATAEX_FLAG_QUEUED_ONLY = 0x1
};
/* local function prototypes */
static int _vtpm_send_queued(struct tpm_chip *chip);
/* =============================================================
 * Some utility functions
 * =============================================================
 */
static void vtpm_state_init(struct vtpm_state *vtpms)
{
        vtpms->current_request = NULL;
        spin_lock_init(&vtpms->req_list_lock);
        init_waitqueue_head(&vtpms->req_wait_queue);
        INIT_LIST_HEAD(&vtpms->queued_requests);

        vtpms->current_response = NULL;
        spin_lock_init(&vtpms->resp_list_lock);
        init_waitqueue_head(&vtpms->resp_wait_queue);

        vtpms->disconnect_time = jiffies;
}
static inline struct transmission *transmission_alloc(void)
{
        return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
}
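/*
 * GFP_ATOMIC is used because transmissions are also allocated in
 * vtpm_vd_recv(), which runs with the resp_list_lock held and
 * interrupts disabled.
 */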
static unsigned char *
transmission_set_req_buffer(struct transmission *t,
                            unsigned char *buffer, size_t len)
{
        if (t->request_buflen < len) {
                kfree(t->request);
                t->request = kmalloc(len, GFP_KERNEL);
                if (!t->request) {
                        t->request_buflen = 0;
                        return NULL;
                }
                t->request_buflen = len;
        }
        memcpy(t->request, buffer, len);
        t->request_len = len;
        return t->request;
}
static unsigned char *
transmission_set_res_buffer(struct transmission *t,
                            const unsigned char *buffer, size_t len)
{
        if (t->response_buflen < len) {
                kfree(t->response);
                t->response = kmalloc(len, GFP_ATOMIC);
                if (!t->response) {
                        t->response_buflen = 0;
                        return NULL;
                }
                t->response_buflen = len;
        }
        memcpy(t->response, buffer, len);
        t->response_len = len;
        return t->response;
}
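/*
 * Both setters reuse an existing buffer when it is already large
 * enough and only ever grow it, so repeated exchanges of similar
 * size avoid reallocations. The buffers are released together with
 * the transmission in transmission_free().
 */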
static inline void transmission_free(struct transmission *t)
{
        kfree(t->request);
        kfree(t->response);
        kfree(t);
}
/* =============================================================
 * Interface with the lower layer driver
 * =============================================================
 */
/*
 * Lower layer uses this function to make a response available.
 */
int vtpm_vd_recv(const struct tpm_chip *chip,
                 const unsigned char *buffer, size_t count,
                 void *ptr)
{
        unsigned long flags;
        int ret_size = 0;
        struct transmission *t;
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        /*
         * The list with requests must contain one request
         * only and the element there must be the one that
         * was passed to me from the front-end.
         */
        spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        if (vtpms->current_request != ptr) {
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                return 0;
        }

        if ((t = vtpms->current_request)) {
                transmission_free(t);
                vtpms->current_request = NULL;
        }

        t = transmission_alloc();
        if (t) {
                if (!transmission_set_res_buffer(t, buffer, count)) {
                        transmission_free(t);
                        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                        return -ENOMEM;
                }
                ret_size = count;
                vtpms->current_response = t;
                wake_up_interruptible(&vtpms->resp_wait_queue);
        }
        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);

        return ret_size;
}
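/*
 * The wake_up pairs with the waits in vtpm_recv() and vtpm_cancel():
 * whoever sleeps on resp_wait_queue is woken once current_response
 * has been filled in.
 */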
/*
 * Lower layer indicates its status (connected/disconnected)
 */
void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
{
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        vtpms->vd_status = vd_status;
        if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
                vtpms->disconnect_time = jiffies;
        }
}
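/*
 * disconnect_time feeds the grace period in vtpm_send(): for up to
 * ten seconds (HZ * 10) after a disconnect, requests are queued
 * rather than failed with -ENOENT.
 */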
/* =============================================================
 * Interface with the generic TPM driver
 * =============================================================
 */
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int rc = 0;
        unsigned long flags;
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        /*
         * Check if the previous operation only queued the command.
         * In this case there won't be a response, so I just
         * return from here and reset that flag. In any other
         * case I should receive a response from the back-end.
         */
        spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
                vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                /*
                 * The first few commands (measurements) must be
                 * queued since it might not be possible to talk to the
                 * TPM, yet.
                 * Return a response of up to 30 '0's.
                 */
                count = min_t(size_t, count, 30);
                memset(buf, 0x0, count);
                return count;
        }

        /*
         * Check whether something is in the response list and if
         * there's nothing in the list wait for something to appear.
         */
        if (!vtpms->current_response) {
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                /* the exact timeout constant was lost; 1000 jiffies assumed */
                interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
                                               1000);
                spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        }

        if (vtpms->current_response) {
                struct transmission *t = vtpms->current_response;
                vtpms->current_response = NULL;
                rc = min(count, t->response_len);
                memcpy(buf, t->response, rc);
                transmission_free(t);
        }

        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
        return rc;
}
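/*
 * FIXME: the interruptible_sleep_on*() family is racy (a wake_up
 * between the check and the sleep is lost) and was removed from later
 * kernels; the replacement pattern is wait_event_interruptible_timeout().
 */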
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int rc = 0;
        unsigned long flags;
        struct transmission *t = transmission_alloc();
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        if (!t)
                return -ENOMEM;

        /*
         * If there's a current request, it must be the
         * previous request that has timed out.
         */
        spin_lock_irqsave(&vtpms->req_list_lock, flags);
        if (vtpms->current_request != NULL) {
                printk("WARNING: Sending although there is a request outstanding.\n"
                       "         Previous request must have timed out.\n");
                transmission_free(vtpms->current_request);
                vtpms->current_request = NULL;
        }
        spin_unlock_irqrestore(&vtpms->req_list_lock, flags);

        /*
         * Queue the packet if the driver below is not ready yet, or if
         * there is already a packet in the queue.
         * If the driver below is ready, unqueue all packets first before
         * sending our current packet.
         * For each unqueued packet, except for the last (= current) one,
         * call vtpm_recv to wait for the response to come back.
         */
        if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
                if (time_after(jiffies,
                               vtpms->disconnect_time + HZ * 10)) {
                        /* disconnected for too long; fail instead of queueing */
                        transmission_free(t);
                        rc = -ENOENT;
                } else {
                        goto queue_it;
                }
        } else {
                /*
                 * Send all queued packets.
                 */
                if (_vtpm_send_queued(chip) == 0) {
                        vtpms->current_request = t;
                        rc = vtpm_vd_send(vtpms->tpm_private,
                                          buf, count, t);
                        /*
                         * The generic TPM driver will call
                         * the function to receive the response.
                         */
                        if (rc < 0) {
                                vtpms->current_request = NULL;
                                goto queue_it;
                        }
                } else {
queue_it:
                        if (!transmission_set_req_buffer(t, buf, count)) {
                                transmission_free(t);
                                rc = -ENOMEM;
                                goto exit;
                        }
                        /*
                         * An error occurred. Don't even try
                         * to send the current request. Just
                         * queue it.
                         */
                        spin_lock_irqsave(&vtpms->req_list_lock, flags);
                        vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
                        list_add_tail(&t->next, &vtpms->queued_requests);
                        spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
                }
        }

exit:
        return rc;
}
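/*
 * When a request is merely queued, DATAEX_FLAG_QUEUED_ONLY makes
 * vtpm_status() report STATUS_DATA_AVAIL, so the generic layer still
 * calls vtpm_recv(), which consumes the flag and returns a fake
 * all-zero response.
 */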
/*
 * Send all queued requests.
 */
static int _vtpm_send_queued(struct tpm_chip *chip)
{
        int rc;
        int error = 0;
        unsigned long flags;
        unsigned char buffer[1];
        struct vtpm_state *vtpms;
        vtpms = (struct vtpm_state *)chip_get_private(chip);

        spin_lock_irqsave(&vtpms->req_list_lock, flags);
        while (!list_empty(&vtpms->queued_requests)) {
                /*
                 * Need to dequeue them.
                 * Read the result into a dummy buffer.
                 */
                struct transmission *qt = (struct transmission *)
                                          vtpms->queued_requests.next;
                list_del(&qt->next);
                vtpms->current_request = qt;
                spin_unlock_irqrestore(&vtpms->req_list_lock, flags);

                rc = vtpm_vd_send(vtpms->tpm_private,
                                  qt->request, qt->request_len, qt);
                if (rc < 0) {
                        spin_lock_irqsave(&vtpms->req_list_lock, flags);
                        if ((qt = vtpms->current_request) != NULL) {
                                /*
                                 * requeue it at the beginning
                                 * of the list
                                 */
                                list_add(&qt->next,
                                         &vtpms->queued_requests);
                        }
                        vtpms->current_request = NULL;
                        error = 1;
                        break;
                }
                /*
                 * After this point qt is not valid anymore!
                 * It is freed when the front-end is delivering
                 * the data by calling tpm_recv.
                 * Receive the response into the provided dummy buffer.
                 */
                rc = vtpm_recv(chip, buffer, sizeof(buffer));
                spin_lock_irqsave(&vtpms->req_list_lock, flags);
        }
        spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
        return error;
}
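/*
 * Locking discipline in the loop above: req_list_lock is held while
 * the queue is manipulated and dropped around vtpm_vd_send() and
 * vtpm_recv(); vtpm_recv() in particular may sleep waiting for the
 * response and takes resp_list_lock itself.
 */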
static void vtpm_cancel(struct tpm_chip *chip)
{
        unsigned long flags;
        struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);

        spin_lock_irqsave(&vtpms->resp_list_lock, flags);

        if (!vtpms->current_response && vtpms->current_request) {
                spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
                interruptible_sleep_on(&vtpms->resp_wait_queue);
                spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        }

        if (vtpms->current_response) {
                struct transmission *t = vtpms->current_response;
                vtpms->current_response = NULL;
                transmission_free(t);
        }

        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
}
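/*
 * Cancel does not abort the outstanding request at the lower layer;
 * it waits for the response to arrive and then discards it, leaving
 * the driver in a clean state for the next command.
 */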
static u8 vtpm_status(struct tpm_chip *chip)
{
        u8 rc = 0;
        unsigned long flags;
        struct vtpm_state *vtpms;

        vtpms = (struct vtpm_state *)chip_get_private(chip);

        spin_lock_irqsave(&vtpms->resp_list_lock, flags);
        /*
         * Data are available if:
         *  - there's a current response
         *  - the last packet was queued only (this is fake, but necessary to
         *    get the generic TPM layer to call the receive function.)
         */
        if (vtpms->current_response ||
            0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
                rc = STATUS_DATA_AVAIL;
        } else if (!vtpms->current_response && !vtpms->current_request) {
                rc = STATUS_READY;
        }

        spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
        return rc;
}
static struct file_operations vtpm_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        /* standard /dev/tpm handlers from the generic driver (assumed) */
        .open = tpm_open,
        .read = tpm_read,
        .write = tpm_write,
        .release = tpm_release,
};
static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static struct attribute *vtpm_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
        &dev_attr_active.attr,
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr,
        NULL,
};

static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
#define TPM_LONG_TIMEOUT   (10 * 60 * HZ)
static struct tpm_vendor_specific tpm_vtpm = {
        .recv = vtpm_recv,
        .send = vtpm_send,
        .cancel = vtpm_cancel,
        .status = vtpm_status,
        .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
        .req_complete_val  = STATUS_DATA_AVAIL,
        .req_canceled = STATUS_READY,
        .attr_group = &vtpm_attr_grp,
        .miscdev = {
                .fops = &vtpm_ops,
        },
        .duration = {
                TPM_LONG_TIMEOUT,
                TPM_LONG_TIMEOUT,
                TPM_LONG_TIMEOUT,
        },
};
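/*
 * The three duration slots (the short/medium/long TPM command classes)
 * are presumably all set to TPM_LONG_TIMEOUT because responses come
 * from a backend domain and can be delayed arbitrarily, independent of
 * the command class.
 */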
struct tpm_chip *init_vtpm(struct device *dev,
                           struct tpm_private *tp)
{
        long rc;
        struct tpm_chip *chip;
        struct vtpm_state *vtpms;

        vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
        if (!vtpms)
                return ERR_PTR(-ENOMEM);

        vtpm_state_init(vtpms);
        vtpms->tpm_private = tp;

        chip = tpm_register_hardware(dev, &tpm_vtpm);
        if (!chip) {
                rc = -ENODEV;
                goto err_free_mem;
        }
        chip_set_private(chip, vtpms);
        return chip;

err_free_mem:
        kfree(vtpms);
        return ERR_PTR(rc);
}
void cleanup_vtpm(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);
        tpm_remove_hardware(dev);
        kfree(vtpms);
}