firewire: rename source files
authorStefan Richter <stefanr@s5r6.in-berlin.de>
Fri, 5 Jun 2009 14:26:18 +0000 (16:26 +0200)
committerStefan Richter <stefanr@s5r6.in-berlin.de>
Fri, 5 Jun 2009 14:26:18 +0000 (16:26 +0200)
The source files of firewire-core, firewire-ohci, firewire-sbp2, i.e.
 "drivers/firewire/fw-*.c"
are renamed to
 "drivers/firewire/core-*.c",
 "drivers/firewire/ohci.c",
 "drivers/firewire/sbp2.c".

The old fw- prefix was redundant to the directory name.  The new core-
prefix distinguishes the files according to which driver they belong to.

This change comes a little late, but still before further firewire
drivers are added as anticipated RSN.

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>

17 files changed:
drivers/firewire/Makefile
drivers/firewire/core-card.c [new file with mode: 0644]
drivers/firewire/core-cdev.c [new file with mode: 0644]
drivers/firewire/core-device.c [new file with mode: 0644]
drivers/firewire/core-iso.c [new file with mode: 0644]
drivers/firewire/core-topology.c [new file with mode: 0644]
drivers/firewire/core-transaction.c [new file with mode: 0644]
drivers/firewire/fw-card.c [deleted file]
drivers/firewire/fw-cdev.c [deleted file]
drivers/firewire/fw-device.c [deleted file]
drivers/firewire/fw-iso.c [deleted file]
drivers/firewire/fw-ohci.c [deleted file]
drivers/firewire/fw-sbp2.c [deleted file]
drivers/firewire/fw-topology.c [deleted file]
drivers/firewire/fw-transaction.c [deleted file]
drivers/firewire/ohci.c [new file with mode: 0644]
drivers/firewire/sbp2.c [new file with mode: 0644]

index a7c31e9..bc3b9bf 100644 (file)
@@ -2,10 +2,10 @@
 # Makefile for the Linux IEEE 1394 implementation
 #
 
-firewire-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \
-                   fw-device.o fw-cdev.o
-firewire-ohci-y += fw-ohci.o
-firewire-sbp2-y += fw-sbp2.o
+firewire-core-y += core-card.o core-cdev.o core-device.o \
+                   core-iso.o core-topology.o core-transaction.o
+firewire-ohci-y += ohci.o
+firewire-sbp2-y += sbp2.o
 
 obj-$(CONFIG_FIREWIRE) += firewire-core.o
 obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
new file mode 100644 (file)
index 0000000..ba6cd70
--- /dev/null
@@ -0,0 +1,567 @@
+/*
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/crc-itu-t.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+
+/*
+ * Compute the CRC-ITU-T of one config ROM block and store it in the low
+ * 16 bits of the block's header quadlet.  The block length is taken from
+ * bits 16..23 of the header; the payload is byte-swapped to big endian
+ * before the CRC is computed.  Returns the block length in quadlets
+ * (excluding the header).  NOTE(review): the CRC is OR'ed in, so this
+ * assumes the low 16 bits of *block are zero on entry — confirm at call
+ * sites (generate_config_rom() builds headers with BIB_CRC(0)).
+ */
+int fw_compute_block_crc(u32 *block)
+{
+       __be32 be32_block[256];
+       int i, length;
+
+       length = (*block >> 16) & 0xff;
+       for (i = 0; i < length; i++)
+               be32_block[i] = cpu_to_be32(block[i + 1]);
+       *block |= crc_itu_t(0, (u8 *) be32_block, length * 4);
+
+       return length;
+}
+
+static DEFINE_MUTEX(card_mutex);
+static LIST_HEAD(card_list);
+
+static LIST_HEAD(descriptor_list);
+static int descriptor_count;
+
+#define BIB_CRC(v)             ((v) <<  0)
+#define BIB_CRC_LENGTH(v)      ((v) << 16)
+#define BIB_INFO_LENGTH(v)     ((v) << 24)
+
+#define BIB_LINK_SPEED(v)      ((v) <<  0)
+#define BIB_GENERATION(v)      ((v) <<  4)
+#define BIB_MAX_ROM(v)         ((v) <<  8)
+#define BIB_MAX_RECEIVE(v)     ((v) << 12)
+#define BIB_CYC_CLK_ACC(v)     ((v) << 16)
+#define BIB_PMC                        ((1) << 27)
+#define BIB_BMC                        ((1) << 28)
+#define BIB_ISC                        ((1) << 29)
+#define BIB_CMC                        ((1) << 30)
+#define BIB_IMC                        ((1) << 31)
+
+/*
+ * Build the local node's config ROM: bus info block (quadlets 0..4),
+ * root directory, the data blocks of all registered descriptors, and
+ * finally per-block CRCs.  Returns a pointer to a static buffer and
+ * stores its length in quadlets in *config_rom_length.  Both call sites
+ * hold card_mutex, which serializes use of the static buffer and the
+ * walk of descriptor_list.
+ */
+static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+{
+       struct fw_descriptor *desc;
+       static u32 config_rom[256];
+       int i, j, length;
+
+       /*
+        * Initialize contents of config rom buffer.  On the OHCI
+        * controller, block reads of the config rom access host
+        * memory, but quadlet reads access the hardware bus info block
+        * registers.  That's just crack, but it means we should make
+        * sure the contents of bus info block in host memory matches
+        * the version stored in the OHCI registers.
+        */
+
+       memset(config_rom, 0, sizeof(config_rom));
+       config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
+       config_rom[1] = 0x31333934;     /* "1394" bus name */
+
+       config_rom[2] =
+               BIB_LINK_SPEED(card->link_speed) |
+               BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
+               BIB_MAX_ROM(2) |
+               BIB_MAX_RECEIVE(card->max_receive) |
+               BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
+       config_rom[3] = card->guid >> 32;
+       config_rom[4] = card->guid;
+
+       /* Generate root directory. */
+       i = 5;
+       config_rom[i++] = 0;    /* length/CRC header, filled in below */
+       config_rom[i++] = 0x0c0083c0; /* node capabilities */
+       j = i + descriptor_count;       /* j = offset of first descriptor block */
+
+       /* Generate root directory entries for descriptors. */
+       list_for_each_entry (desc, &descriptor_list, link) {
+               if (desc->immediate > 0)
+                       config_rom[i++] = desc->immediate;
+               config_rom[i] = desc->key | (j - i);    /* leaf/dir offset entry */
+               i++;
+               j += desc->length;
+       }
+
+       /* Update root directory length. */
+       config_rom[5] = (i - 5 - 1) << 16;
+
+       /* End of root directory, now copy in descriptors. */
+       list_for_each_entry (desc, &descriptor_list, link) {
+               memcpy(&config_rom[i], desc->data, desc->length * 4);
+               i += desc->length;
+       }
+
+       /* Calculate CRCs for all blocks in the config rom.  This
+        * assumes that CRC length and info length are identical for
+        * the bus info block, which is always the case for this
+        * implementation. */
+       for (i = 0; i < j; i += length + 1)
+               length = fw_compute_block_crc(config_rom + i);
+
+       *config_rom_length = j;
+
+       return config_rom;
+}
+
+/*
+ * Regenerate the config ROM of every registered card and push it to the
+ * card driver.  Called with card_mutex held (protects card_list and the
+ * static buffer inside generate_config_rom()).
+ */
+static void update_config_roms(void)
+{
+       struct fw_card *card;
+       u32 *config_rom;
+       size_t length;
+
+       list_for_each_entry (card, &card_list, link) {
+               config_rom = generate_config_rom(card, &length);
+               card->driver->set_config_rom(card, config_rom, length);
+       }
+}
+
+/*
+ * Register a config ROM descriptor and rebuild all cards' config ROMs.
+ * The block lengths encoded in data[] must sum to exactly desc->length,
+ * otherwise -EINVAL is returned.  A descriptor with an immediate entry
+ * consumes one extra root directory quadlet, hence the second
+ * descriptor_count increment.  Returns 0 on success.
+ */
+int fw_core_add_descriptor(struct fw_descriptor *desc)
+{
+       size_t i;
+
+       /*
+        * Check descriptor is valid; the length of all blocks in the
+        * descriptor has to add up to exactly the length of the
+        * block.
+        */
+       i = 0;
+       while (i < desc->length)
+               i += (desc->data[i] >> 16) + 1;
+
+       if (i != desc->length)
+               return -EINVAL;
+
+       mutex_lock(&card_mutex);
+
+       list_add_tail(&desc->link, &descriptor_list);
+       descriptor_count++;
+       if (desc->immediate > 0)
+               descriptor_count++;
+       update_config_roms();
+
+       mutex_unlock(&card_mutex);
+
+       return 0;
+}
+
+/*
+ * Unregister a descriptor added by fw_core_add_descriptor() and rebuild
+ * all cards' config ROMs.  Mirrors the double count adjustment for
+ * descriptors carrying an immediate entry.
+ */
+void fw_core_remove_descriptor(struct fw_descriptor *desc)
+{
+       mutex_lock(&card_mutex);
+
+       list_del(&desc->link);
+       descriptor_count--;
+       if (desc->immediate > 0)
+               descriptor_count--;
+       update_config_roms();
+
+       mutex_unlock(&card_mutex);
+}
+
+/*
+ * device_for_each_child() callback; data carries the bus generation
+ * smuggled through the void pointer (see allocate_broadcast_channel()).
+ */
+static int set_broadcast_channel(struct device *dev, void *data)
+{
+       fw_device_set_broadcast_channel(fw_device(dev), (long)data);
+       return 0;
+}
+
+/*
+ * Try to allocate ISO channel 31 (the broadcast channel) at the IRM,
+ * without bandwidth.  On success, mark the card and notify every child
+ * device of the channel availability.
+ */
+static void allocate_broadcast_channel(struct fw_card *card, int generation)
+{
+       int channel, bandwidth = 0;
+
+       fw_iso_resource_manage(card, generation, 1ULL << 31,
+                              &channel, &bandwidth, true);
+       if (channel == 31) {
+               card->broadcast_channel_allocated = true;
+               device_for_each_child(card->device, (void *)(long)generation,
+                                     set_broadcast_channel);
+       }
+}
+
+/* 1394a table E-1: gap count to use for 0..15 hops (see fw_card_bm_work). */
+static const char gap_count_table[] = {
+       63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+};
+
+/*
+ * Schedule fw_card_bm_work() and take a card reference for it.  If the
+ * work was already pending, the extra reference is dropped right away;
+ * otherwise fw_card_bm_work() drops it when it finishes.
+ */
+void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
+{
+       int scheduled;
+
+       fw_card_get(card);
+       scheduled = schedule_delayed_work(&card->work, delay);
+       if (!scheduled)
+               fw_card_put(card);
+}
+
+/*
+ * Bus management work, run after a bus reset: try to become bus manager
+ * via a compare-swap lock on the IRM's BUS_MANAGER_ID register; if we
+ * win (or have to do vigilante bus management), choose a root node and
+ * gap count and possibly initiate another bus reset to apply them.
+ * Drops the card reference taken in fw_schedule_bm_work().
+ */
+static void fw_card_bm_work(struct work_struct *work)
+{
+       struct fw_card *card = container_of(work, struct fw_card, work.work);
+       struct fw_device *root_device;
+       struct fw_node *root_node;
+       unsigned long flags;
+       int root_id, new_root_id, irm_id, local_id;
+       int gap_count, generation, grace, rcode;
+       bool do_reset = false;
+       bool root_device_is_running;
+       bool root_device_is_cmc;
+       __be32 lock_data[2];
+
+       spin_lock_irqsave(&card->lock, flags);
+
+       if (card->local_node == NULL) {
+               spin_unlock_irqrestore(&card->lock, flags);
+               goto out_put_card;
+       }
+
+       /* Snapshot topology state under the lock. */
+       generation = card->generation;
+       root_node = card->root_node;
+       fw_node_get(root_node);
+       root_device = root_node->data;
+       root_device_is_running = root_device &&
+                       atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
+       root_device_is_cmc = root_device && root_device->cmc;
+       root_id  = root_node->node_id;
+       irm_id   = card->irm_node->node_id;
+       local_id = card->local_node->node_id;
+
+       /* Grace period: at least 125ms since the last bus reset. */
+       grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+
+       if (is_next_generation(generation, card->bm_generation) ||
+           (card->bm_generation != generation && grace)) {
+               /*
+                * This first step is to figure out who is IRM and
+                * then try to become bus manager.  If the IRM is not
+                * well defined (e.g. does not have an active link
+                * layer or does not respond to our lock request), we
+                * will have to do a little vigilante bus management.
+                * In that case, we do a goto into the gap count logic
+                * so that when we do the reset, we still optimize the
+                * gap count.  That could well save a reset in the
+                * next generation.
+                */
+
+               if (!card->irm_node->link_on) {
+                       new_root_id = local_id;
+                       fw_notify("IRM has link off, making local node (%02x) root.\n",
+                                 new_root_id);
+                       goto pick_me;
+               }
+
+               /* CSR contention: 0x3f means "no bus manager yet". */
+               lock_data[0] = cpu_to_be32(0x3f);
+               lock_data[1] = cpu_to_be32(local_id);
+
+               spin_unlock_irqrestore(&card->lock, flags);
+
+               rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+                               irm_id, generation, SCODE_100,
+                               CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
+                               lock_data, sizeof(lock_data));
+
+               if (rcode == RCODE_GENERATION)
+                       /* Another bus reset, BM work has been rescheduled. */
+                       goto out;
+
+               if (rcode == RCODE_COMPLETE &&
+                   lock_data[0] != cpu_to_be32(0x3f)) {
+
+                       /* Somebody else is BM.  Only act as IRM. */
+                       if (local_id == irm_id)
+                               allocate_broadcast_channel(card, generation);
+
+                       goto out;
+               }
+
+               spin_lock_irqsave(&card->lock, flags);
+
+               if (rcode != RCODE_COMPLETE) {
+                       /*
+                        * The lock request failed, maybe the IRM
+                        * isn't really IRM capable after all. Let's
+                        * do a bus reset and pick the local node as
+                        * root, and thus, IRM.
+                        */
+                       new_root_id = local_id;
+                       fw_notify("BM lock failed, making local node (%02x) root.\n",
+                                 new_root_id);
+                       goto pick_me;
+               }
+       } else if (card->bm_generation != generation) {
+               /*
+                * We weren't BM in the last generation, and the last
+                * bus reset is less than 125ms ago.  Reschedule this job.
+                */
+               spin_unlock_irqrestore(&card->lock, flags);
+               fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
+               goto out;
+       }
+
+       /*
+        * We're bus manager for this generation, so next step is to
+        * make sure we have an active cycle master and do gap count
+        * optimization.
+        */
+       card->bm_generation = generation;
+
+       if (root_device == NULL) {
+               /*
+                * Either link_on is false, or we failed to read the
+                * config rom.  In either case, pick another root.
+                */
+               new_root_id = local_id;
+       } else if (!root_device_is_running) {
+               /*
+                * If we haven't probed this device yet, bail out now
+                * and let's try again once that's done.
+                */
+               spin_unlock_irqrestore(&card->lock, flags);
+               goto out;
+       } else if (root_device_is_cmc) {
+               /*
+                * FIXME: I suppose we should set the cmstr bit in the
+                * STATE_CLEAR register of this node, as described in
+                * 1394-1995, 8.4.2.6.  Also, send out a force root
+                * packet for this node.
+                */
+               new_root_id = root_id;
+       } else {
+               /*
+                * Current root has an active link layer and we
+                * successfully read the config rom, but it's not
+                * cycle master capable.
+                */
+               new_root_id = local_id;
+       }
+
+ pick_me:
+       /*
+        * Pick a gap count from 1394a table E-1.  The table doesn't cover
+        * the typically much larger 1394b beta repeater delays though.
+        */
+       if (!card->beta_repeaters_present &&
+           root_node->max_hops < ARRAY_SIZE(gap_count_table))
+               gap_count = gap_count_table[root_node->max_hops];
+       else
+               gap_count = 63;
+
+       /*
+        * Finally, figure out if we should do a reset or not.  If we have
+        * done less than 5 resets with the same physical topology and we
+        * have either a new root or a new gap count setting, let's do it.
+        */
+
+       if (card->bm_retries++ < 5 &&
+           (card->gap_count != gap_count || new_root_id != root_id))
+               do_reset = true;
+
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       if (do_reset) {
+               fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
+                         card->index, new_root_id, gap_count);
+               fw_send_phy_config(card, new_root_id, generation, gap_count);
+               fw_core_initiate_bus_reset(card, 1);
+               /* Will allocate broadcast channel after the reset. */
+       } else {
+               if (local_id == irm_id)
+                       allocate_broadcast_channel(card, generation);
+       }
+
+ out:
+       fw_node_put(root_node);
+ out_put_card:
+       fw_card_put(card);
+}
+
+/*
+ * Timer callback; data is the struct fw_card pointer installed by
+ * setup_timer() in fw_card_initialize().  Flushes pending transactions.
+ */
+static void flush_timer_callback(unsigned long data)
+{
+       struct fw_card *card = (struct fw_card *)data;
+
+       fw_flush_transactions(card);
+}
+
+/*
+ * Initialize the software state of a new fw_card: assign a unique index
+ * (monotonically increasing, never reused), take the initial kref, and
+ * set up the transaction list, lock, flush timer and BM delayed work.
+ * card->done is completed by fw_card_release() when the last reference
+ * is dropped; fw_core_remove_card() waits on it.
+ */
+void fw_card_initialize(struct fw_card *card,
+                       const struct fw_card_driver *driver,
+                       struct device *device)
+{
+       static atomic_t index = ATOMIC_INIT(-1);
+
+       card->index = atomic_inc_return(&index);
+       card->driver = driver;
+       card->device = device;
+       card->current_tlabel = 0;
+       card->tlabel_mask = 0;
+       card->color = 0;
+       card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
+
+       kref_init(&card->kref);
+       init_completion(&card->done);
+       INIT_LIST_HEAD(&card->transaction_list);
+       spin_lock_init(&card->lock);
+       setup_timer(&card->flush_timer,
+                   flush_timer_callback, (unsigned long)card);
+
+       card->local_node = NULL;
+
+       INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
+}
+EXPORT_SYMBOL(fw_card_initialize);
+
+/*
+ * Register an initialized card: record its link parameters, generate the
+ * config ROM and add the card to card_list under card_mutex, then call
+ * the driver's enable() outside the mutex.  If enable() fails, the card
+ * is unlinked again.  Returns enable()'s status (0 on success).
+ */
+int fw_card_add(struct fw_card *card,
+               u32 max_receive, u32 link_speed, u64 guid)
+{
+       u32 *config_rom;
+       size_t length;
+       int ret;
+
+       card->max_receive = max_receive;
+       card->link_speed = link_speed;
+       card->guid = guid;
+
+       mutex_lock(&card_mutex);
+       config_rom = generate_config_rom(card, &length);
+       list_add_tail(&card->link, &card_list);
+       mutex_unlock(&card_mutex);
+
+       ret = card->driver->enable(card, config_rom, length);
+       if (ret < 0) {
+               mutex_lock(&card_mutex);
+               list_del(&card->link);
+               mutex_unlock(&card_mutex);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(fw_card_add);
+
+
+/*
+ * The next few functions implement a dummy driver that is used once a
+ * card driver shuts down an fw_card.  This allows the driver to cleanly
+ * unload, as all IO to the card will be handled by the dummy driver
+ * instead of calling into the (possibly) unloaded module.  The dummy
+ * driver just fails all IO.
+ */
+
+/* enable() is only called from fw_card_add(); a removed card is never re-added. */
+static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+{
+       BUG();
+       return -1;
+}
+
+/* Fail PHY register access on a removed card. */
+static int dummy_update_phy_reg(struct fw_card *card, int address,
+                               int clear_bits, int set_bits)
+{
+       return -ENODEV;
+}
+
+static int dummy_set_config_rom(struct fw_card *card,
+                               u32 *config_rom, size_t length)
+{
+       /*
+        * We take the card out of card_list before setting the dummy
+        * driver, so this should never get called.
+        */
+       BUG();
+       return -1;
+}
+
+/* Complete outbound requests immediately with -ENODEV. */
+static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+{
+       packet->callback(packet, card, -ENODEV);
+}
+
+/* Complete responses immediately with -ENODEV. */
+static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+{
+       packet->callback(packet, card, -ENODEV);
+}
+
+/* Nothing can be in flight through this driver, so nothing to cancel. */
+static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+{
+       return -ENOENT;
+}
+
+/* Refuse physical DMA on a removed card. */
+static int dummy_enable_phys_dma(struct fw_card *card,
+                                int node_id, int generation)
+{
+       return -ENODEV;
+}
+
+/* Installed by fw_core_remove_card(); fails all IO after card removal. */
+static struct fw_card_driver dummy_driver = {
+       .enable          = dummy_enable,
+       .update_phy_reg  = dummy_update_phy_reg,
+       .set_config_rom  = dummy_set_config_rom,
+       .send_request    = dummy_send_request,
+       .cancel_packet   = dummy_cancel_packet,
+       .send_response   = dummy_send_response,
+       .enable_phys_dma = dummy_enable_phys_dma,
+};
+
+/*
+ * kref release callback: signals fw_core_remove_card(), which waits on
+ * card->done for the last reference before tearing the card down.
+ */
+void fw_card_release(struct kref *kref)
+{
+       struct fw_card *card = container_of(kref, struct fw_card, kref);
+
+       complete(&card->done);
+}
+
+/*
+ * Shut a card down: clear the PHY's link-active and contender bits, do a
+ * bus reset, unlink the card from card_list, swap in the dummy driver so
+ * late IO fails instead of calling into the (possibly unloading) module,
+ * destroy the node tree, then wait for the last fw_card_put() to
+ * complete card->done before stopping the flush timer.
+ */
+void fw_core_remove_card(struct fw_card *card)
+{
+       card->driver->update_phy_reg(card, 4,
+                                    PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+       fw_core_initiate_bus_reset(card, 1);
+
+       mutex_lock(&card_mutex);
+       list_del_init(&card->link);
+       mutex_unlock(&card_mutex);
+
+       /* Set up the dummy driver. */
+       card->driver = &dummy_driver;
+
+       fw_destroy_nodes(card);
+
+       /* Wait for all users, especially device workqueue jobs, to finish. */
+       fw_card_put(card);
+       wait_for_completion(&card->done);
+
+       WARN_ON(!list_empty(&card->transaction_list));
+       del_timer_sync(&card->flush_timer);
+}
+EXPORT_SYMBOL(fw_core_remove_card);
+
+/*
+ * Initiate a bus reset by setting the reset bit in PHY register 5
+ * (short reset) or register 1 (long reset).  Returns the driver's
+ * update_phy_reg() status.
+ */
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+{
+       int reg = short_reset ? 5 : 1;
+       int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
+
+       return card->driver->update_phy_reg(card, reg, 0, bit);
+}
+EXPORT_SYMBOL(fw_core_initiate_bus_reset);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
new file mode 100644 (file)
index 0000000..042c045
--- /dev/null
@@ -0,0 +1,1458 @@
+/*
+ * Char device for device raw access
+ *
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/compat.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-cdev.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include "core.h"
+
+/*
+ * Per-file-handle state of an open /dev/fw* character device node,
+ * stored in file->private_data by fw_device_op_open() and freed via
+ * client_release() when the last kref is put.
+ */
+struct client {
+       u32 version;
+       struct fw_device *device;
+
+       spinlock_t lock;
+       bool in_shutdown;       /* when set, no new events or resources are accepted */
+       struct idr resource_idr;
+       struct list_head event_list;
+       wait_queue_head_t wait;
+       u64 bus_reset_closure;
+
+       struct fw_iso_context *iso_context;
+       u64 iso_closure;
+       struct fw_iso_buffer buffer;
+       unsigned long vm_start;
+
+       struct list_head link;  /* entry in device->client_list */
+       struct kref kref;
+};
+
+/* Take a reference on the client. */
+static inline void client_get(struct client *client)
+{
+       kref_get(&client->kref);
+}
+
+/* kref release callback: drop the device reference and free the client. */
+static void client_release(struct kref *kref)
+{
+       struct client *client = container_of(kref, struct client, kref);
+
+       fw_device_put(client->device);
+       kfree(client);
+}
+
+/* Drop a reference; the client is freed when the last one goes away. */
+static void client_put(struct client *client)
+{
+       kref_put(&client->kref, client_release);
+}
+
+/*
+ * Userspace-created resources are tracked in client->resource_idr; each
+ * concrete resource type below embeds a struct client_resource whose
+ * release() hook tears the resource down.
+ */
+struct client_resource;
+typedef void (*client_resource_release_fn_t)(struct client *,
+                                            struct client_resource *);
+struct client_resource {
+       client_resource_release_fn_t release;
+       int handle;     /* key in client->resource_idr */
+};
+
+struct address_handler_resource {
+       struct client_resource resource;
+       struct fw_address_handler handler;
+       __u64 closure;
+       struct client *client;
+};
+
+struct outbound_transaction_resource {
+       struct client_resource resource;
+       struct fw_transaction transaction;
+};
+
+struct inbound_transaction_resource {
+       struct client_resource resource;
+       struct fw_request *request;
+       void *data;
+       size_t length;
+};
+
+struct descriptor_resource {
+       struct client_resource resource;
+       struct fw_descriptor descriptor;
+       u32 data[0];    /* descriptor payload; pre-C99 trailing array idiom */
+};
+
+struct iso_resource {
+       struct client_resource resource;
+       struct client *client;
+       /* Schedule work and access todo only with client->lock held. */
+       struct delayed_work work;
+       enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
+             ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
+       int generation;
+       u64 channels;
+       s32 bandwidth;
+       struct iso_resource_event *e_alloc, *e_dealloc;
+};
+
+static void schedule_iso_resource(struct iso_resource *);
+static void release_iso_resource(struct client *, struct client_resource *);
+
+/*
+ * dequeue_event() just kfree()'s the event, so the event has to be
+ * the first field in a struct XYZ_event.
+ */
+struct event {
+       struct { void *data; size_t size; } v[2];       /* up to two chunks copied to userspace */
+       struct list_head link;  /* entry in client->event_list */
+};
+
+struct bus_reset_event {
+       struct event event;
+       struct fw_cdev_event_bus_reset reset;
+};
+
+struct outbound_transaction_event {
+       struct event event;
+       struct client *client;
+       struct outbound_transaction_resource r;
+       struct fw_cdev_event_response response;
+};
+
+struct inbound_transaction_event {
+       struct event event;
+       struct fw_cdev_event_request request;
+};
+
+struct iso_interrupt_event {
+       struct event event;
+       struct fw_cdev_event_iso_interrupt interrupt;
+};
+
+struct iso_resource_event {
+       struct event event;
+       struct fw_cdev_event_iso_resource resource;
+};
+
+/*
+ * The cdev ABI carries user pointers as __u64 fields — presumably so the
+ * ioctl structures have the same layout for 32- and 64-bit userland;
+ * confirm against include/linux/firewire-cdev.h.
+ */
+static inline void __user *u64_to_uptr(__u64 value)
+{
+       return (void __user *)(unsigned long)value;
+}
+
+static inline __u64 uptr_to_u64(void __user *ptr)
+{
+       return (__u64)(unsigned long)ptr;
+}
+
+/*
+ * open() handler: look up the fw_device by char device number, refuse a
+ * device that is shutting down, allocate and initialize the per-file
+ * client state, and link it into the device's client list.  The device
+ * reference taken here is dropped in client_release().
+ */
+static int fw_device_op_open(struct inode *inode, struct file *file)
+{
+       struct fw_device *device;
+       struct client *client;
+
+       device = fw_device_get_by_devt(inode->i_rdev);
+       if (device == NULL)
+               return -ENODEV;
+
+       if (fw_device_is_shutdown(device)) {
+               fw_device_put(device);
+               return -ENODEV;
+       }
+
+       client = kzalloc(sizeof(*client), GFP_KERNEL);
+       if (client == NULL) {
+               fw_device_put(device);
+               return -ENOMEM;
+       }
+
+       client->device = device;
+       spin_lock_init(&client->lock);
+       idr_init(&client->resource_idr);
+       INIT_LIST_HEAD(&client->event_list);
+       init_waitqueue_head(&client->wait);
+       kref_init(&client->kref);
+
+       file->private_data = client;
+
+       mutex_lock(&device->client_list_mutex);
+       list_add_tail(&client->link, &device->client_list);
+       mutex_unlock(&device->client_list_mutex);
+
+       return 0;
+}
+
+/*
+ * Attach up to two data chunks to the event, append it to the client's
+ * event list (or free it immediately if the client is in shutdown), and
+ * wake any blocked reader.  Callers typically point data0/data1 into the
+ * event's own embedded payload, which dequeue_event() frees as a unit.
+ */
+static void queue_event(struct client *client, struct event *event,
+                       void *data0, size_t size0, void *data1, size_t size1)
+{
+       unsigned long flags;
+
+       event->v[0].data = data0;
+       event->v[0].size = size0;
+       event->v[1].data = data1;
+       event->v[1].size = size1;
+
+       spin_lock_irqsave(&client->lock, flags);
+       if (client->in_shutdown)
+               kfree(event);
+       else
+               list_add_tail(&event->link, &client->event_list);
+       spin_unlock_irqrestore(&client->lock, flags);
+
+       wake_up_interruptible(&client->wait);
+}
+
+/*
+ * Wait (interruptibly) until an event is queued or the device is shut
+ * down, then copy the oldest event's chunks into the user buffer,
+ * truncating at count bytes.  The event is freed even if copy_to_user()
+ * fails, so a faulting read drops that event.  Returns the number of
+ * bytes copied or a negative error.
+ */
+static int dequeue_event(struct client *client,
+                        char __user *buffer, size_t count)
+{
+       struct event *event;
+       size_t size, total;
+       int i, ret;
+
+       ret = wait_event_interruptible(client->wait,
+                       !list_empty(&client->event_list) ||
+                       fw_device_is_shutdown(client->device));
+       if (ret < 0)
+               return ret;
+
+       if (list_empty(&client->event_list) &&
+                      fw_device_is_shutdown(client->device))
+               return -ENODEV;
+
+       spin_lock_irq(&client->lock);
+       event = list_first_entry(&client->event_list, struct event, link);
+       list_del(&event->link);
+       spin_unlock_irq(&client->lock);
+
+       total = 0;
+       for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
+               size = min(event->v[i].size, count - total);
+               if (copy_to_user(buffer + total, event->v[i].data, size)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+               total += size;
+       }
+       ret = total;
+
+ out:
+       kfree(event);
+
+       return ret;
+}
+
+/* read() handler: blocks in dequeue_event() until an event is available. */
+static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
+                                size_t count, loff_t *offset)
+{
+       struct client *client = file->private_data;
+
+       return dequeue_event(client, buffer, count);
+}
+
+/*
+ * Snapshot the current bus state (generation and node IDs) into a cdev
+ * bus reset event, under card->lock for a consistent view.
+ */
+static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+                                struct client *client)
+{
+       struct fw_card *card = client->device->card;
+
+       spin_lock_irq(&card->lock);
+
+       event->closure       = client->bus_reset_closure;
+       event->type          = FW_CDEV_EVENT_BUS_RESET;
+       event->generation    = client->device->generation;
+       event->node_id       = client->device->node_id;
+       event->local_node_id = card->local_node->node_id;
+       event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
+       event->irm_node_id   = card->irm_node->node_id;
+       event->root_node_id  = card->root_node->node_id;
+
+       spin_unlock_irq(&card->lock);
+}
+
+/* Run callback on every client of the device, under client_list_mutex. */
+static void for_each_client(struct fw_device *device,
+                           void (*callback)(struct client *client))
+{
+       struct client *c;
+
+       mutex_lock(&device->client_list_mutex);
+       list_for_each_entry(c, &device->client_list, link)
+               callback(c);
+       mutex_unlock(&device->client_list_mutex);
+}
+
+/*
+ * idr_for_each() callback: after a bus reset, requeue the work of every
+ * ISO resource so its channel/bandwidth gets reallocated.
+ */
+static int schedule_reallocations(int id, void *p, void *data)
+{
+       struct client_resource *r = p;
+
+       if (r->release == release_iso_resource)
+               schedule_iso_resource(container_of(r,
+                                       struct iso_resource, resource));
+       return 0;
+}
+
+/*
+ * Queue a bus reset event for one client and kick reallocation of any
+ * ISO resources it holds (IDR walk under client->lock).  Allocation
+ * failure is logged and the event silently dropped.
+ */
+static void queue_bus_reset_event(struct client *client)
+{
+       struct bus_reset_event *e;
+
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+       if (e == NULL) {
+               fw_notify("Out of memory when allocating bus reset event\n");
+               return;
+       }
+
+       fill_bus_reset_event(&e->reset, client);
+
+       queue_event(client, &e->event,
+                   &e->reset, sizeof(e->reset), NULL, 0);
+
+       spin_lock_irq(&client->lock);
+       idr_for_each(&client->resource_idr, schedule_reallocations, client);
+       spin_unlock_irq(&client->lock);
+}
+
+/* Called by the core on bus reset: notify every client of the device. */
+void fw_device_cdev_update(struct fw_device *device)
+{
+       for_each_client(device, queue_bus_reset_event);
+}
+
+/* Wake a client's reader so it can notice device shutdown. */
+static void wake_up_client(struct client *client)
+{
+       wake_up_interruptible(&client->wait);
+}
+
+/* Called when the device goes away: wake all blocked readers. */
+void fw_device_cdev_remove(struct fw_device *device)
+{
+       for_each_client(device, wake_up_client);
+}
+
+/*
+ * GET_INFO ioctl: exchange ABI versions with userspace, report the card
+ * index, copy out up to rom_length bytes of the config ROM (the actual
+ * length is written back), and optionally register the bus reset closure
+ * and fill in an initial bus reset event.  fw_device_rwsem guards the
+ * device's config_rom/config_rom_length pair.
+ */
+static int ioctl_get_info(struct client *client, void *buffer)
+{
+       struct fw_cdev_get_info *get_info = buffer;
+       struct fw_cdev_event_bus_reset bus_reset;
+       unsigned long ret = 0;
+
+       client->version = get_info->version;
+       get_info->version = FW_CDEV_VERSION;
+       get_info->card = client->device->card->index;
+
+       down_read(&fw_device_rwsem);
+
+       if (get_info->rom != 0) {
+               void __user *uptr = u64_to_uptr(get_info->rom);
+               size_t want = get_info->rom_length;
+               size_t have = client->device->config_rom_length * 4;
+
+               ret = copy_to_user(uptr, client->device->config_rom,
+                                  min(want, have));
+       }
+       get_info->rom_length = client->device->config_rom_length * 4;
+
+       up_read(&fw_device_rwsem);
+
+       if (ret != 0)
+               return -EFAULT;
+
+       client->bus_reset_closure = get_info->bus_reset_closure;
+       if (get_info->bus_reset != 0) {
+               void __user *uptr = u64_to_uptr(get_info->bus_reset);
+
+               fill_bus_reset_event(&bus_reset, client);
+               if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
+                       return -EFAULT;
+       }
+
+       return 0;
+}
+
+/*
+ * Register @resource in the client's idr and store the new handle in
+ * resource->handle.  Uses the classic idr_pre_get()/idr_get_new() retry
+ * protocol: preallocation happens outside the lock, and -EAGAIN from
+ * idr_get_new() restarts the whole sequence.
+ *
+ * On success a client reference is taken on behalf of the idr entry
+ * (dropped again when the entry is removed).  Fails with -ECANCELED if
+ * the client is already shutting down, -ENOMEM on allocation failure.
+ */
+static int add_client_resource(struct client *client,
+                              struct client_resource *resource, gfp_t gfp_mask)
+{
+       unsigned long flags;
+       int ret;
+
+ retry:
+       if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&client->lock, flags);
+       if (client->in_shutdown)
+               ret = -ECANCELED;
+       else
+               ret = idr_get_new(&client->resource_idr, resource,
+                                 &resource->handle);
+       if (ret >= 0) {
+               client_get(client);
+               /* A newly registered iso resource must also get its
+                * allocation work scheduled, still under client->lock. */
+               if (resource->release == release_iso_resource)
+                       schedule_iso_resource(container_of(resource,
+                                               struct iso_resource, resource));
+       }
+       spin_unlock_irqrestore(&client->lock, flags);
+
+       if (ret == -EAGAIN)
+               goto retry;
+
+       return ret < 0 ? ret : 0;
+}
+
+/*
+ * Look up @handle in the client's idr, verify it refers to a resource of
+ * the expected kind (matching @release function), and unregister it.
+ *
+ * If @resource is non-NULL, ownership of the resource is handed to the
+ * caller; otherwise the resource's release function is invoked here.
+ * In both cases the idr's client reference is dropped.  Returns -EINVAL
+ * if the handle is unknown, of the wrong kind, or the client is in
+ * shutdown (shutdown tears resources down itself).
+ */
+static int release_client_resource(struct client *client, u32 handle,
+                                  client_resource_release_fn_t release,
+                                  struct client_resource **resource)
+{
+       struct client_resource *r;
+
+       spin_lock_irq(&client->lock);
+       if (client->in_shutdown)
+               r = NULL;
+       else
+               r = idr_find(&client->resource_idr, handle);
+       if (r && r->release == release)
+               idr_remove(&client->resource_idr, handle);
+       spin_unlock_irq(&client->lock);
+
+       if (!(r && r->release == release))
+               return -EINVAL;
+
+       if (resource)
+               *resource = r;
+       else
+               r->release(client, r);
+
+       /* Drop the reference that add_client_resource() took for the idr. */
+       client_put(client);
+
+       return 0;
+}
+
+/*
+ * Resource release hook for outbound transactions: cancel the pending
+ * transaction.  Cancellation makes the transaction's completion callback
+ * (complete_transaction) run, which frees the event and drops the
+ * callback's client reference.
+ */
+static void release_transaction(struct client *client,
+                               struct client_resource *resource)
+{
+       struct outbound_transaction_resource *r = container_of(resource,
+                       struct outbound_transaction_resource, resource);
+
+       fw_cancel_transaction(client->device->card, &r->transaction);
+}
+
+/*
+ * Completion callback of an outbound transaction started by
+ * init_request().  Builds the FW_CDEV_EVENT_RESPONSE event, unregisters
+ * the resource from the idr if it is still there, queues the event, and
+ * drops the client reference that init_request() took for this callback.
+ */
+static void complete_transaction(struct fw_card *card, int rcode,
+                                void *payload, size_t length, void *data)
+{
+       struct outbound_transaction_event *e = data;
+       struct fw_cdev_event_response *rsp = &e->response;
+       struct client *client = e->client;
+       unsigned long flags;
+
+       /* Clamp to what the responder actually sent; only copy payload on
+        * successful completion. */
+       if (length < rsp->length)
+               rsp->length = length;
+       if (rcode == RCODE_COMPLETE)
+               memcpy(rsp->data, payload, rsp->length);
+
+       spin_lock_irqsave(&client->lock, flags);
+       /*
+        * 1. If called while in shutdown, the idr tree must be left untouched.
+        *    The idr handle will be removed and the client reference will be
+        *    dropped later.
+        * 2. If the call chain was release_client_resource ->
+        *    release_transaction -> complete_transaction (instead of a normal
+        *    conclusion of the transaction), i.e. if this resource was already
+        *    unregistered from the idr, the client reference will be dropped
+        *    by release_client_resource and we must not drop it here.
+        */
+       if (!client->in_shutdown &&
+           idr_find(&client->resource_idr, e->r.resource.handle)) {
+               idr_remove(&client->resource_idr, e->r.resource.handle);
+               /* Drop the idr's reference */
+               client_put(client);
+       }
+       spin_unlock_irqrestore(&client->lock, flags);
+
+       rsp->type = FW_CDEV_EVENT_RESPONSE;
+       rsp->rcode = rcode;
+
+       /*
+        * In the case that sizeof(*rsp) doesn't align with the position of the
+        * data, and the read is short, preserve an extra copy of the data
+        * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
+        * for short reads and some apps depended on it, this is both safe
+        * and prudent for compatibility.
+        */
+       if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
+               queue_event(client, &e->event, rsp, sizeof(*rsp),
+                           rsp->data, rsp->length);
+       else
+               queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
+                           NULL, 0);
+
+       /* Drop the transaction callback's reference */
+       client_put(client);
+}
+
+/*
+ * Common worker for the request-sending ioctls: allocate an outbound
+ * transaction event (with inline payload buffer), copy the payload in
+ * from user space, register it as a client resource, and fire the
+ * transaction.  @destination_id and @speed are chosen by the caller
+ * (unicast node, broadcast, or stream destination).
+ *
+ * Returns 0 on success; -EIO if the length exceeds the 4096-byte cap or
+ * what the given speed permits (512 << speed; stream data is exempt from
+ * this check), -ENOMEM/-EFAULT and other negatives on failure.
+ */
+static int init_request(struct client *client,
+                       struct fw_cdev_send_request *request,
+                       int destination_id, int speed)
+{
+       struct outbound_transaction_event *e;
+       int ret;
+
+       if (request->tcode != TCODE_STREAM_DATA &&
+           (request->length > 4096 || request->length > 512 << speed))
+               return -EIO;
+
+       e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
+       if (e == NULL)
+               return -ENOMEM;
+
+       e->client = client;
+       e->response.length = request->length;
+       e->response.closure = request->closure;
+
+       if (request->data &&
+           copy_from_user(e->response.data,
+                          u64_to_uptr(request->data), request->length)) {
+               ret = -EFAULT;
+               goto failed;
+       }
+
+       e->r.resource.release = release_transaction;
+       ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
+       if (ret < 0)
+               goto failed;
+
+       /* Get a reference for the transaction callback */
+       client_get(client);
+
+       fw_send_request(client->device->card, &e->r.transaction,
+                       request->tcode, destination_id, request->generation,
+                       speed, request->offset, e->response.data,
+                       request->length, complete_transaction, e);
+       return 0;
+
+ failed:
+       kfree(e);
+
+       return ret;
+}
+
+/*
+ * FW_CDEV_IOC_SEND_REQUEST:  send an asynchronous request to this
+ * client's device.  Only the read/write/lock tcodes listed below are
+ * permitted; destination and speed are taken from the device itself.
+ */
+static int ioctl_send_request(struct client *client, void *buffer)
+{
+       struct fw_cdev_send_request *request = buffer;
+
+       switch (request->tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+       case TCODE_WRITE_BLOCK_REQUEST:
+       case TCODE_READ_QUADLET_REQUEST:
+       case TCODE_READ_BLOCK_REQUEST:
+       case TCODE_LOCK_MASK_SWAP:
+       case TCODE_LOCK_COMPARE_SWAP:
+       case TCODE_LOCK_FETCH_ADD:
+       case TCODE_LOCK_LITTLE_ADD:
+       case TCODE_LOCK_BOUNDED_ADD:
+       case TCODE_LOCK_WRAP_ADD:
+       case TCODE_LOCK_VENDOR_DEPENDENT:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return init_request(client, request, client->device->node_id,
+                           client->device->max_speed);
+}
+
+/*
+ * Resource release hook for inbound requests the client never answered:
+ * respond on its behalf with RCODE_CONFLICT_ERROR so the requester is
+ * not left hanging, then free the bookkeeping structure.
+ */
+static void release_request(struct client *client,
+                           struct client_resource *resource)
+{
+       struct inbound_transaction_resource *r = container_of(resource,
+                       struct inbound_transaction_resource, resource);
+
+       fw_send_response(client->device->card, r->request,
+                        RCODE_CONFLICT_ERROR);
+       kfree(r);
+}
+
+/*
+ * Address handler callback (may run in atomic context, hence GFP_ATOMIC):
+ * an inbound request hit an address range this client allocated.  Record
+ * the request as a client resource and queue an FW_CDEV_EVENT_REQUEST so
+ * user space can answer it via FW_CDEV_IOC_SEND_RESPONSE.  On any
+ * failure, answer immediately with RCODE_CONFLICT_ERROR.
+ */
+static void handle_request(struct fw_card *card, struct fw_request *request,
+                          int tcode, int destination, int source,
+                          int generation, int speed,
+                          unsigned long long offset,
+                          void *payload, size_t length, void *callback_data)
+{
+       struct address_handler_resource *handler = callback_data;
+       struct inbound_transaction_resource *r;
+       struct inbound_transaction_event *e;
+       int ret;
+
+       r = kmalloc(sizeof(*r), GFP_ATOMIC);
+       e = kmalloc(sizeof(*e), GFP_ATOMIC);
+       if (r == NULL || e == NULL)
+               goto failed;
+
+       r->request = request;
+       r->data    = payload;
+       r->length  = length;
+
+       r->resource.release = release_request;
+       ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
+       if (ret < 0)
+               goto failed;
+
+       e->request.type    = FW_CDEV_EVENT_REQUEST;
+       e->request.tcode   = tcode;
+       e->request.offset  = offset;
+       e->request.length  = length;
+       e->request.handle  = r->resource.handle;
+       e->request.closure = handler->closure;
+
+       queue_event(handler->client, &e->event,
+                   &e->request, sizeof(e->request), payload, length);
+       return;
+
+ failed:
+       /* kfree(NULL) is a no-op, so partial allocation is handled too. */
+       kfree(r);
+       kfree(e);
+       fw_send_response(card, request, RCODE_CONFLICT_ERROR);
+}
+
+/*
+ * Resource release hook for allocated address ranges: unregister the
+ * core address handler and free the wrapper.
+ */
+static void release_address_handler(struct client *client,
+                                   struct client_resource *resource)
+{
+       struct address_handler_resource *r =
+           container_of(resource, struct address_handler_resource, resource);
+
+       fw_core_remove_address_handler(&r->handler);
+       kfree(r);
+}
+
+/*
+ * FW_CDEV_IOC_ALLOCATE:  claim an address range [offset, offset+length)
+ * on the local node.  Inbound requests to that range are delivered to
+ * this client through handle_request().  On success, ->handle receives
+ * the resource handle used later by FW_CDEV_IOC_DEALLOCATE.
+ */
+static int ioctl_allocate(struct client *client, void *buffer)
+{
+       struct fw_cdev_allocate *request = buffer;
+       struct address_handler_resource *r;
+       struct fw_address_region region;
+       int ret;
+
+       r = kmalloc(sizeof(*r), GFP_KERNEL);
+       if (r == NULL)
+               return -ENOMEM;
+
+       region.start = request->offset;
+       region.end = request->offset + request->length;
+       r->handler.length = request->length;
+       r->handler.address_callback = handle_request;
+       r->handler.callback_data = r;
+       r->closure = request->closure;
+       r->client = client;
+
+       ret = fw_core_add_address_handler(&r->handler, &region);
+       if (ret < 0) {
+               kfree(r);
+               return ret;
+       }
+
+       r->resource.release = release_address_handler;
+       ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+       if (ret < 0) {
+               /* Undo both the handler registration and the kmalloc. */
+               release_address_handler(client, &r->resource);
+               return ret;
+       }
+       request->handle = r->resource.handle;
+
+       return 0;
+}
+
+/* FW_CDEV_IOC_DEALLOCATE:  release an address range by its handle. */
+static int ioctl_deallocate(struct client *client, void *buffer)
+{
+       struct fw_cdev_deallocate *request = buffer;
+
+       return release_client_resource(client, request->handle,
+                                      release_address_handler, NULL);
+}
+
+/*
+ * FW_CDEV_IOC_SEND_RESPONSE:  answer an inbound request previously
+ * delivered as FW_CDEV_EVENT_REQUEST.  Takes ownership of the resource
+ * from the idr, copies at most the original request length of response
+ * data from user space, and sends the response with the given rcode.
+ */
+static int ioctl_send_response(struct client *client, void *buffer)
+{
+       struct fw_cdev_send_response *request = buffer;
+       struct client_resource *resource;
+       struct inbound_transaction_resource *r;
+
+       if (release_client_resource(client, request->handle,
+                                   release_request, &resource) < 0)
+               return -EINVAL;
+
+       r = container_of(resource, struct inbound_transaction_resource,
+                        resource);
+       if (request->length < r->length)
+               r->length = request->length;
+       if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
+               return -EFAULT;
+
+       fw_send_response(client->device->card, r->request, request->rcode);
+       kfree(r);
+
+       return 0;
+}
+
+/*
+ * FW_CDEV_IOC_INITIATE_BUS_RESET:  trigger a bus reset, either the
+ * short (arbitrated) or the long variant depending on ->type.
+ */
+static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
+{
+       struct fw_cdev_initiate_bus_reset *request = buffer;
+       int short_reset;
+
+       short_reset = (request->type == FW_CDEV_SHORT_RESET);
+
+       return fw_core_initiate_bus_reset(client->device->card, short_reset);
+}
+
+/*
+ * Resource release hook for config ROM descriptors: remove the
+ * descriptor from the local node's config ROM and free the wrapper.
+ */
+static void release_descriptor(struct client *client,
+                              struct client_resource *resource)
+{
+       struct descriptor_resource *r =
+               container_of(resource, struct descriptor_resource, resource);
+
+       fw_core_remove_descriptor(&r->descriptor);
+       kfree(r);
+}
+
+/*
+ * FW_CDEV_IOC_ADD_DESCRIPTOR:  add a descriptor block (at most 256
+ * quadlets, copied from user space) to the local node's configuration
+ * ROM.  Only permitted on the local node's device file.  On success,
+ * ->handle receives the resource handle for later removal.
+ */
+static int ioctl_add_descriptor(struct client *client, void *buffer)
+{
+       struct fw_cdev_add_descriptor *request = buffer;
+       struct descriptor_resource *r;
+       int ret;
+
+       /* Access policy: Allow this ioctl only on local nodes' device files. */
+       if (!client->device->is_local)
+               return -ENOSYS;
+
+       if (request->length > 256)
+               return -EINVAL;
+
+       /* length is in quadlets, hence the * 4 for the byte count. */
+       r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
+       if (r == NULL)
+               return -ENOMEM;
+
+       if (copy_from_user(r->data,
+                          u64_to_uptr(request->data), request->length * 4)) {
+               ret = -EFAULT;
+               goto failed;
+       }
+
+       r->descriptor.length    = request->length;
+       r->descriptor.immediate = request->immediate;
+       r->descriptor.key       = request->key;
+       r->descriptor.data      = r->data;
+
+       ret = fw_core_add_descriptor(&r->descriptor);
+       if (ret < 0)
+               goto failed;
+
+       r->resource.release = release_descriptor;
+       ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+       if (ret < 0) {
+               fw_core_remove_descriptor(&r->descriptor);
+               goto failed;
+       }
+       request->handle = r->resource.handle;
+
+       return 0;
+ failed:
+       kfree(r);
+
+       return ret;
+}
+
+/* FW_CDEV_IOC_REMOVE_DESCRIPTOR:  drop a descriptor by its handle. */
+static int ioctl_remove_descriptor(struct client *client, void *buffer)
+{
+       struct fw_cdev_remove_descriptor *request = buffer;
+
+       return release_client_resource(client, request->handle,
+                                      release_descriptor, NULL);
+}
+
+/*
+ * Isochronous context callback (atomic context): forward the interrupt
+ * packet headers to user space as an FW_CDEV_EVENT_ISO_INTERRUPT event.
+ * On allocation failure the interrupt is silently dropped.
+ */
+static void iso_callback(struct fw_iso_context *context, u32 cycle,
+                        size_t header_length, void *header, void *data)
+{
+       struct client *client = data;
+       struct iso_interrupt_event *e;
+
+       e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
+       if (e == NULL)
+               return;
+
+       e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
+       e->interrupt.closure   = client->iso_closure;
+       e->interrupt.cycle     = cycle;
+       e->interrupt.header_length = header_length;
+       memcpy(e->interrupt.header, header, header_length);
+       queue_event(client, &e->event, &e->interrupt,
+                   sizeof(e->interrupt) + header_length, NULL, 0);
+}
+
+/*
+ * FW_CDEV_IOC_CREATE_ISO_CONTEXT:  create the client's single
+ * isochronous context (receive or transmit).  Validates channel number
+ * (0..63), header size (receive: nonzero multiple of 4) and speed
+ * (transmit: at most SCODE_3200).  The returned handle is always 0
+ * because only one context per client is supported.
+ */
+static int ioctl_create_iso_context(struct client *client, void *buffer)
+{
+       struct fw_cdev_create_iso_context *request = buffer;
+       struct fw_iso_context *context;
+
+       /* We only support one context at this time. */
+       if (client->iso_context != NULL)
+               return -EBUSY;
+
+       if (request->channel > 63)
+               return -EINVAL;
+
+       switch (request->type) {
+       case FW_ISO_CONTEXT_RECEIVE:
+               if (request->header_size < 4 || (request->header_size & 3))
+                       return -EINVAL;
+
+               break;
+
+       case FW_ISO_CONTEXT_TRANSMIT:
+               if (request->speed > SCODE_3200)
+                       return -EINVAL;
+
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       context =  fw_iso_context_create(client->device->card,
+                                        request->type,
+                                        request->channel,
+                                        request->speed,
+                                        request->header_size,
+                                        iso_callback, client);
+       if (IS_ERR(context))
+               return PTR_ERR(context);
+
+       client->iso_closure = request->closure;
+       client->iso_context = context;
+
+       /* We only support one context at this time. */
+       request->handle = 0;
+
+       return 0;
+}
+
+/*
+ * Macros for decoding the iso packet control header
+ * (struct fw_cdev_iso_packet.control, as read in ioctl_queue_iso):
+ * bits 0-15 payload length, bit 16 interrupt flag, bit 17 skip flag,
+ * bits 18-19 tag, bits 20-23 sy, bits 24-31 header length in bytes.
+ */
+#define GET_PAYLOAD_LENGTH(v)  ((v) & 0xffff)
+#define GET_INTERRUPT(v)       (((v) >> 16) & 0x01)
+#define GET_SKIP(v)            (((v) >> 17) & 0x01)
+#define GET_TAG(v)             (((v) >> 18) & 0x03)
+#define GET_SY(v)              (((v) >> 20) & 0x0f)
+#define GET_HEADER_LENGTH(v)   (((v) >> 24) & 0xff)
+
+/*
+ * FW_CDEV_IOC_QUEUE_ISO:  walk the variable-length array of
+ * struct fw_cdev_iso_packet in user memory and queue each packet on the
+ * client's iso context.  Each packet record is its control quadlet
+ * followed by header_length/4 header quadlets, so the records are
+ * variable-sized and the walk advances past each packet's header.
+ *
+ * Returns the number of packets queued; on return, ->packets, ->size
+ * and ->data are updated to describe the unconsumed remainder so the
+ * caller can retry.
+ */
+static int ioctl_queue_iso(struct client *client, void *buffer)
+{
+       struct fw_cdev_queue_iso *request = buffer;
+       struct fw_cdev_iso_packet __user *p, *end, *next;
+       struct fw_iso_context *ctx = client->iso_context;
+       unsigned long payload, buffer_end, header_length;
+       u32 control;
+       int count;
+       struct {
+               struct fw_iso_packet packet;
+               u8 header[256];
+       } u;
+
+       if (ctx == NULL || request->handle != 0)
+               return -EINVAL;
+
+       /*
+        * If the user passes a non-NULL data pointer, has mmap()'ed
+        * the iso buffer, and the pointer points inside the buffer,
+        * we setup the payload pointers accordingly.  Otherwise we
+        * set them both to 0, which will still let packets with
+        * payload_length == 0 through.  In other words, if no packets
+        * use the indirect payload, the iso buffer need not be mapped
+        * and the request->data pointer is ignored.
+        */
+
+       payload = (unsigned long)request->data - client->vm_start;
+       buffer_end = client->buffer.page_count << PAGE_SHIFT;
+       if (request->data == 0 || client->buffer.pages == NULL ||
+           payload >= buffer_end) {
+               payload = 0;
+               buffer_end = 0;
+       }
+
+       p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
+
+       /* One access_ok() for the whole array; __copy_from_user below
+        * relies on it. */
+       if (!access_ok(VERIFY_READ, p, request->size))
+               return -EFAULT;
+
+       end = (void __user *)p + request->size;
+       count = 0;
+       while (p < end) {
+               if (get_user(control, &p->control))
+                       return -EFAULT;
+               u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
+               u.packet.interrupt = GET_INTERRUPT(control);
+               u.packet.skip = GET_SKIP(control);
+               u.packet.tag = GET_TAG(control);
+               u.packet.sy = GET_SY(control);
+               u.packet.header_length = GET_HEADER_LENGTH(control);
+
+               if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
+                       header_length = u.packet.header_length;
+               } else {
+                       /*
+                        * We require that header_length is a multiple of
+                        * the fixed header size, ctx->header_size.
+                        */
+                       if (ctx->header_size == 0) {
+                               if (u.packet.header_length > 0)
+                                       return -EINVAL;
+                       } else if (u.packet.header_length % ctx->header_size != 0) {
+                               return -EINVAL;
+                       }
+                       header_length = 0;
+               }
+
+               next = (struct fw_cdev_iso_packet __user *)
+                       &p->header[header_length / 4];
+               if (next > end)
+                       return -EINVAL;
+               if (__copy_from_user
+                   (u.packet.header, p->header, header_length))
+                       return -EFAULT;
+               /* A transmit skip packet must carry no header or payload. */
+               if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
+                   u.packet.header_length + u.packet.payload_length > 0)
+                       return -EINVAL;
+               if (payload + u.packet.payload_length > buffer_end)
+                       return -EINVAL;
+
+               if (fw_iso_context_queue(ctx, &u.packet,
+                                        &client->buffer, payload))
+                       break;
+
+               p = next;
+               payload += u.packet.payload_length;
+               count++;
+       }
+
+       /* Report back how far we got, so the caller can resume. */
+       request->size    -= uptr_to_u64(p) - request->packets;
+       request->packets  = uptr_to_u64(p);
+       request->data     = client->vm_start + payload;
+
+       return count;
+}
+
+/*
+ * FW_CDEV_IOC_START_ISO:  start the client's iso context at the given
+ * cycle.  For receive contexts, also validate the tag mask (1..15) and
+ * sync field (0..15).
+ */
+static int ioctl_start_iso(struct client *client, void *buffer)
+{
+       struct fw_cdev_start_iso *request = buffer;
+
+       if (client->iso_context == NULL || request->handle != 0)
+               return -EINVAL;
+
+       if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
+               if (request->tags == 0 || request->tags > 15)
+                       return -EINVAL;
+
+               if (request->sync > 15)
+                       return -EINVAL;
+       }
+
+       return fw_iso_context_start(client->iso_context, request->cycle,
+                                   request->sync, request->tags);
+}
+
+/* FW_CDEV_IOC_STOP_ISO:  stop the client's iso context. */
+static int ioctl_stop_iso(struct client *client, void *buffer)
+{
+       struct fw_cdev_stop_iso *request = buffer;
+
+       if (client->iso_context == NULL || request->handle != 0)
+               return -EINVAL;
+
+       return fw_iso_context_stop(client->iso_context);
+}
+
+/*
+ * FW_CDEV_IOC_GET_CYCLE_TIMER:  sample the card's bus time and the
+ * system time as close together as possible (interrupts and preemption
+ * disabled around the two reads) so user space can correlate them.
+ * Only the low 32 bits of the bus time (the cycle timer) are returned.
+ */
+static int ioctl_get_cycle_timer(struct client *client, void *buffer)
+{
+       struct fw_cdev_get_cycle_timer *request = buffer;
+       struct fw_card *card = client->device->card;
+       unsigned long long bus_time;
+       struct timeval tv;
+       unsigned long flags;
+
+       preempt_disable();
+       local_irq_save(flags);
+
+       bus_time = card->driver->get_bus_time(card);
+       do_gettimeofday(&tv);
+
+       local_irq_restore(flags);
+       preempt_enable();
+
+       request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
+       request->cycle_timer = bus_time & 0xffffffff;
+       return 0;
+}
+
+/*
+ * Delayed work that performs all (de)allocation of isochronous channel
+ * and bandwidth resources for one struct iso_resource.  r->todo selects
+ * the action (ISO_RES_ALLOC, _REALLOC, _DEALLOC, or the _ONCE variants)
+ * and is read and advanced under client->lock.  The work may reschedule
+ * itself (grace period after a bus reset) and drops one client
+ * reference on every invocation - the reference that was taken when the
+ * work was scheduled.
+ */
+static void iso_resource_work(struct work_struct *work)
+{
+       struct iso_resource_event *e;
+       struct iso_resource *r =
+                       container_of(work, struct iso_resource, work.work);
+       struct client *client = r->client;
+       int generation, channel, bandwidth, todo;
+       bool skip, free, success;
+
+       spin_lock_irq(&client->lock);
+       generation = client->device->generation;
+       todo = r->todo;
+       /* Allow 1000ms grace period for other reallocations. */
+       if (todo == ISO_RES_ALLOC &&
+           time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+               if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
+                       client_get(client);
+               skip = true;
+       } else {
+               /* We could be called twice within the same generation. */
+               skip = todo == ISO_RES_REALLOC &&
+                      r->generation == generation;
+       }
+       /* One-shot and deallocation requests free the resource afterwards. */
+       free = todo == ISO_RES_DEALLOC ||
+              todo == ISO_RES_ALLOC_ONCE ||
+              todo == ISO_RES_DEALLOC_ONCE;
+       r->generation = generation;
+       spin_unlock_irq(&client->lock);
+
+       if (skip)
+               goto out;
+
+       bandwidth = r->bandwidth;
+
+       fw_iso_resource_manage(client->device->card, generation,
+                       r->channels, &channel, &bandwidth,
+                       todo == ISO_RES_ALLOC ||
+                       todo == ISO_RES_REALLOC ||
+                       todo == ISO_RES_ALLOC_ONCE);
+       /*
+        * Is this generation outdated already?  As long as this resource sticks
+        * in the idr, it will be scheduled again for a newer generation or at
+        * shutdown.
+        */
+       if (channel == -EAGAIN &&
+           (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
+               goto out;
+
+       success = channel >= 0 || bandwidth > 0;
+
+       spin_lock_irq(&client->lock);
+       /*
+        * Transit from allocation to reallocation, except if the client
+        * requested deallocation in the meantime.
+        */
+       if (r->todo == ISO_RES_ALLOC)
+               r->todo = ISO_RES_REALLOC;
+       /*
+        * Allocation or reallocation failure?  Pull this resource out of the
+        * idr and prepare for deletion, unless the client is shutting down.
+        */
+       if (r->todo == ISO_RES_REALLOC && !success &&
+           !client->in_shutdown &&
+           idr_find(&client->resource_idr, r->resource.handle)) {
+               idr_remove(&client->resource_idr, r->resource.handle);
+               client_put(client);
+               free = true;
+       }
+       spin_unlock_irq(&client->lock);
+
+       /* Remember the channel that was actually granted. */
+       if (todo == ISO_RES_ALLOC && channel >= 0)
+               r->channels = 1ULL << channel;
+
+       /* Successful reallocation needs no event; user space already knows. */
+       if (todo == ISO_RES_REALLOC && success)
+               goto out;
+
+       if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
+               e = r->e_alloc;
+               r->e_alloc = NULL;
+       } else {
+               e = r->e_dealloc;
+               r->e_dealloc = NULL;
+       }
+       e->resource.handle      = r->resource.handle;
+       e->resource.channel     = channel;
+       e->resource.bandwidth   = bandwidth;
+
+       queue_event(client, &e->event,
+                   &e->resource, sizeof(e->resource), NULL, 0);
+
+       if (free) {
+               cancel_delayed_work(&r->work);
+               kfree(r->e_alloc);
+               kfree(r->e_dealloc);
+               kfree(r);
+       }
+ out:
+       /* Drop the reference taken when this work was scheduled. */
+       client_put(client);
+}
+
+/*
+ * Queue iso_resource_work() for @r, taking a client reference for the
+ * pending work.  If the work was already pending, schedule_delayed_work()
+ * returns false and the extra reference is dropped again.
+ */
+static void schedule_iso_resource(struct iso_resource *r)
+{
+       client_get(r->client);
+       if (!schedule_delayed_work(&r->work, 0))
+               client_put(r->client);
+}
+
+/*
+ * Resource release hook for iso resources: mark the resource for
+ * deallocation and let iso_resource_work() do the actual work and the
+ * freeing.  todo is updated under client->lock.
+ */
+static void release_iso_resource(struct client *client,
+                                struct client_resource *resource)
+{
+       struct iso_resource *r =
+               container_of(resource, struct iso_resource, resource);
+
+       spin_lock_irq(&client->lock);
+       r->todo = ISO_RES_DEALLOC;
+       schedule_iso_resource(r);
+       spin_unlock_irq(&client->lock);
+}
+
+/*
+ * Common worker for the four iso-resource ioctls.  Allocates the
+ * resource plus both possible completion events up front (so the work
+ * item never has to allocate), then either registers the resource in
+ * the idr (@todo == ISO_RES_ALLOC - add_client_resource() also
+ * schedules the work) or runs it as an unregistered one-shot
+ * (handle -1, work scheduled directly).
+ */
+static int init_iso_resource(struct client *client,
+               struct fw_cdev_allocate_iso_resource *request, int todo)
+{
+       struct iso_resource_event *e1, *e2;
+       struct iso_resource *r;
+       int ret;
+
+       /* Must ask for something, and bandwidth must be a sane amount. */
+       if ((request->channels == 0 && request->bandwidth == 0) ||
+           request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
+           request->bandwidth < 0)
+               return -EINVAL;
+
+       r  = kmalloc(sizeof(*r), GFP_KERNEL);
+       e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
+       e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
+       if (r == NULL || e1 == NULL || e2 == NULL) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       INIT_DELAYED_WORK(&r->work, iso_resource_work);
+       r->client       = client;
+       r->todo         = todo;
+       r->generation   = -1;
+       r->channels     = request->channels;
+       r->bandwidth    = request->bandwidth;
+       r->e_alloc      = e1;
+       r->e_dealloc    = e2;
+
+       e1->resource.closure    = request->closure;
+       e1->resource.type       = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
+       e2->resource.closure    = request->closure;
+       e2->resource.type       = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
+
+       if (todo == ISO_RES_ALLOC) {
+               r->resource.release = release_iso_resource;
+               ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+               if (ret < 0)
+                       goto fail;
+       } else {
+               r->resource.release = NULL;
+               r->resource.handle = -1;
+               schedule_iso_resource(r);
+       }
+       request->handle = r->resource.handle;
+
+       return 0;
+ fail:
+       /* kfree(NULL) is fine for whichever allocations failed. */
+       kfree(r);
+       kfree(e1);
+       kfree(e2);
+
+       return ret;
+}
+
+/* FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE:  managed allocation (auto-realloc). */
+static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
+{
+       struct fw_cdev_allocate_iso_resource *request = buffer;
+
+       return init_iso_resource(client, request, ISO_RES_ALLOC);
+}
+
+/* FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE:  drop a managed allocation. */
+static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
+{
+       struct fw_cdev_deallocate *request = buffer;
+
+       return release_client_resource(client, request->handle,
+                                      release_iso_resource, NULL);
+}
+
+/* FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE:  one-shot allocation. */
+static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
+{
+       struct fw_cdev_allocate_iso_resource *request = buffer;
+
+       return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
+}
+
+/* FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE:  one-shot deallocation. */
+static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
+{
+       struct fw_cdev_allocate_iso_resource *request = buffer;
+
+       return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
+}
+
+/*
+ * Returns a speed code:  Maximum speed to or from this device,
+ * limited by the device's link speed, the local node's link speed,
+ * and all PHY port speeds between the two links.
+ */
+static int ioctl_get_speed(struct client *client, void *buffer)
+{
+       /* The speed code is returned directly as the ioctl's result. */
+       return client->device->max_speed;
+}
+
+/*
+ * FW_CDEV_IOC_SEND_BROADCAST_REQUEST:  write to all nodes on the local
+ * bus (node id LOCAL_BUS | 0x3f) at S100.  Only write tcodes make sense
+ * for broadcast, and only Units Space addresses are permitted.
+ */
+static int ioctl_send_broadcast_request(struct client *client, void *buffer)
+{
+       struct fw_cdev_send_request *request = buffer;
+
+       switch (request->tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+       case TCODE_WRITE_BLOCK_REQUEST:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Security policy: Only allow accesses to Units Space. */
+       if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
+               return -EACCES;
+
+       return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
+}
+
+/*
+ * FW_CDEV_IOC_SEND_STREAM_PACKET:  send an asynchronous stream packet.
+ * Validates speed against the card's link speed, length against
+ * 1024 << speed, and tag/channel/sy ranges, then builds a synthetic
+ * send_request with TCODE_STREAM_DATA and the stream destination id
+ * encoding tag, channel and sy.
+ */
+static int ioctl_send_stream_packet(struct client *client, void *buffer)
+{
+       struct fw_cdev_send_stream_packet *p = buffer;
+       struct fw_cdev_send_request request;
+       int dest;
+
+       if (p->speed > client->device->card->link_speed ||
+           p->length > 1024 << p->speed)
+               return -EIO;
+
+       if (p->tag > 3 || p->channel > 63 || p->sy > 15)
+               return -EINVAL;
+
+       dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
+       request.tcode           = TCODE_STREAM_DATA;
+       request.length          = p->length;
+       request.closure         = p->closure;
+       request.data            = p->data;
+       request.generation      = p->generation;
+
+       return init_request(client, &request, dest, p->speed);
+}
+
+/*
+ * Dispatch table indexed by _IOC_NR(cmd) in dispatch_ioctl(); the order
+ * of entries is therefore ABI and must match the ioctl command number
+ * assignments (presumably the FW_CDEV_IOC_* definitions - do not
+ * reorder or insert in the middle).
+ */
+static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
+       ioctl_get_info,
+       ioctl_send_request,
+       ioctl_allocate,
+       ioctl_deallocate,
+       ioctl_send_response,
+       ioctl_initiate_bus_reset,
+       ioctl_add_descriptor,
+       ioctl_remove_descriptor,
+       ioctl_create_iso_context,
+       ioctl_queue_iso,
+       ioctl_start_iso,
+       ioctl_stop_iso,
+       ioctl_get_cycle_timer,
+       ioctl_allocate_iso_resource,
+       ioctl_deallocate_iso_resource,
+       ioctl_allocate_iso_resource_once,
+       ioctl_deallocate_iso_resource_once,
+       ioctl_get_speed,
+       ioctl_send_broadcast_request,
+       ioctl_send_stream_packet,
+};
+
+/*
+ * Generic ioctl dispatcher: validate the command's type ('#') and
+ * number, copy the argument struct into a kernel buffer according to
+ * the direction bits encoded in the command, call the handler, and copy
+ * the (possibly updated) struct back out.  A handler's non-negative
+ * return value becomes the ioctl's return value.
+ */
+static int dispatch_ioctl(struct client *client,
+                         unsigned int cmd, void __user *arg)
+{
+       char buffer[256];
+       int ret;
+
+       if (_IOC_TYPE(cmd) != '#' ||
+           _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+               return -EINVAL;
+
+       /* _IOC_WRITE means user space writes to the kernel: copy in. */
+       if (_IOC_DIR(cmd) & _IOC_WRITE) {
+               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+                   copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
+                       return -EFAULT;
+       }
+
+       ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+       if (ret < 0)
+               return ret;
+
+       /* _IOC_READ means user space reads from the kernel: copy out. */
+       if (_IOC_DIR(cmd) & _IOC_READ) {
+               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+                   copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
+                       return -EFAULT;
+       }
+
+       return ret;
+}
+
+static long fw_device_op_ioctl(struct file *file,
+                              unsigned int cmd, unsigned long arg)
+{
+       struct client *client = file->private_data;
+
+       if (fw_device_is_shutdown(client->device))
+               return -ENODEV;
+
+       return dispatch_ioctl(client, cmd, (void __user *) arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long fw_device_op_compat_ioctl(struct file *file,
+                                     unsigned int cmd, unsigned long arg)
+{
+       struct client *client = file->private_data;
+
+       if (fw_device_is_shutdown(client->device))
+               return -ENODEV;
+
+       return dispatch_ioctl(client, cmd, compat_ptr(arg));
+}
+#endif
+
+static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct client *client = file->private_data;
+       enum dma_data_direction direction;
+       unsigned long size;
+       int page_count, ret;
+
+       if (fw_device_is_shutdown(client->device))
+               return -ENODEV;
+
+       /* FIXME: We could support multiple buffers, but we don't. */
+       if (client->buffer.pages != NULL)
+               return -EBUSY;
+
+       if (!(vma->vm_flags & VM_SHARED))
+               return -EINVAL;
+
+       if (vma->vm_start & ~PAGE_MASK)
+               return -EINVAL;
+
+       client->vm_start = vma->vm_start;
+       size = vma->vm_end - vma->vm_start;
+       page_count = size >> PAGE_SHIFT;
+       if (size & ~PAGE_MASK)
+               return -EINVAL;
+
+       if (vma->vm_flags & VM_WRITE)
+               direction = DMA_TO_DEVICE;
+       else
+               direction = DMA_FROM_DEVICE;
+
+       ret = fw_iso_buffer_init(&client->buffer, client->device->card,
+                                page_count, direction);
+       if (ret < 0)
+               return ret;
+
+       ret = fw_iso_buffer_map(&client->buffer, vma);
+       if (ret < 0)
+               fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+       return ret;
+}
+
+static int shutdown_resource(int id, void *p, void *data)
+{
+       struct client_resource *r = p;
+       struct client *client = data;
+
+       r->release(client, r);
+       client_put(client);
+
+       return 0;
+}
+
+static int fw_device_op_release(struct inode *inode, struct file *file)
+{
+       struct client *client = file->private_data;
+       struct event *e, *next_e;
+
+       mutex_lock(&client->device->client_list_mutex);
+       list_del(&client->link);
+       mutex_unlock(&client->device->client_list_mutex);
+
+       if (client->iso_context)
+               fw_iso_context_destroy(client->iso_context);
+
+       if (client->buffer.pages)
+               fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+       /* Freeze client->resource_idr and client->event_list */
+       spin_lock_irq(&client->lock);
+       client->in_shutdown = true;
+       spin_unlock_irq(&client->lock);
+
+       idr_for_each(&client->resource_idr, shutdown_resource, client);
+       idr_remove_all(&client->resource_idr);
+       idr_destroy(&client->resource_idr);
+
+       list_for_each_entry_safe(e, next_e, &client->event_list, link)
+               kfree(e);
+
+       client_put(client);
+
+       return 0;
+}
+
+static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
+{
+       struct client *client = file->private_data;
+       unsigned int mask = 0;
+
+       poll_wait(file, &client->wait, pt);
+
+       if (fw_device_is_shutdown(client->device))
+               mask |= POLLHUP | POLLERR;
+       if (!list_empty(&client->event_list))
+               mask |= POLLIN | POLLRDNORM;
+
+       return mask;
+}
+
+const struct file_operations fw_device_ops = {
+       .owner          = THIS_MODULE,
+       .open           = fw_device_op_open,
+       .read           = fw_device_op_read,
+       .unlocked_ioctl = fw_device_op_ioctl,
+       .poll           = fw_device_op_poll,
+       .release        = fw_device_op_release,
+       .mmap           = fw_device_op_mmap,
+
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = fw_device_op_compat_ioctl,
+#endif
+};
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
new file mode 100644 (file)
index 0000000..65d84dd
--- /dev/null
@@ -0,0 +1,1196 @@
+/*
+ * Device probing and sysfs code.
+ *
+ * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+#include <asm/system.h>
+
+#include "core.h"
+
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
+{
+       ci->p = p + 1;
+       ci->end = ci->p + (p[0] >> 16);
+}
+EXPORT_SYMBOL(fw_csr_iterator_init);
+
+int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
+{
+       *key = *ci->p >> 24;
+       *value = *ci->p & 0xffffff;
+
+       return ci->p++ < ci->end;
+}
+EXPORT_SYMBOL(fw_csr_iterator_next);
+
+static int is_fw_unit(struct device *dev);
+
+static int match_unit_directory(u32 *directory, u32 match_flags,
+                               const struct ieee1394_device_id *id)
+{
+       struct fw_csr_iterator ci;
+       int key, value, match;
+
+       match = 0;
+       fw_csr_iterator_init(&ci, directory);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               if (key == CSR_VENDOR && value == id->vendor_id)
+                       match |= IEEE1394_MATCH_VENDOR_ID;
+               if (key == CSR_MODEL && value == id->model_id)
+                       match |= IEEE1394_MATCH_MODEL_ID;
+               if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
+                       match |= IEEE1394_MATCH_SPECIFIER_ID;
+               if (key == CSR_VERSION && value == id->version)
+                       match |= IEEE1394_MATCH_VERSION;
+       }
+
+       return (match & match_flags) == match_flags;
+}
+
+static int fw_unit_match(struct device *dev, struct device_driver *drv)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       struct fw_device *device;
+       const struct ieee1394_device_id *id;
+
+       /* We only allow binding to fw_units. */
+       if (!is_fw_unit(dev))
+               return 0;
+
+       device = fw_device(unit->device.parent);
+       id = container_of(drv, struct fw_driver, driver)->id_table;
+
+       for (; id->match_flags != 0; id++) {
+               if (match_unit_directory(unit->directory, id->match_flags, id))
+                       return 1;
+
+               /* Also check vendor ID in the root directory. */
+               if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
+                   match_unit_directory(&device->config_rom[5],
+                               IEEE1394_MATCH_VENDOR_ID, id) &&
+                   match_unit_directory(unit->directory, id->match_flags
+                               & ~IEEE1394_MATCH_VENDOR_ID, id))
+                       return 1;
+       }
+
+       return 0;
+}
+
+static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
+{
+       struct fw_device *device = fw_device(unit->device.parent);
+       struct fw_csr_iterator ci;
+
+       int key, value;
+       int vendor = 0;
+       int model = 0;
+       int specifier_id = 0;
+       int version = 0;
+
+       fw_csr_iterator_init(&ci, &device->config_rom[5]);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               switch (key) {
+               case CSR_VENDOR:
+                       vendor = value;
+                       break;
+               case CSR_MODEL:
+                       model = value;
+                       break;
+               }
+       }
+
+       fw_csr_iterator_init(&ci, unit->directory);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               switch (key) {
+               case CSR_SPECIFIER_ID:
+                       specifier_id = value;
+                       break;
+               case CSR_VERSION:
+                       version = value;
+                       break;
+               }
+       }
+
+       return snprintf(buffer, buffer_size,
+                       "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
+                       vendor, model, specifier_id, version);
+}
+
+static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       char modalias[64];
+
+       get_modalias(unit, modalias, sizeof(modalias));
+
+       if (add_uevent_var(env, "MODALIAS=%s", modalias))
+               return -ENOMEM;
+
+       return 0;
+}
+
+struct bus_type fw_bus_type = {
+       .name = "firewire",
+       .match = fw_unit_match,
+};
+EXPORT_SYMBOL(fw_bus_type);
+
+int fw_device_enable_phys_dma(struct fw_device *device)
+{
+       int generation = device->generation;
+
+       /* device->node_id, accessed below, must not be older than generation */
+       smp_rmb();
+
+       return device->card->driver->enable_phys_dma(device->card,
+                                                    device->node_id,
+                                                    generation);
+}
+EXPORT_SYMBOL(fw_device_enable_phys_dma);
+
+struct config_rom_attribute {
+       struct device_attribute attr;
+       u32 key;
+};
+
+static ssize_t show_immediate(struct device *dev,
+                             struct device_attribute *dattr, char *buf)
+{
+       struct config_rom_attribute *attr =
+               container_of(dattr, struct config_rom_attribute, attr);
+       struct fw_csr_iterator ci;
+       u32 *dir;
+       int key, value, ret = -ENOENT;
+
+       down_read(&fw_device_rwsem);
+
+       if (is_fw_unit(dev))
+               dir = fw_unit(dev)->directory;
+       else
+               dir = fw_device(dev)->config_rom + 5;
+
+       fw_csr_iterator_init(&ci, dir);
+       while (fw_csr_iterator_next(&ci, &key, &value))
+               if (attr->key == key) {
+                       ret = snprintf(buf, buf ? PAGE_SIZE : 0,
+                                      "0x%06x\n", value);
+                       break;
+               }
+
+       up_read(&fw_device_rwsem);
+
+       return ret;
+}
+
+#define IMMEDIATE_ATTR(name, key)                              \
+       { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
+
+static ssize_t show_text_leaf(struct device *dev,
+                             struct device_attribute *dattr, char *buf)
+{
+       struct config_rom_attribute *attr =
+               container_of(dattr, struct config_rom_attribute, attr);
+       struct fw_csr_iterator ci;
+       u32 *dir, *block = NULL, *p, *end;
+       int length, key, value, last_key = 0, ret = -ENOENT;
+       char *b;
+
+       down_read(&fw_device_rwsem);
+
+       if (is_fw_unit(dev))
+               dir = fw_unit(dev)->directory;
+       else
+               dir = fw_device(dev)->config_rom + 5;
+
+       fw_csr_iterator_init(&ci, dir);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               if (attr->key == last_key &&
+                   key == (CSR_DESCRIPTOR | CSR_LEAF))
+                       block = ci.p - 1 + value;
+               last_key = key;
+       }
+
+       if (block == NULL)
+               goto out;
+
+       length = min(block[0] >> 16, 256U);
+       if (length < 3)
+               goto out;
+
+       if (block[1] != 0 || block[2] != 0)
+               /* Unknown encoding. */
+               goto out;
+
+       if (buf == NULL) {
+               ret = length * 4;
+               goto out;
+       }
+
+       b = buf;
+       end = &block[length + 1];
+       for (p = &block[3]; p < end; p++, b += 4)
+               * (u32 *) b = (__force u32) __cpu_to_be32(*p);
+
+       /* Strip trailing whitespace and add newline. */
+       while (b--, (isspace(*b) || *b == '\0') && b > buf);
+       strcpy(b + 1, "\n");
+       ret = b + 2 - buf;
+ out:
+       up_read(&fw_device_rwsem);
+
+       return ret;
+}
+
+#define TEXT_LEAF_ATTR(name, key)                              \
+       { __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
+
+static struct config_rom_attribute config_rom_attributes[] = {
+       IMMEDIATE_ATTR(vendor, CSR_VENDOR),
+       IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
+       IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
+       IMMEDIATE_ATTR(version, CSR_VERSION),
+       IMMEDIATE_ATTR(model, CSR_MODEL),
+       TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
+       TEXT_LEAF_ATTR(model_name, CSR_MODEL),
+       TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
+};
+
+static void init_fw_attribute_group(struct device *dev,
+                                   struct device_attribute *attrs,
+                                   struct fw_attribute_group *group)
+{
+       struct device_attribute *attr;
+       int i, j;
+
+       for (j = 0; attrs[j].attr.name != NULL; j++)
+               group->attrs[j] = &attrs[j].attr;
+
+       for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
+               attr = &config_rom_attributes[i].attr;
+               if (attr->show(dev, attr, NULL) < 0)
+                       continue;
+               group->attrs[j++] = &attr->attr;
+       }
+
+       group->attrs[j] = NULL;
+       group->groups[0] = &group->group;
+       group->groups[1] = NULL;
+       group->group.attrs = group->attrs;
+       dev->groups = group->groups;
+}
+
+static ssize_t modalias_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       int length;
+
+       length = get_modalias(unit, buf, PAGE_SIZE);
+       strcpy(buf + length, "\n");
+
+       return length + 1;
+}
+
+static ssize_t rom_index_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct fw_device *device = fw_device(dev->parent);
+       struct fw_unit *unit = fw_unit(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       (int)(unit->directory - device->config_rom));
+}
+
+static struct device_attribute fw_unit_attributes[] = {
+       __ATTR_RO(modalias),
+       __ATTR_RO(rom_index),
+       __ATTR_NULL,
+};
+
+static ssize_t config_rom_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct fw_device *device = fw_device(dev);
+       size_t length;
+
+       down_read(&fw_device_rwsem);
+       length = device->config_rom_length * 4;
+       memcpy(buf, device->config_rom, length);
+       up_read(&fw_device_rwsem);
+
+       return length;
+}
+
+static ssize_t guid_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
+{
+       struct fw_device *device = fw_device(dev);
+       int ret;
+
+       down_read(&fw_device_rwsem);
+       ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
+                      device->config_rom[3], device->config_rom[4]);
+       up_read(&fw_device_rwsem);
+
+       return ret;
+}
+
+static int units_sprintf(char *buf, u32 *directory)
+{
+       struct fw_csr_iterator ci;
+       int key, value;
+       int specifier_id = 0;
+       int version = 0;
+
+       fw_csr_iterator_init(&ci, directory);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               switch (key) {
+               case CSR_SPECIFIER_ID:
+                       specifier_id = value;
+                       break;
+               case CSR_VERSION:
+                       version = value;
+                       break;
+               }
+       }
+
+       return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
+}
+
+static ssize_t units_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+       struct fw_device *device = fw_device(dev);
+       struct fw_csr_iterator ci;
+       int key, value, i = 0;
+
+       down_read(&fw_device_rwsem);
+       fw_csr_iterator_init(&ci, &device->config_rom[5]);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               if (key != (CSR_UNIT | CSR_DIRECTORY))
+                       continue;
+               i += units_sprintf(&buf[i], ci.p + value - 1);
+               if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
+                       break;
+       }
+       up_read(&fw_device_rwsem);
+
+       if (i)
+               buf[i - 1] = '\n';
+
+       return i;
+}
+
+static struct device_attribute fw_device_attributes[] = {
+       __ATTR_RO(config_rom),
+       __ATTR_RO(guid),
+       __ATTR_RO(units),
+       __ATTR_NULL,
+};
+
+static int read_rom(struct fw_device *device,
+                   int generation, int index, u32 *data)
+{
+       int rcode;
+
+       /* device->node_id, accessed below, must not be older than generation */
+       smp_rmb();
+
+       rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
+                       device->node_id, generation, device->max_speed,
+                       (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
+                       data, 4);
+       be32_to_cpus(data);
+
+       return rcode;
+}
+
+#define READ_BIB_ROM_SIZE      256
+#define READ_BIB_STACK_SIZE    16
+
+/*
+ * Read the bus info block, perform a speed probe, and read all of the rest of
+ * the config ROM.  We do all this with a cached bus generation.  If the bus
+ * generation changes under us, read_bus_info_block will fail and get retried.
+ * It's better to start all over in this case because the node from which we
+ * are reading the ROM may have changed the ROM during the reset.
+ */
+static int read_bus_info_block(struct fw_device *device, int generation)
+{
+       u32 *rom, *stack, *old_rom, *new_rom;
+       u32 sp, key;
+       int i, end, length, ret = -1;
+
+       rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
+                     sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
+       if (rom == NULL)
+               return -ENOMEM;
+
+       stack = &rom[READ_BIB_ROM_SIZE];
+
+       device->max_speed = SCODE_100;
+
+       /* First read the bus info block. */
+       for (i = 0; i < 5; i++) {
+               if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
+                       goto out;
+               /*
+                * As per IEEE1212 7.2, during power-up, devices can
+                * reply with a 0 for the first quadlet of the config
+                * rom to indicate that they are booting (for example,
+                * if the firmware is on the disk of a external
+                * harddisk).  In that case we just fail, and the
+                * retry mechanism will try again later.
+                */
+               if (i == 0 && rom[i] == 0)
+                       goto out;
+       }
+
+       device->max_speed = device->node->max_speed;
+
+       /*
+        * Determine the speed of
+        *   - devices with link speed less than PHY speed,
+        *   - devices with 1394b PHY (unless only connected to 1394a PHYs),
+        *   - all devices if there are 1394b repeaters.
+        * Note, we cannot use the bus info block's link_spd as starting point
+        * because some buggy firmwares set it lower than necessary and because
+        * 1394-1995 nodes do not have the field.
+        */
+       if ((rom[2] & 0x7) < device->max_speed ||
+           device->max_speed == SCODE_BETA ||
+           device->card->beta_repeaters_present) {
+               u32 dummy;
+
+               /* for S1600 and S3200 */
+               if (device->max_speed == SCODE_BETA)
+                       device->max_speed = device->card->link_speed;
+
+               while (device->max_speed > SCODE_100) {
+                       if (read_rom(device, generation, 0, &dummy) ==
+                           RCODE_COMPLETE)
+                               break;
+                       device->max_speed--;
+               }
+       }
+
+       /*
+        * Now parse the config rom.  The config rom is a recursive
+        * directory structure so we parse it using a stack of
+        * references to the blocks that make up the structure.  We
+        * push a reference to the root directory on the stack to
+        * start things off.
+        */
+       length = i;
+       sp = 0;
+       stack[sp++] = 0xc0000005;
+       while (sp > 0) {
+               /*
+                * Pop the next block reference of the stack.  The
+                * lower 24 bits is the offset into the config rom,
+                * the upper 8 bits are the type of the reference the
+                * block.
+                */
+               key = stack[--sp];
+               i = key & 0xffffff;
+               if (i >= READ_BIB_ROM_SIZE)
+                       /*
+                        * The reference points outside the standard
+                        * config rom area, something's fishy.
+                        */
+                       goto out;
+
+               /* Read header quadlet for the block to get the length. */
+               if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
+                       goto out;
+               end = i + (rom[i] >> 16) + 1;
+               i++;
+               if (end > READ_BIB_ROM_SIZE)
+                       /*
+                        * This block extends outside standard config
+                        * area (and the array we're reading it
+                        * into).  That's broken, so ignore this
+                        * device.
+                        */
+                       goto out;
+
+               /*
+                * Now read in the block.  If this is a directory
+                * block, check the entries as we read them to see if
+                * it references another block, and push it in that case.
+                */
+               while (i < end) {
+                       if (read_rom(device, generation, i, &rom[i]) !=
+                           RCODE_COMPLETE)
+                               goto out;
+                       if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
+                           sp < READ_BIB_STACK_SIZE)
+                               stack[sp++] = i + rom[i];
+                       i++;
+               }
+               if (length < i)
+                       length = i;
+       }
+
+       old_rom = device->config_rom;
+       new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
+       if (new_rom == NULL)
+               goto out;
+
+       down_write(&fw_device_rwsem);
+       device->config_rom = new_rom;
+       device->config_rom_length = length;
+       up_write(&fw_device_rwsem);
+
+       kfree(old_rom);
+       ret = 0;
+       device->cmc = rom[2] >> 30 & 1;
+ out:
+       kfree(rom);
+
+       return ret;
+}
+
+static void fw_unit_release(struct device *dev)
+{
+       struct fw_unit *unit = fw_unit(dev);
+
+       kfree(unit);
+}
+
+static struct device_type fw_unit_type = {
+       .uevent         = fw_unit_uevent,
+       .release        = fw_unit_release,
+};
+
+static int is_fw_unit(struct device *dev)
+{
+       return dev->type == &fw_unit_type;
+}
+
+static void create_units(struct fw_device *device)
+{
+       struct fw_csr_iterator ci;
+       struct fw_unit *unit;
+       int key, value, i;
+
+       i = 0;
+       fw_csr_iterator_init(&ci, &device->config_rom[5]);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               if (key != (CSR_UNIT | CSR_DIRECTORY))
+                       continue;
+
+               /*
+                * Get the address of the unit directory and try to
+                * match the drivers id_tables against it.
+                */
+               unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+               if (unit == NULL) {
+                       fw_error("failed to allocate memory for unit\n");
+                       continue;
+               }
+
+               unit->directory = ci.p + value - 1;
+               unit->device.bus = &fw_bus_type;
+               unit->device.type = &fw_unit_type;
+               unit->device.parent = &device->device;
+               dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
+
+               BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
+                               ARRAY_SIZE(fw_unit_attributes) +
+                               ARRAY_SIZE(config_rom_attributes));
+               init_fw_attribute_group(&unit->device,
+                                       fw_unit_attributes,
+                                       &unit->attribute_group);
+
+               if (device_register(&unit->device) < 0)
+                       goto skip_unit;
+
+               continue;
+
+       skip_unit:
+               kfree(unit);
+       }
+}
+
+static int shutdown_unit(struct device *device, void *data)
+{
+       device_unregister(device);
+
+       return 0;
+}
+
+/*
+ * fw_device_rwsem acts as dual purpose mutex:
+ *   - serializes accesses to fw_device_idr,
+ *   - serializes accesses to fw_device.config_rom/.config_rom_length and
+ *     fw_unit.directory, unless those accesses happen at safe occasions
+ */
+DECLARE_RWSEM(fw_device_rwsem);
+
+DEFINE_IDR(fw_device_idr);
+int fw_cdev_major;
+
+struct fw_device *fw_device_get_by_devt(dev_t devt)
+{
+       struct fw_device *device;
+
+       down_read(&fw_device_rwsem);
+       device = idr_find(&fw_device_idr, MINOR(devt));
+       if (device)
+               fw_device_get(device);
+       up_read(&fw_device_rwsem);
+
+       return device;
+}
+
+/*
+ * These defines control the retry behavior for reading the config
+ * rom.  It shouldn't be necessary to tweak these; if the device
+ * doesn't respond to a config rom read within 10 seconds, it's not
+ * going to respond at all.  As for the initial delay, a lot of
+ * devices will be able to respond within half a second after bus
+ * reset.  On the other hand, it's not really worth being more
+ * aggressive than that, since it scales pretty well; if 10 devices
+ * are plugged in, they're all getting read within one second.
+ */
+
+#define MAX_RETRIES    10
+#define RETRY_DELAY    (3 * HZ)
+#define INITIAL_DELAY  (HZ / 2)
+#define SHUTDOWN_DELAY (2 * HZ)
+
+static void fw_device_shutdown(struct work_struct *work)
+{
+       struct fw_device *device =
+               container_of(work, struct fw_device, work.work);
+       int minor = MINOR(device->device.devt);
+
+       if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
+           && !list_empty(&device->card->link)) {
+               schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+               return;
+       }
+
+       if (atomic_cmpxchg(&device->state,
+                          FW_DEVICE_GONE,
+                          FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
+               return;
+
+       fw_device_cdev_remove(device);
+       device_for_each_child(&device->device, NULL, shutdown_unit);
+       device_unregister(&device->device);
+
+       down_write(&fw_device_rwsem);
+       idr_remove(&fw_device_idr, minor);
+       up_write(&fw_device_rwsem);
+
+       fw_device_put(device);
+}
+
+static void fw_device_release(struct device *dev)
+{
+       struct fw_device *device = fw_device(dev);
+       struct fw_card *card = device->card;
+       unsigned long flags;
+
+       /*
+        * Take the card lock so we don't set this to NULL while a
+        * FW_NODE_UPDATED callback is being handled or while the
+        * bus manager work looks at this node.
+        */
+       spin_lock_irqsave(&card->lock, flags);
+       device->node->data = NULL;
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       fw_node_put(device->node);
+       kfree(device->config_rom);
+       kfree(device);
+       fw_card_put(card);
+}
+
+static struct device_type fw_device_type = {
+       .release = fw_device_release,
+};
+
+static int update_unit(struct device *dev, void *data)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       struct fw_driver *driver = (struct fw_driver *)dev->driver;
+
+       if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
+               down(&dev->sem);
+               driver->update(unit);
+               up(&dev->sem);
+       }
+
+       return 0;
+}
+
+static void fw_device_update(struct work_struct *work)
+{
+       struct fw_device *device =
+               container_of(work, struct fw_device, work.work);
+
+       fw_device_cdev_update(device);
+       device_for_each_child(&device->device, NULL, update_unit);
+}
+
+/*
+ * If a device was pending for deletion because its node went away but its
+ * bus info block and root directory header matches that of a newly discovered
+ * device, revive the existing fw_device.
+ * The newly allocated fw_device becomes obsolete instead.
+ */
+static int lookup_existing_device(struct device *dev, void *data)
+{
+       struct fw_device *old = fw_device(dev);
+       struct fw_device *new = data;
+       struct fw_card *card = new->card;
+       int match = 0;
+
+       down_read(&fw_device_rwsem); /* serialize config_rom access */
+       spin_lock_irq(&card->lock);  /* serialize node access */
+
+       if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
+           atomic_cmpxchg(&old->state,
+                          FW_DEVICE_GONE,
+                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+               struct fw_node *current_node = new->node;
+               struct fw_node *obsolete_node = old->node;
+
+               new->node = obsolete_node;
+               new->node->data = new;
+               old->node = current_node;
+               old->node->data = old;
+
+               old->max_speed = new->max_speed;
+               old->node_id = current_node->node_id;
+               smp_wmb();  /* update node_id before generation */
+               old->generation = card->generation;
+               old->config_rom_retries = 0;
+               fw_notify("rediscovered device %s\n", dev_name(dev));
+
+               PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+               schedule_delayed_work(&old->work, 0);
+
+               if (current_node == card->root_node)
+                       fw_schedule_bm_work(card, 0);
+
+               match = 1;
+       }
+
+       spin_unlock_irq(&card->lock);
+       up_read(&fw_device_rwsem);
+
+       return match;
+}
+
+/* Cached probe result for the device's BROADCAST_CHANNEL register */
+enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
+
+/*
+ * Probe (once, result cached in device->bc_implemented) whether the
+ * device implements the BROADCAST_CHANNEL register, and if so, write
+ * the valid broadcast channel number into it.  No-op until the local
+ * IRM has actually allocated the broadcast channel.
+ */
+void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
+{
+       struct fw_card *card = device->card;
+       __be32 data;
+       int rcode;
+
+       if (!card->broadcast_channel_allocated)
+               return;
+
+       if (device->bc_implemented == BC_UNKNOWN) {
+               rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
+                               device->node_id, generation, device->max_speed,
+                               CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
+                               &data, 4);
+               switch (rcode) {
+               case RCODE_COMPLETE:
+                       /* MSB = "valid" bit of the register */
+                       if (data & cpu_to_be32(1 << 31)) {
+                               device->bc_implemented = BC_IMPLEMENTED;
+                               break;
+                       }
+                       /* else fall through to case address error */
+               case RCODE_ADDRESS_ERROR:
+                       device->bc_implemented = BC_UNIMPLEMENTED;
+               }
+               /* Any other rcode: stays BC_UNKNOWN, probed again later. */
+       }
+
+       if (device->bc_implemented == BC_IMPLEMENTED) {
+               data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
+                                  BROADCAST_CHANNEL_VALID);
+               fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+                               device->node_id, generation, device->max_speed,
+                               CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
+                               &data, 4);
+       }
+}
+
+/*
+ * Worker for the initial config ROM scan of a newly discovered node:
+ * reads the config ROM (with retries), revives a matching device that
+ * had only recently gone away, or else registers a new fw_device with
+ * the driver core and creates its unit child devices.
+ */
+static void fw_device_init(struct work_struct *work)
+{
+       struct fw_device *device =
+               container_of(work, struct fw_device, work.work);
+       struct device *revived_dev;
+       int minor, ret;
+
+       /*
+        * All failure paths here set node->data to NULL, so that we
+        * don't try to do device_for_each_child() on a kfree()'d
+        * device.
+        */
+
+       if (read_bus_info_block(device, device->generation) < 0) {
+               if (device->config_rom_retries < MAX_RETRIES &&
+                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+                       device->config_rom_retries++;
+                       schedule_delayed_work(&device->work, RETRY_DELAY);
+               } else {
+                       fw_notify("giving up on config rom for node id %x\n",
+                                 device->node_id);
+                       if (device->node == device->card->root_node)
+                               fw_schedule_bm_work(device->card, 0);
+                       fw_device_release(&device->device);
+               }
+               return;
+       }
+
+       /*
+        * If this ROM matches a device pending deletion, revive that
+        * one (see lookup_existing_device()) and discard this one.
+        */
+       revived_dev = device_find_child(device->card->device,
+                                       device, lookup_existing_device);
+       if (revived_dev) {
+               put_device(revived_dev);
+               fw_device_release(&device->device);
+
+               return;
+       }
+
+       device_initialize(&device->device);
+
+       /* Allocate the character device minor number for this device. */
+       fw_device_get(device);
+       down_write(&fw_device_rwsem);
+       ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
+             idr_get_new(&fw_device_idr, device, &minor) :
+             -ENOMEM;
+       up_write(&fw_device_rwsem);
+
+       if (ret < 0)
+               goto error;
+
+       device->device.bus = &fw_bus_type;
+       device->device.type = &fw_device_type;
+       device->device.parent = device->card->device;
+       device->device.devt = MKDEV(fw_cdev_major, minor);
+       dev_set_name(&device->device, "fw%d", minor);
+
+       BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
+                       ARRAY_SIZE(fw_device_attributes) +
+                       ARRAY_SIZE(config_rom_attributes));
+       init_fw_attribute_group(&device->device,
+                               fw_device_attributes,
+                               &device->attribute_group);
+
+       if (device_add(&device->device)) {
+               fw_error("Failed to add device.\n");
+               goto error_with_cdev;
+       }
+
+       create_units(device);
+
+       /*
+        * Transition the device to running state.  If it got pulled
+        * out from under us while we did the initialization work, we
+        * have to shut down the device again here.  Normally, though,
+        * fw_node_event will be responsible for shutting it down when
+        * necessary.  We have to use the atomic cmpxchg here to avoid
+        * racing with the FW_NODE_DESTROYED case in
+        * fw_node_event().
+        */
+       if (atomic_cmpxchg(&device->state,
+                          FW_DEVICE_INITIALIZING,
+                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+               PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+               schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+       } else {
+               if (device->config_rom_retries)
+                       fw_notify("created device %s: GUID %08x%08x, S%d00, "
+                                 "%d config ROM retries\n",
+                                 dev_name(&device->device),
+                                 device->config_rom[3], device->config_rom[4],
+                                 1 << device->max_speed,
+                                 device->config_rom_retries);
+               else
+                       fw_notify("created device %s: GUID %08x%08x, S%d00\n",
+                                 dev_name(&device->device),
+                                 device->config_rom[3], device->config_rom[4],
+                                 1 << device->max_speed);
+               device->config_rom_retries = 0;
+
+               fw_device_set_broadcast_channel(device, device->generation);
+       }
+
+       /*
+        * Reschedule the IRM work if we just finished reading the
+        * root node config rom.  If this races with a bus reset we
+        * just end up running the IRM work a couple of extra times -
+        * pretty harmless.
+        */
+       if (device->node == device->card->root_node)
+               fw_schedule_bm_work(device->card, 0);
+
+       return;
+
+ error_with_cdev:
+       down_write(&fw_device_rwsem);
+       idr_remove(&fw_device_idr, minor);
+       up_write(&fw_device_rwsem);
+ error:
+       fw_device_put(device);          /* fw_device_idr's reference */
+
+       put_device(&device->device);    /* our reference */
+}
+
+/* Result codes of reread_bus_info_block() */
+enum {
+       REREAD_BIB_ERROR,       /* read transaction failed */
+       REREAD_BIB_GONE,        /* first quadlet is zero: device vanished */
+       REREAD_BIB_UNCHANGED,   /* ROM header matches the cached copy */
+       REREAD_BIB_CHANGED,     /* ROM header differs: units need rescan */
+};
+
+/* Reread and compare bus info block and header of root directory */
+static int reread_bus_info_block(struct fw_device *device, int generation)
+{
+       int i = 0;
+       u32 q;
+
+       /* Walk the six quadlets that identify the device. */
+       while (i < 6) {
+               if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
+                       return REREAD_BIB_ERROR;
+
+               if (i == 0 && q == 0)
+                       return REREAD_BIB_GONE;
+
+               if (q != device->config_rom[i])
+                       return REREAD_BIB_CHANGED;
+
+               i++;
+       }
+
+       return REREAD_BIB_UNCHANGED;
+}
+
+/*
+ * Worker after a bus reset for an already known node:  reread the ROM
+ * header to decide whether it is still the same device.  Unchanged:
+ * resume; changed: recreate the unit children; unreadable or gone:
+ * schedule a shutdown.
+ */
+static void fw_device_refresh(struct work_struct *work)
+{
+       struct fw_device *device =
+               container_of(work, struct fw_device, work.work);
+       struct fw_card *card = device->card;
+       int node_id = device->node_id;
+
+       switch (reread_bus_info_block(device, device->generation)) {
+       case REREAD_BIB_ERROR:
+               /* Retry a few times; the node may still be powering up. */
+               if (device->config_rom_retries < MAX_RETRIES / 2 &&
+                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+                       device->config_rom_retries++;
+                       schedule_delayed_work(&device->work, RETRY_DELAY / 2);
+
+                       return;
+               }
+               goto give_up;
+
+       case REREAD_BIB_GONE:
+               goto gone;
+
+       case REREAD_BIB_UNCHANGED:
+               /* cmpxchg may lose against FW_NODE_DESTROYED handling. */
+               if (atomic_cmpxchg(&device->state,
+                                  FW_DEVICE_INITIALIZING,
+                                  FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
+                       goto gone;
+
+               fw_device_update(work);
+               device->config_rom_retries = 0;
+               goto out;
+
+       case REREAD_BIB_CHANGED:
+               break;
+       }
+
+       /*
+        * Something changed.  We keep things simple and don't investigate
+        * further.  We just destroy all previous units and create new ones.
+        */
+       device_for_each_child(&device->device, NULL, shutdown_unit);
+
+       if (read_bus_info_block(device, device->generation) < 0) {
+               if (device->config_rom_retries < MAX_RETRIES &&
+                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+                       device->config_rom_retries++;
+                       schedule_delayed_work(&device->work, RETRY_DELAY);
+
+                       return;
+               }
+               goto give_up;
+       }
+
+       create_units(device);
+
+       /* Userspace may want to re-read attributes. */
+       kobject_uevent(&device->device.kobj, KOBJ_CHANGE);
+
+       if (atomic_cmpxchg(&device->state,
+                          FW_DEVICE_INITIALIZING,
+                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
+               goto gone;
+
+       fw_notify("refreshed device %s\n", dev_name(&device->device));
+       device->config_rom_retries = 0;
+       goto out;
+
+ give_up:
+       fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
+ gone:
+       atomic_set(&device->state, FW_DEVICE_GONE);
+       PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+       schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+ out:
+       /* Kick the bus manager work if this was the root node. */
+       if (node_id == card->root_node->node_id)
+               fw_schedule_bm_work(card, 0);
+}
+
+/*
+ * Bus topology event dispatcher:  create, refresh, update, or shut
+ * down the fw_device behind a node.  The create path must not sleep
+ * (note GFP_ATOMIC); all real work is deferred to the device's
+ * delayed work item.
+ */
+void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+{
+       struct fw_device *device;
+
+       switch (event) {
+       case FW_NODE_CREATED:
+       case FW_NODE_LINK_ON:
+               if (!node->link_on)
+                       break;
+ create:
+               device = kzalloc(sizeof(*device), GFP_ATOMIC);
+               if (device == NULL)
+                       break;
+
+               /*
+                * Do minimal initialization of the device here, the
+                * rest will happen in fw_device_init().
+                *
+                * Attention:  A lot of things, even fw_device_get(),
+                * cannot be done before fw_device_init() finished!
+                * You can basically just check device->state and
+                * schedule work until then, but only while holding
+                * card->lock.
+                */
+               atomic_set(&device->state, FW_DEVICE_INITIALIZING);
+               device->card = fw_card_get(card);
+               device->node = fw_node_get(node);
+               device->node_id = node->node_id;
+               device->generation = card->generation;
+               device->is_local = node == card->local_node;
+               mutex_init(&device->client_list_mutex);
+               INIT_LIST_HEAD(&device->client_list);
+
+               /*
+                * Set the node data to point back to this device so
+                * FW_NODE_UPDATED callbacks can update the node_id
+                * and generation for the device.
+                */
+               node->data = device;
+
+               /*
+                * Many devices are slow to respond after bus resets,
+                * especially if they are bus powered and go through
+                * power-up after getting plugged in.  We schedule the
+                * first config rom scan half a second after bus reset.
+                */
+               INIT_DELAYED_WORK(&device->work, fw_device_init);
+               schedule_delayed_work(&device->work, INITIAL_DELAY);
+               break;
+
+       case FW_NODE_INITIATED_RESET:
+               /* A node without a device yet is treated like a new one. */
+               device = node->data;
+               if (device == NULL)
+                       goto create;
+
+               device->node_id = node->node_id;
+               smp_wmb();  /* update node_id before generation */
+               device->generation = card->generation;
+               if (atomic_cmpxchg(&device->state,
+                           FW_DEVICE_RUNNING,
+                           FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
+                       PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+                       schedule_delayed_work(&device->work,
+                               device->is_local ? 0 : INITIAL_DELAY);
+               }
+               break;
+
+       case FW_NODE_UPDATED:
+               if (!node->link_on || node->data == NULL)
+                       break;
+
+               device = node->data;
+               device->node_id = node->node_id;
+               smp_wmb();  /* update node_id before generation */
+               device->generation = card->generation;
+               if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
+                       PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+                       schedule_delayed_work(&device->work, 0);
+               }
+               break;
+
+       case FW_NODE_DESTROYED:
+       case FW_NODE_LINK_OFF:
+               if (!node->data)
+                       break;
+
+               /*
+                * Destroy the device associated with the node.  There
+                * are two cases here: either the device is fully
+                * initialized (FW_DEVICE_RUNNING) or we're in the
+                * process of reading its config rom
+                * (FW_DEVICE_INITIALIZING).  If it is fully
+                * initialized we can reuse device->work to schedule a
+                * full fw_device_shutdown().  If not, there's work
+                * scheduled to read its config rom, and we just put
+                * the device in shutdown state to have that code fail
+                * to create the device.
+                */
+               device = node->data;
+               if (atomic_xchg(&device->state,
+                               FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
+                       PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+                       schedule_delayed_work(&device->work,
+                               list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
+               }
+               break;
+       }
+}
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
new file mode 100644 (file)
index 0000000..28076c8
--- /dev/null
@@ -0,0 +1,329 @@
+/*
+ * Isochronous I/O functionality:
+ *   - Isochronous DMA context management
+ *   - Isochronous bus resource management (channels, bandwidth), client side
+ *
+ * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+
+#include <asm/byteorder.h>
+
+#include "core.h"
+
+/*
+ * Isochronous DMA context management
+ */
+
+/*
+ * Allocate page_count zeroed pages and DMA-map each of them for the
+ * given direction.  The DMA address of each page is stashed in the
+ * page's private field.  Returns 0 or -ENOMEM; on failure nothing is
+ * left allocated and buffer->pages is NULL.
+ */
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+                      int page_count, enum dma_data_direction direction)
+{
+       int i, j;
+       dma_addr_t address;
+
+       buffer->page_count = page_count;
+       buffer->direction = direction;
+
+       buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
+                               GFP_KERNEL);
+       if (buffer->pages == NULL)
+               goto out;
+
+       for (i = 0; i < buffer->page_count; i++) {
+               buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+               if (buffer->pages[i] == NULL)
+                       goto out_pages;
+
+               address = dma_map_page(card->device, buffer->pages[i],
+                                      0, PAGE_SIZE, direction);
+               if (dma_mapping_error(card->device, address)) {
+                       __free_page(buffer->pages[i]);
+                       goto out_pages;
+               }
+               set_page_private(buffer->pages[i], address);
+       }
+
+       return 0;
+
+ out_pages:
+       for (j = 0; j < i; j++) {
+               address = page_private(buffer->pages[j]);
+               /*
+                * Unmap with the direction used at map time; the DMA API
+                * requires map and unmap directions to match (this used
+                * to pass DMA_TO_DEVICE unconditionally).
+                */
+               dma_unmap_page(card->device, address,
+                              PAGE_SIZE, direction);
+               __free_page(buffer->pages[j]);
+       }
+       kfree(buffer->pages);
+ out:
+       buffer->pages = NULL;
+
+       return -ENOMEM;
+}
+
+/* Map all pages of the iso buffer into a userspace VMA. */
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
+{
+       unsigned long addr = vma->vm_start;
+       int i;
+
+       for (i = 0; i < buffer->page_count; i++, addr += PAGE_SIZE) {
+               int err = vm_insert_page(vma, addr, buffer->pages[i]);
+
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/*
+ * Unmap and free all pages of an iso buffer previously set up by
+ * fw_iso_buffer_init().
+ */
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
+                          struct fw_card *card)
+{
+       int i;
+       dma_addr_t address;
+
+       for (i = 0; i < buffer->page_count; i++) {
+               address = page_private(buffer->pages[i]);
+               /*
+                * Unmap with the same direction that was used for
+                * dma_map_page() in fw_iso_buffer_init(); the DMA API
+                * requires matching directions (this used to pass
+                * DMA_TO_DEVICE unconditionally).
+                */
+               dma_unmap_page(card->device, address,
+                              PAGE_SIZE, buffer->direction);
+               __free_page(buffer->pages[i]);
+       }
+
+       kfree(buffer->pages);
+       buffer->pages = NULL;
+}
+
+/*
+ * Ask the card driver for an iso DMA context and fill in the generic
+ * fields.  Returns the context or an ERR_PTR from the driver.
+ */
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+               int type, int channel, int speed, size_t header_size,
+               fw_iso_callback_t callback, void *callback_data)
+{
+       struct fw_iso_context *ctx =
+               card->driver->allocate_iso_context(card, type, channel,
+                                                  header_size);
+
+       if (!IS_ERR(ctx)) {
+               ctx->card = card;
+               ctx->type = type;
+               ctx->channel = channel;
+               ctx->speed = speed;
+               ctx->header_size = header_size;
+               ctx->callback = callback;
+               ctx->callback_data = callback_data;
+       }
+
+       return ctx;
+}
+
+/* Release an iso context via the owning card driver. */
+void fw_iso_context_destroy(struct fw_iso_context *ctx)
+{
+       ctx->card->driver->free_iso_context(ctx);
+}
+
+/* Start iso DMA; cycle/sync/tags semantics are driver-defined. */
+int fw_iso_context_start(struct fw_iso_context *ctx,
+                        int cycle, int sync, int tags)
+{
+       struct fw_card *card = ctx->card;
+
+       return card->driver->start_iso(ctx, cycle, sync, tags);
+}
+
+/* Queue one iso packet, with payload at the given buffer offset. */
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+                        struct fw_iso_packet *packet,
+                        struct fw_iso_buffer *buffer,
+                        unsigned long payload)
+{
+       return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
+}
+
+/* Stop iso DMA via the owning card driver. */
+int fw_iso_context_stop(struct fw_iso_context *ctx)
+{
+       struct fw_card *card = ctx->card;
+
+       return card->driver->stop_iso(ctx);
+}
+
+/*
+ * Isochronous bus resource management (channels, bandwidth), client side
+ */
+
+/*
+ * Allocate or release bandwidth at the IRM's BANDWIDTH_AVAILABLE
+ * register using compare-swap lock transactions.  Returns the amount
+ * allocated/deallocated on success, -EAGAIN if a bus reset occurred
+ * during an allocation (a generation change frees everything anyway),
+ * or -EIO after too many lost compare-swap races.
+ */
+static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
+                           int bandwidth, bool allocate)
+{
+       __be32 data[2];
+       int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
+
+       /*
+        * On a 1394a IRM with low contention, try < 1 is enough.
+        * On a 1394-1995 IRM, we need at least try < 2.
+        * Let's just do try < 5.
+        */
+       for (try = 0; try < 5; try++) {
+               new = allocate ? old - bandwidth : old + bandwidth;
+               if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
+                       break;
+
+               /* data[0] = expected old value, data[1] = new value */
+               data[0] = cpu_to_be32(old);
+               data[1] = cpu_to_be32(new);
+               switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+                               irm_id, generation, SCODE_100,
+                               CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
+                               data, sizeof(data))) {
+               case RCODE_GENERATION:
+                       /* A generation change frees all bandwidth. */
+                       return allocate ? -EAGAIN : bandwidth;
+
+               case RCODE_COMPLETE:
+                       if (be32_to_cpup(data) == old)
+                               return bandwidth;
+
+                       /* Lost the race; retry with the value just read. */
+                       old = be32_to_cpup(data);
+                       /* Fall through. */
+               }
+       }
+
+       return -EIO;
+}
+
+/*
+ * Allocate or release one channel out of channels_mask at the IRM's
+ * CHANNELS_AVAILABLE register half selected by offset, using
+ * compare-swap lock transactions.  Bit i of channels_mask stands for
+ * channel i relative to that register half; the register itself is
+ * big-endian with the MSB for channel 0.  Returns the channel index on
+ * success, -EAGAIN if a bus reset occurred during an allocation, or
+ * -EIO when nothing could be (de)allocated.
+ */
+static int manage_channel(struct fw_card *card, int irm_id, int generation,
+                         u32 channels_mask, u64 offset, bool allocate)
+{
+       __be32 data[2], c, all, old;
+       int i, retry = 5;
+
+       /* Initial guess at the register contents before the first lock. */
+       old = all = allocate ? cpu_to_be32(~0) : 0;
+
+       for (i = 0; i < 32; i++) {
+               if (!(channels_mask & 1 << i))
+                       continue;
+
+               /* Register bit for channel i (MSB is channel 0). */
+               c = cpu_to_be32(1 << (31 - i));
+               /* Skip channels already known to be taken (or free). */
+               if ((old & c) != (all & c))
+                       continue;
+
+               data[0] = old;
+               data[1] = old ^ c;
+               switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+                                          irm_id, generation, SCODE_100,
+                                          offset, data, sizeof(data))) {
+               case RCODE_GENERATION:
+                       /* A generation change frees all channels. */
+                       return allocate ? -EAGAIN : i;
+
+               case RCODE_COMPLETE:
+                       if (data[0] == old)
+                               return i;
+
+                       old = data[0];
+
+                       /* Is the IRM 1394a-2000 compliant? */
+                       if ((data[0] & c) == (data[1] & c))
+                               continue;
+
+                       /* 1394-1995 IRM, fall through to retry. */
+               default:
+                       /* Retry the same bit, at most 5 retries overall. */
+                       if (retry--)
+                               i--;
+               }
+       }
+
+       return -EIO;
+}
+
+/* Release a single channel (0..63) at the appropriate register half. */
+static void deallocate_channel(struct fw_card *card, int irm_id,
+                              int generation, int channel)
+{
+       u32 mask;
+       u64 offset;
+
+       if (channel < 32) {
+               mask = 1 << channel;
+               offset = CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI;
+       } else {
+               mask = 1 << (channel - 32);
+               offset = CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
+       }
+
+       manage_channel(card, irm_id, generation, mask, offset, false);
+}
+
+/**
+ * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
+ *
+ * In parameters: card, generation, channels_mask, bandwidth, allocate
+ * Out parameters: channel, bandwidth
+ * This function blocks (sleeps) during communication with the IRM.
+ *
+ * Allocates or deallocates at most one channel out of channels_mask.
+ * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
+ * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
+ * channel 0 and LSB for channel 63.)
+ * Allocates or deallocates as many bandwidth allocation units as specified.
+ *
+ * Returns channel < 0 if no channel was allocated or deallocated.
+ * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
+ *
+ * If generation is stale, deallocations succeed but allocations fail with
+ * channel = -EAGAIN.
+ *
+ * If channel allocation fails, no bandwidth will be allocated either.
+ * If bandwidth allocation fails, no channel will be allocated either.
+ * But deallocations of channel and bandwidth are tried independently
+ * of each other's success.
+ */
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+                           u64 channels_mask, int *channel, int *bandwidth,
+                           bool allocate)
+{
+       u32 channels_hi = channels_mask;        /* channels 31...0 */
+       u32 channels_lo = channels_mask >> 32;  /* channels 63...32 */
+       int irm_id, ret, c = -EINVAL;
+
+       /* Take a consistent snapshot of the current IRM's node ID. */
+       spin_lock_irq(&card->lock);
+       irm_id = card->irm_node->node_id;
+       spin_unlock_irq(&card->lock);
+
+       /* Try the low channels first, then the high ones. */
+       if (channels_hi)
+               c = manage_channel(card, irm_id, generation, channels_hi,
+                   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
+       if (channels_lo && c < 0) {
+               c = manage_channel(card, irm_id, generation, channels_lo,
+                   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
+               if (c >= 0)
+                       c += 32;
+       }
+       *channel = c;
+
+       /* Channel allocation failed: allocate no bandwidth either. */
+       if (allocate && channels_mask != 0 && c < 0)
+               *bandwidth = 0;
+
+       /* Note: *bandwidth is an in/out parameter. */
+       if (*bandwidth == 0)
+               return;
+
+       ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
+       if (ret < 0)
+               *bandwidth = 0;
+
+       /* Bandwidth allocation failed: roll back the channel allocation. */
+       if (allocate && ret < 0 && c >= 0) {
+               deallocate_channel(card, irm_id, generation, c);
+               *channel = ret;
+       }
+}
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
new file mode 100644 (file)
index 0000000..fddf2b3
--- /dev/null
@@ -0,0 +1,572 @@
+/*
+ * Incremental bus scan, based on bus topology
+ *
+ * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+#include "core.h"
+
+#define SELF_ID_PHY_ID(q)              (((q) >> 24) & 0x3f)
+#define SELF_ID_EXTENDED(q)            (((q) >> 23) & 0x01)
+#define SELF_ID_LINK_ON(q)             (((q) >> 22) & 0x01)
+#define SELF_ID_GAP_COUNT(q)           (((q) >> 16) & 0x3f)
+#define SELF_ID_PHY_SPEED(q)           (((q) >> 14) & 0x03)
+#define SELF_ID_CONTENDER(q)           (((q) >> 11) & 0x01)
+#define SELF_ID_PHY_INITIATOR(q)       (((q) >>  1) & 0x01)
+#define SELF_ID_MORE_PACKETS(q)                (((q) >>  0) & 0x01)
+
+#define SELF_ID_EXT_SEQUENCE(q)                (((q) >> 20) & 0x07)
+
+#define SELFID_PORT_CHILD      0x3
+#define SELFID_PORT_PARENT     0x2
+#define SELFID_PORT_NCONN      0x1
+#define SELFID_PORT_NONE       0x0
+
+static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
+{
+       u32 q;
+       int port_type, shift, seq;
+
+       *total_port_count = 0;
+       *child_port_count = 0;
+
+       shift = 6;
+       q = *sid;
+       seq = 0;
+
+       while (1) {
+               port_type = (q >> shift) & 0x03;
+               switch (port_type) {
+               case SELFID_PORT_CHILD:
+                       (*child_port_count)++;
+               case SELFID_PORT_PARENT:
+               case SELFID_PORT_NCONN:
+                       (*total_port_count)++;
+               case SELFID_PORT_NONE:
+                       break;
+               }
+
+               shift -= 2;
+               if (shift == 0) {
+                       if (!SELF_ID_MORE_PACKETS(q))
+                               return sid + 1;
+
+                       shift = 16;
+                       sid++;
+                       q = *sid;
+
+                       /*
+                        * Check that the extra packets actually are
+                        * extended self ID packets and that the
+                        * sequence numbers in the extended self ID
+                        * packets increase as expected.
+                        */
+
+                       if (!SELF_ID_EXTENDED(q) ||
+                           seq != SELF_ID_EXT_SEQUENCE(q))
+                               return NULL;
+
+                       seq++;
+               }
+       }
+}
+
+static int get_port_type(u32 *sid, int port_index)
+{
+       int index, shift;
+
+       index = (port_index + 5) / 8;
+       shift = 16 - ((port_index + 5) & 7) * 2;
+       return (sid[index] >> shift) & 0x03;
+}
+
+static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
+{
+       struct fw_node *node;
+
+       node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
+                      GFP_ATOMIC);
+       if (node == NULL)
+               return NULL;
+
+       node->color = color;
+       node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
+       node->link_on = SELF_ID_LINK_ON(sid);
+       node->phy_speed = SELF_ID_PHY_SPEED(sid);
+       node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
+       node->port_count = port_count;
+
+       atomic_set(&node->ref_count, 1);
+       INIT_LIST_HEAD(&node->link);
+
+       return node;
+}
+
+/*
+ * Compute the maximum hop count for this node and its children.  The
+ * maximum hop count is the maximum number of connections between any
+ * two nodes in the subtree rooted at this node.  We need this for
+ * setting the gap count.  As we build the tree bottom up in
+ * build_tree() below, this is fairly easy to do: for each node we
+ * maintain the max hop count and the max depth, ie the number of hops
+ * to the furthest leaf.  Computing the max hop count breaks down into
+ * two cases: either the path goes through this node, in which case
+ * the hop count is the sum of the two biggest child depths plus 2.
+ * Or it could be the case that the max hop path is entirely
+ * contained in a child tree, in which case the max hop count is just
+ * the max hop count of this child.
+ */
+static void update_hop_count(struct fw_node *node)
+{
+       int depths[2] = { -1, -1 };
+       int max_child_hops = 0;
+       int i;
+
+       for (i = 0; i < node->port_count; i++) {
+               if (node->ports[i] == NULL)
+                       continue;
+
+               if (node->ports[i]->max_hops > max_child_hops)
+                       max_child_hops = node->ports[i]->max_hops;
+
+               if (node->ports[i]->max_depth > depths[0]) {
+                       depths[1] = depths[0];
+                       depths[0] = node->ports[i]->max_depth;
+               } else if (node->ports[i]->max_depth > depths[1])
+                       depths[1] = node->ports[i]->max_depth;
+       }
+
+       node->max_depth = depths[0] + 1;
+       node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
+}
+
+static inline struct fw_node *fw_node(struct list_head *l)
+{
+       return list_entry(l, struct fw_node, link);
+}
+
+/**
+ * build_tree - Build the tree representation of the topology
+ * @self_ids: array of self IDs to create the tree from
+ * @self_id_count: the length of the self_ids array
+ * @local_id: the node ID of the local node
+ *
+ * This function builds the tree representation of the topology given
+ * by the self IDs from the latest bus reset.  During the construction
+ * of the tree, the function checks that the self IDs are valid and
+ * internally consistent.  On success this function returns the
+ * fw_node corresponding to the local card otherwise NULL.
+ */
+static struct fw_node *build_tree(struct fw_card *card,
+                                 u32 *sid, int self_id_count)
+{
+       struct fw_node *node, *child, *local_node, *irm_node;
+       struct list_head stack, *h;
+       u32 *next_sid, *end, q;
+       int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
+       int gap_count;
+       bool beta_repeaters_present;
+
+       local_node = NULL;
+       node = NULL;
+       INIT_LIST_HEAD(&stack);
+       stack_depth = 0;
+       end = sid + self_id_count;
+       phy_id = 0;
+       irm_node = NULL;
+       gap_count = SELF_ID_GAP_COUNT(*sid);
+       beta_repeaters_present = false;
+
+       while (sid < end) {
+               next_sid = count_ports(sid, &port_count, &child_port_count);
+
+               if (next_sid == NULL) {
+                       fw_error("Inconsistent extended self IDs.\n");
+                       return NULL;
+               }
+
+               q = *sid;
+               if (phy_id != SELF_ID_PHY_ID(q)) {
+                       fw_error("PHY ID mismatch in self ID: %d != %d.\n",
+                                phy_id, SELF_ID_PHY_ID(q));
+                       return NULL;
+               }
+
+               if (child_port_count > stack_depth) {
+                       fw_error("Topology stack underflow\n");
+                       return NULL;
+               }
+
+               /*
+                * Seek back from the top of our stack to find the
+                * start of the child nodes for this node.
+                */
+               for (i = 0, h = &stack; i < child_port_count; i++)
+                       h = h->prev;
+               /*
+                * When the stack is empty, this yields an invalid value,
+                * but that pointer will never be dereferenced.
+                */
+               child = fw_node(h);
+
+               node = fw_node_create(q, port_count, card->color);
+               if (node == NULL) {
+                       fw_error("Out of memory while building topology.\n");
+                       return NULL;
+               }
+
+               if (phy_id == (card->node_id & 0x3f))
+                       local_node = node;
+
+               if (SELF_ID_CONTENDER(q))
+                       irm_node = node;
+
+               parent_count = 0;
+
+               for (i = 0; i < port_count; i++) {
+                       switch (get_port_type(sid, i)) {
+                       case SELFID_PORT_PARENT:
+                               /*
+                                * Who's your daddy?  We don't know the
+                                * parent node at this time, so we
+                                * temporarily abuse node->color for
+                                * remembering the entry in the
+                                * node->ports array where the parent
+                                * node should be.  Later, when we
+                                * handle the parent node, we fix up
+                                * the reference.
+                                */
+                               parent_count++;
+                               node->color = i;
+                               break;
+
+                       case SELFID_PORT_CHILD:
+                               node->ports[i] = child;
+                               /*
+                                * Fix up parent reference for this
+                                * child node.
+                                */
+                               child->ports[child->color] = node;
+                               child->color = card->color;
+                               child = fw_node(child->link.next);
+                               break;
+                       }
+               }
+
+               /*
+                * Check that the node reports exactly one parent
+                * port, except for the root, which of course should
+                * have no parents.
+                */
+               if ((next_sid == end && parent_count != 0) ||
+                   (next_sid < end && parent_count != 1)) {
+                       fw_error("Parent port inconsistency for node %d: "
+                                "parent_count=%d\n", phy_id, parent_count);
+                       return NULL;
+               }
+
+               /* Pop the child nodes off the stack and push the new node. */
+               __list_del(h->prev, &stack);
+               list_add_tail(&node->link, &stack);
+               stack_depth += 1 - child_port_count;
+
+               if (node->phy_speed == SCODE_BETA &&
+                   parent_count + child_port_count > 1)
+                       beta_repeaters_present = true;
+
+               /*
+                * If PHYs report different gap counts, set an invalid count
+                * which will force a gap count reconfiguration and a reset.
+                */
+               if (SELF_ID_GAP_COUNT(q) != gap_count)
+                       gap_count = 0;
+
+               update_hop_count(node);
+
+               sid = next_sid;
+               phy_id++;
+       }
+
+       card->root_node = node;
+       card->irm_node = irm_node;
+       card->gap_count = gap_count;
+       card->beta_repeaters_present = beta_repeaters_present;
+
+       return local_node;
+}
+
+typedef void (*fw_node_callback_t)(struct fw_card * card,
+                                  struct fw_node * node,
+                                  struct fw_node * parent);
+
+static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
+                            fw_node_callback_t callback)
+{
+       struct list_head list;
+       struct fw_node *node, *next, *child, *parent;
+       int i;
+
+       INIT_LIST_HEAD(&list);
+
+       fw_node_get(root);
+       list_add_tail(&root->link, &list);
+       parent = NULL;
+       list_for_each_entry(node, &list, link) {
+               node->color = card->color;
+
+               for (i = 0; i < node->port_count; i++) {
+                       child = node->ports[i];
+                       if (!child)
+                               continue;
+                       if (child->color == card->color)
+                               parent = child;
+                       else {
+                               fw_node_get(child);
+                               list_add_tail(&child->link, &list);
+                       }
+               }
+
+               callback(card, node, parent);
+       }
+
+       list_for_each_entry_safe(node, next, &list, link)
+               fw_node_put(node);
+}
+
+static void report_lost_node(struct fw_card *card,
+                            struct fw_node *node, struct fw_node *parent)
+{
+       fw_node_event(card, node, FW_NODE_DESTROYED);
+       fw_node_put(node);
+
+       /* Topology has changed - reset bus manager retry counter */
+       card->bm_retries = 0;
+}
+
+static void report_found_node(struct fw_card *card,
+                             struct fw_node *node, struct fw_node *parent)
+{
+       int b_path = (node->phy_speed == SCODE_BETA);
+
+       if (parent != NULL) {
+               /* min() macro doesn't work here with gcc 3.4 */
+               node->max_speed = parent->max_speed < node->phy_speed ?
+                                       parent->max_speed : node->phy_speed;
+               node->b_path = parent->b_path && b_path;
+       } else {
+               node->max_speed = node->phy_speed;
+               node->b_path = b_path;
+       }
+
+       fw_node_event(card, node, FW_NODE_CREATED);
+
+       /* Topology has changed - reset bus manager retry counter */
+       card->bm_retries = 0;
+}
+
+void fw_destroy_nodes(struct fw_card *card)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&card->lock, flags);
+       card->color++;
+       if (card->local_node != NULL)
+               for_each_fw_node(card, card->local_node, report_lost_node);
+       card->local_node = NULL;
+       spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
+{
+       struct fw_node *tree;
+       int i;
+
+       tree = node1->ports[port];
+       node0->ports[port] = tree;
+       for (i = 0; i < tree->port_count; i++) {
+               if (tree->ports[i] == node1) {
+                       tree->ports[i] = node0;
+                       break;
+               }
+       }
+}
+
+/**
+ * update_tree - compare the old topology tree for card with the new
+ * one specified by root.  Queue the nodes and mark them as either
+ * found, lost or updated.  Update the nodes in the card topology tree
+ * as we go.
+ */
+static void update_tree(struct fw_card *card, struct fw_node *root)
+{
+       struct list_head list0, list1;
+       struct fw_node *node0, *node1, *next1;
+       int i, event;
+
+       INIT_LIST_HEAD(&list0);
+       list_add_tail(&card->local_node->link, &list0);
+       INIT_LIST_HEAD(&list1);
+       list_add_tail(&root->link, &list1);
+
+       node0 = fw_node(list0.next);
+       node1 = fw_node(list1.next);
+
+       while (&node0->link != &list0) {
+               WARN_ON(node0->port_count != node1->port_count);
+
+               if (node0->link_on && !node1->link_on)
+                       event = FW_NODE_LINK_OFF;
+               else if (!node0->link_on && node1->link_on)
+                       event = FW_NODE_LINK_ON;
+               else if (node1->initiated_reset && node1->link_on)
+                       event = FW_NODE_INITIATED_RESET;
+               else
+                       event = FW_NODE_UPDATED;
+
+               node0->node_id = node1->node_id;
+               node0->color = card->color;
+               node0->link_on = node1->link_on;
+               node0->initiated_reset = node1->initiated_reset;
+               node0->max_hops = node1->max_hops;
+               node1->color = card->color;
+               fw_node_event(card, node0, event);
+
+               if (card->root_node == node1)
+                       card->root_node = node0;
+               if (card->irm_node == node1)
+                       card->irm_node = node0;
+
+               for (i = 0; i < node0->port_count; i++) {
+                       if (node0->ports[i] && node1->ports[i]) {
+                               /*
+                                * This port didn't change, queue the
+                                * connected node for further
+                                * investigation.
+                                */
+                               if (node0->ports[i]->color == card->color)
+                                       continue;
+                               list_add_tail(&node0->ports[i]->link, &list0);
+                               list_add_tail(&node1->ports[i]->link, &list1);
+                       } else if (node0->ports[i]) {
+                               /*
+                                * The nodes connected here were
+                                * unplugged; unref the lost nodes and
+                                * queue FW_NODE_LOST callbacks for
+                                * them.
+                                */
+
+                               for_each_fw_node(card, node0->ports[i],
+                                                report_lost_node);
+                               node0->ports[i] = NULL;
+                       } else if (node1->ports[i]) {
+                               /*
+                                * One or more nodes were connected to
+                                * this port. Move the new nodes into
+                                * the tree and queue FW_NODE_CREATED
+                                * callbacks for them.
+                                */
+                               move_tree(node0, node1, i);
+                               for_each_fw_node(card, node0->ports[i],
+                                                report_found_node);
+                       }
+               }
+
+               node0 = fw_node(node0->link.next);
+               next1 = fw_node(node1->link.next);
+               fw_node_put(node1);
+               node1 = next1;
+       }
+}
+
+static void update_topology_map(struct fw_card *card,
+                               u32 *self_ids, int self_id_count)
+{
+       int node_count;
+
+       card->topology_map[1]++;
+       node_count = (card->root_node->node_id & 0x3f) + 1;
+       card->topology_map[2] = (node_count << 16) | self_id_count;
+       card->topology_map[0] = (self_id_count + 2) << 16;
+       memcpy(&card->topology_map[3], self_ids, self_id_count * 4);
+       fw_compute_block_crc(card->topology_map);
+}
+
+void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
+                             int self_id_count, u32 *self_ids)
+{
+       struct fw_node *local_node;
+       unsigned long flags;
+
+       /*
+        * If the selfID buffer is not the immediate successor of the
+        * previously processed one, we cannot reliably compare the
+        * old and new topologies.
+        */
+       if (!is_next_generation(generation, card->generation) &&
+           card->local_node != NULL) {
+               fw_notify("skipped bus generations, destroying all nodes\n");
+               fw_destroy_nodes(card);
+               card->bm_retries = 0;
+       }
+
+       spin_lock_irqsave(&card->lock, flags);
+
+       card->broadcast_channel_allocated = false;
+       card->node_id = node_id;
+       /*
+        * Update node_id before generation to prevent anybody from using
+        * a stale node_id together with a current generation.
+        */
+       smp_wmb();
+       card->generation = generation;
+       card->reset_jiffies = jiffies;
+       fw_schedule_bm_work(card, 0);
+
+       local_node = build_tree(card, self_ids, self_id_count);
+
+       update_topology_map(card, self_ids, self_id_count);
+
+       card->color++;
+
+       if (local_node == NULL) {
+               fw_error("topology build failed\n");
+               /* FIXME: We need to issue a bus reset in this case. */
+       } else if (card->local_node == NULL) {
+               card->local_node = local_node;
+               for_each_fw_node(card, local_node, report_found_node);
+       } else {
+               update_tree(card, local_node);
+       }
+
+       spin_unlock_irqrestore(&card->lock, flags);
+}
+EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
new file mode 100644 (file)
index 0000000..9a6ce9a
--- /dev/null
@@ -0,0 +1,978 @@
+/*
+ * Core IEEE1394 transaction logic
+ *
+ * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#include "core.h"
+
+#define HEADER_PRI(pri)                        ((pri) << 0)
+#define HEADER_TCODE(tcode)            ((tcode) << 4)
+#define HEADER_RETRY(retry)            ((retry) << 8)
+#define HEADER_TLABEL(tlabel)          ((tlabel) << 10)
+#define HEADER_DESTINATION(destination)        ((destination) << 16)
+#define HEADER_SOURCE(source)          ((source) << 16)
+#define HEADER_RCODE(rcode)            ((rcode) << 12)
+#define HEADER_OFFSET_HIGH(offset_high)        ((offset_high) << 0)
+#define HEADER_DATA_LENGTH(length)     ((length) << 16)
+#define HEADER_EXTENDED_TCODE(tcode)   ((tcode) << 0)
+
+#define HEADER_GET_TCODE(q)            (((q) >> 4) & 0x0f)
+#define HEADER_GET_TLABEL(q)           (((q) >> 10) & 0x3f)
+#define HEADER_GET_RCODE(q)            (((q) >> 12) & 0x0f)
+#define HEADER_GET_DESTINATION(q)      (((q) >> 16) & 0xffff)
+#define HEADER_GET_SOURCE(q)           (((q) >> 16) & 0xffff)
+#define HEADER_GET_OFFSET_HIGH(q)      (((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q)      (((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q)   (((q) >> 0) & 0xffff)
+
+#define HEADER_DESTINATION_IS_BROADCAST(q) \
+       (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
+
+#define PHY_PACKET_CONFIG      0x0
+#define PHY_PACKET_LINK_ON     0x1
+#define PHY_PACKET_SELF_ID     0x2
+
+#define PHY_CONFIG_GAP_COUNT(gap_count)        (((gap_count) << 16) | (1 << 22))
+#define PHY_CONFIG_ROOT_ID(node_id)    ((((node_id) & 0x3f) << 24) | (1 << 23))
+#define PHY_IDENTIFIER(id)             ((id) << 30)
+
+static int close_transaction(struct fw_transaction *transaction,
+                            struct fw_card *card, int rcode)
+{
+       struct fw_transaction *t;
+       unsigned long flags;
+
+       spin_lock_irqsave(&card->lock, flags);
+       list_for_each_entry(t, &card->transaction_list, link) {
+               if (t == transaction) {
+                       list_del(&t->link);
+                       card->tlabel_mask &= ~(1 << t->tlabel);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       if (&t->link != &card->transaction_list) {
+               t->callback(card, rcode, NULL, 0, t->callback_data);
+               return 0;
+       }
+
+       return -ENOENT;
+}
+
+/*
+ * Only valid for transactions that are potentially pending (ie have
+ * been sent).
+ */
+int fw_cancel_transaction(struct fw_card *card,
+                         struct fw_transaction *transaction)
+{
+       /*
+        * Cancel the packet transmission if it's still queued.  That
+        * will call the packet transmission callback which cancels
+        * the transaction.
+        */
+
+       if (card->driver->cancel_packet(card, &transaction->packet) == 0)
+               return 0;
+
+       /*
+        * If the request packet has already been sent, we need to see
+        * if the transaction is still pending and remove it in that case.
+        */
+
+       return close_transaction(transaction, card, RCODE_CANCELLED);
+}
+EXPORT_SYMBOL(fw_cancel_transaction);
+
+static void transmit_complete_callback(struct fw_packet *packet,
+                                      struct fw_card *card, int status)
+{
+       struct fw_transaction *t =
+           container_of(packet, struct fw_transaction, packet);
+
+       switch (status) {
+       case ACK_COMPLETE:
+               close_transaction(t, card, RCODE_COMPLETE);
+               break;
+       case ACK_PENDING:
+               t->timestamp = packet->timestamp;
+               break;
+       case ACK_BUSY_X:
+       case ACK_BUSY_A:
+       case ACK_BUSY_B:
+               close_transaction(t, card, RCODE_BUSY);
+               break;
+       case ACK_DATA_ERROR:
+               close_transaction(t, card, RCODE_DATA_ERROR);
+               break;
+       case ACK_TYPE_ERROR:
+               close_transaction(t, card, RCODE_TYPE_ERROR);
+               break;
+       default:
+               /*
+                * In this case the ack is really a juju specific
+                * rcode, so just forward that to the callback.
+                */
+               close_transaction(t, card, status);
+               break;
+       }
+}
+
+static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
+               int destination_id, int source_id, int generation, int speed,
+               unsigned long long offset, void *payload, size_t length)
+{
+       int ext_tcode;
+
+       if (tcode == TCODE_STREAM_DATA) {
+               packet->header[0] =
+                       HEADER_DATA_LENGTH(length) |
+                       destination_id |
+                       HEADER_TCODE(TCODE_STREAM_DATA);
+               packet->header_length = 4;
+               packet->payload = payload;
+               packet->payload_length = length;
+
+               goto common;
+       }
+
+       if (tcode > 0x10) {
+               ext_tcode = tcode & ~0x10;
+               tcode = TCODE_LOCK_REQUEST;
+       } else
+               ext_tcode = 0;
+
+       packet->header[0] =
+               HEADER_RETRY(RETRY_X) |
+               HEADER_TLABEL(tlabel) |
+               HEADER_TCODE(tcode) |
+               HEADER_DESTINATION(destination_id);
+       packet->header[1] =
+               HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
+       packet->header[2] =
+               offset;
+
+       switch (tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+               packet->header[3] = *(u32 *)payload;
+               packet->header_length = 16;
+               packet->payload_length = 0;
+               break;
+
+       case TCODE_LOCK_REQUEST:
+       case TCODE_WRITE_BLOCK_REQUEST:
+               packet->header[3] =
+                       HEADER_DATA_LENGTH(length) |
+                       HEADER_EXTENDED_TCODE(ext_tcode);
+               packet->header_length = 16;
+               packet->payload = payload;
+               packet->payload_length = length;
+               break;
+
+       case TCODE_READ_QUADLET_REQUEST:
+               packet->header_length = 12;
+               packet->payload_length = 0;
+               break;
+
+       case TCODE_READ_BLOCK_REQUEST:
+               packet->header[3] =
+                       HEADER_DATA_LENGTH(length) |
+                       HEADER_EXTENDED_TCODE(ext_tcode);
+               packet->header_length = 16;
+               packet->payload_length = 0;
+               break;
+       }
+ common:
+       packet->speed = speed;
+       packet->generation = generation;
+       packet->ack = 0;
+       packet->payload_bus = 0;
+}
+
+/**
+ * This function provides low-level access to the IEEE1394 transaction
+ * logic.  Most C programs would use either fw_read(), fw_write() or
+ * fw_lock() instead - those functions are convenience wrappers for
+ * this function.  The fw_send_request() function is primarily
+ * provided as a flexible, one-stop entry point for languages bindings
+ * and protocol bindings.
+ *
+ * FIXME: Document this function further, in particular the possible
+ * values for rcode in the callback.  In short, we map ACK_COMPLETE to
+ * RCODE_COMPLETE, internal errors set errno and set rcode to
+ * RCODE_SEND_ERROR (which is out of range for standard ieee1394
+ * rcodes).  All other rcodes are forwarded unchanged.  For all
+ * errors, payload is NULL, length is 0.
+ *
+ * Can not expect the callback to be called before the function
+ * returns, though this does happen in some cases (ACK_COMPLETE and
+ * errors).
+ *
+ * The payload is only used for write requests and must not be freed
+ * until the callback has been called.
+ *
+ * @param card the card from which to send the request
+ * @param tcode the tcode for this transaction.  Do not use
+ *   TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
+ *   etc. to specify tcode and ext_tcode.
+ * @param node_id the destination node ID (bus ID and PHY ID concatenated)
+ * @param generation the generation for which node_id is valid
+ * @param speed the speed to use for sending the request
+ * @param offset the 48 bit offset on the destination node
+ * @param payload the data payload for the request subaction
+ * @param length the length in bytes of the data to read
+ * @param callback function to be called when the transaction is completed
+ * @param callback_data pointer to arbitrary data, which will be
+ *   passed to the callback
+ *
+ * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
+ * needs to synthesize @destination_id with fw_stream_packet_destination_id().
+ */
+void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+                    int destination_id, int generation, int speed,
+                    unsigned long long offset, void *payload, size_t length,
+                    fw_transaction_callback_t callback, void *callback_data)
+{
+       unsigned long flags;
+       int tlabel;
+
+       /*
+        * Bump the flush timer up 100ms first of all so we
+        * don't race with a flush timer callback.
+        */
+
+       mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
+
+       /*
+        * Allocate tlabel from the bitmap and put the transaction on
+        * the list while holding the card spinlock.
+        */
+
+       spin_lock_irqsave(&card->lock, flags);
+
+       tlabel = card->current_tlabel;
+       if (card->tlabel_mask & (1 << tlabel)) {
+               /*
+                * The next label in the round-robin sequence is still in
+                * use, i.e. no free transaction label: fail the request
+                * right away via the callback.
+                */
+               spin_unlock_irqrestore(&card->lock, flags);
+               callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
+               return;
+       }
+
+       /* Cycle through 32 transaction labels (0x1f mask) per card. */
+       card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
+       card->tlabel_mask |= (1 << tlabel);
+
+       /*
+        * Note: t->node_id stores the destination; it is matched against
+        * the source of the incoming response in fw_core_handle_response().
+        */
+       t->node_id = destination_id;
+       t->tlabel = tlabel;
+       t->callback = callback;
+       t->callback_data = callback_data;
+
+       fw_fill_request(&t->packet, tcode, t->tlabel,
+                       destination_id, card->node_id, generation,
+                       speed, offset, payload, length);
+       t->packet.callback = transmit_complete_callback;
+
+       list_add_tail(&t->link, &card->transaction_list);
+
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       /* Hand the request packet to the low-level (e.g. OHCI) driver. */
+       card->driver->send_request(card, &t->packet);
+}
+EXPORT_SYMBOL(fw_send_request);
+
+/* Completion context for fw_run_transaction()'s synchronous wait. */
+struct transaction_callback_data {
+       struct completion done;
+       void *payload;
+       int rcode;
+};
+
+static void transaction_callback(struct fw_card *card, int rcode,
+                                void *payload, size_t length, void *data)
+{
+       struct transaction_callback_data *d = data;
+
+       /*
+        * Copy the response data out before waking the sleeping caller;
+        * assumes d->payload can hold length bytes (fw_run_transaction()
+        * passes the caller's matching payload/length pair).
+        */
+       if (rcode == RCODE_COMPLETE)
+               memcpy(d->payload, payload, length);
+       d->rcode = rcode;
+       complete(&d->done);
+}
+
+/**
+ * fw_run_transaction - send request and sleep until transaction is completed
+ *
+ * Returns the RCODE.
+ *
+ * Sleeps in wait_for_completion(); must not be called from atomic context.
+ * The fw_transaction lives on this stack frame, which is safe because we
+ * do not return before the transaction's callback has run.
+ */
+int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
+                      int generation, int speed, unsigned long long offset,
+                      void *payload, size_t length)
+{
+       struct transaction_callback_data d;
+       struct fw_transaction t;
+
+       init_completion(&d.done);
+       d.payload = payload;
+       fw_send_request(card, &t, tcode, destination_id, generation, speed,
+                       offset, payload, length, transaction_callback, &d);
+       wait_for_completion(&d.done);
+
+       return d.rcode;
+}
+EXPORT_SYMBOL(fw_run_transaction);
+
+/*
+ * There is only one static PHY config packet; phy_config_mutex
+ * serializes its users, phy_config_done signals transmit completion.
+ */
+static DEFINE_MUTEX(phy_config_mutex);
+static DECLARE_COMPLETION(phy_config_done);
+
+static void transmit_phy_packet_callback(struct fw_packet *packet,
+                                        struct fw_card *card, int status)
+{
+       complete(&phy_config_done);
+}
+
+static struct fw_packet phy_config_packet = {
+       .header_length  = 8,
+       .payload_length = 0,
+       .speed          = SCODE_100,
+       .callback       = transmit_phy_packet_callback,
+};
+
+/*
+ * Broadcast a PHY configuration packet that forces node_id to become
+ * root and/or sets the bus gap count.  Waits (up to 100ms) for the
+ * packet to be transmitted; may sleep on phy_config_mutex.
+ */
+void fw_send_phy_config(struct fw_card *card,
+                       int node_id, int generation, int gap_count)
+{
+       long timeout = DIV_ROUND_UP(HZ, 10);
+       u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
+                  PHY_CONFIG_ROOT_ID(node_id) |
+                  PHY_CONFIG_GAP_COUNT(gap_count);
+
+       mutex_lock(&phy_config_mutex);
+
+       /* PHY packets carry the quadlet followed by its bit inverse. */
+       phy_config_packet.header[0] = data;
+       phy_config_packet.header[1] = ~data;
+       phy_config_packet.generation = generation;
+       INIT_COMPLETION(phy_config_done);
+
+       card->driver->send_request(card, &phy_config_packet);
+       wait_for_completion_timeout(&phy_config_done, timeout);
+
+       mutex_unlock(&phy_config_mutex);
+}
+
+/*
+ * Cancel all pending transactions on this card and complete each of
+ * them with RCODE_CANCELLED.  Also resets the tlabel allocation bitmap.
+ */
+void fw_flush_transactions(struct fw_card *card)
+{
+       struct fw_transaction *t, *next;
+       struct list_head list;
+       unsigned long flags;
+
+       INIT_LIST_HEAD(&list);
+       spin_lock_irqsave(&card->lock, flags);
+       /* Detach the whole list under the lock, then work on it unlocked. */
+       list_splice_init(&card->transaction_list, &list);
+       card->tlabel_mask = 0;
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       list_for_each_entry_safe(t, next, &list, link) {
+               card->driver->cancel_packet(card, &t->packet);
+
+               /*
+                * At this point cancel_packet will never call the
+                * transaction callback, since we just took all the
+                * transactions out of the list.  So do it here.
+                */
+               t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
+       }
+}
+
+/*
+ * Find the first handler whose region overlaps [offset, offset+length).
+ * Caller must hold address_handler_lock.
+ */
+static struct fw_address_handler *lookup_overlapping_address_handler(
+       struct list_head *list, unsigned long long offset, size_t length)
+{
+       struct fw_address_handler *handler;
+
+       list_for_each_entry(handler, list, link) {
+               if (handler->offset < offset + length &&
+                   offset < handler->offset + handler->length)
+                       return handler;
+       }
+
+       return NULL;
+}
+
+/*
+ * Find a handler whose region fully contains [offset, offset+length).
+ * Caller must hold address_handler_lock.
+ */
+static struct fw_address_handler *lookup_enclosing_address_handler(
+       struct list_head *list, unsigned long long offset, size_t length)
+{
+       struct fw_address_handler *handler;
+
+       list_for_each_entry(handler, list, link) {
+               if (handler->offset <= offset &&
+                   offset + length <= handler->offset + handler->length)
+                       return handler;
+       }
+
+       return NULL;
+}
+
+/* Global registry of incoming-request address handlers. */
+static DEFINE_SPINLOCK(address_handler_lock);
+static LIST_HEAD(address_handler_list);
+
+const struct fw_address_region fw_high_memory_region =
+       { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL,  };
+EXPORT_SYMBOL(fw_high_memory_region);
+
+/* Currently unused address regions, kept here for reference. */
+#if 0
+const struct fw_address_region fw_low_memory_region =
+       { .start = 0x000000000000ULL, .end = 0x000100000000ULL,  };
+const struct fw_address_region fw_private_region =
+       { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL,  };
+const struct fw_address_region fw_csr_region =
+       { .start = CSR_REGISTER_BASE,
+         .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END,  };
+const struct fw_address_region fw_unit_space_region =
+       { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
+#endif  /*  0  */
+
+/**
+ * fw_core_add_address_handler - register for incoming requests
+ * @handler: callback
+ * @region: region in the IEEE 1212 node space address range
+ *
+ * region->start, ->end, and handler->length have to be quadlet-aligned.
+ *
+ * When a request is received that falls within the specified address range,
+ * the specified callback is invoked.  The parameters passed to the callback
+ * give the details of the particular request.
+ *
+ * Return value:  0 on success, non-zero otherwise.
+ * The start offset of the handler's address region is determined by
+ * fw_core_add_address_handler() and is returned in handler->offset.
+ * Returns -EBUSY if no non-overlapping slot could be found within the
+ * region, -EINVAL on alignment/range errors.
+ */
+int fw_core_add_address_handler(struct fw_address_handler *handler,
+                               const struct fw_address_region *region)
+{
+       struct fw_address_handler *other;
+       unsigned long flags;
+       int ret = -EBUSY;
+
+       /* Start/end must be quadlet-aligned 48-bit offsets. */
+       if (region->start & 0xffff000000000003ULL ||
+           region->end   & 0xffff000000000003ULL ||
+           region->start >= region->end ||
+           handler->length & 3 ||
+           handler->length == 0)
+               return -EINVAL;
+
+       spin_lock_irqsave(&address_handler_lock, flags);
+
+       /*
+        * First-fit search: slide past each conflicting handler until a
+        * free, fully-contained slot is found or the region is exhausted.
+        */
+       handler->offset = region->start;
+       while (handler->offset + handler->length <= region->end) {
+               other =
+                   lookup_overlapping_address_handler(&address_handler_list,
+                                                      handler->offset,
+                                                      handler->length);
+               if (other != NULL) {
+                       handler->offset += other->length;
+               } else {
+                       list_add_tail(&handler->link, &address_handler_list);
+                       ret = 0;
+                       break;
+               }
+       }
+
+       spin_unlock_irqrestore(&address_handler_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(fw_core_add_address_handler);
+
+/**
+ * fw_core_remove_address_handler - unregister an address handler
+ * @handler: handler previously registered with fw_core_add_address_handler()
+ */
+void fw_core_remove_address_handler(struct fw_address_handler *handler)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&address_handler_lock, flags);
+       list_del(&handler->link);
+       spin_unlock_irqrestore(&address_handler_lock, flags);
+}
+EXPORT_SYMBOL(fw_core_remove_address_handler);
+
+/*
+ * A received request together with the response packet being built for
+ * it.  Allocated in allocate_request(), freed either directly by
+ * fw_send_response() (no-response cases) or by free_response_callback()
+ * after the response packet has been transmitted.
+ */
+struct fw_request {
+       struct fw_packet response;      /* outgoing response packet */
+       u32 request_header[4];          /* copy of the request's header */
+       int ack;                        /* ack code the request received */
+       u32 length;                     /* payload length in bytes */
+       u32 data[0];                    /* inline copy of request payload */
+};
+
+static void free_response_callback(struct fw_packet *packet,
+                                  struct fw_card *card, int status)
+{
+       struct fw_request *request;
+
+       /* The response packet is embedded in the request; free the whole. */
+       request = container_of(packet, struct fw_request, response);
+       kfree(request);
+}
+
+/*
+ * Build a response packet header from a request header.  Note the swap:
+ * the response's source is the request's destination and vice versa.
+ * For read/lock responses, @payload/@length become the response payload;
+ * @payload may be NULL for a quadlet read (responds with zero).
+ */
+void fw_fill_response(struct fw_packet *response, u32 *request_header,
+                     int rcode, void *payload, size_t length)
+{
+       int tcode, tlabel, extended_tcode, source, destination;
+
+       tcode          = HEADER_GET_TCODE(request_header[0]);
+       tlabel         = HEADER_GET_TLABEL(request_header[0]);
+       source         = HEADER_GET_DESTINATION(request_header[0]);
+       destination    = HEADER_GET_SOURCE(request_header[1]);
+       extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
+
+       response->header[0] =
+               HEADER_RETRY(RETRY_1) |
+               HEADER_TLABEL(tlabel) |
+               HEADER_DESTINATION(destination);
+       response->header[1] =
+               HEADER_SOURCE(source) |
+               HEADER_RCODE(rcode);
+       response->header[2] = 0;
+
+       switch (tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+       case TCODE_WRITE_BLOCK_REQUEST:
+               response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
+               response->header_length = 12;
+               response->payload_length = 0;
+               break;
+
+       case TCODE_READ_QUADLET_REQUEST:
+               response->header[0] |=
+                       HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
+               if (payload != NULL)
+                       response->header[3] = *(u32 *)payload;
+               else
+                       response->header[3] = 0;
+               response->header_length = 16;
+               response->payload_length = 0;
+               break;
+
+       case TCODE_READ_BLOCK_REQUEST:
+       case TCODE_LOCK_REQUEST:
+               /* Request tcode + 2 is the corresponding response tcode. */
+               response->header[0] |= HEADER_TCODE(tcode + 2);
+               response->header[3] =
+                       HEADER_DATA_LENGTH(length) |
+                       HEADER_EXTENDED_TCODE(extended_tcode);
+               response->header_length = 16;
+               response->payload = payload;
+               response->payload_length = length;
+               break;
+
+       default:
+               BUG();
+               return;
+       }
+
+       response->payload_bus = 0;
+}
+EXPORT_SYMBOL(fw_fill_response);
+
+/*
+ * Allocate and fill a struct fw_request for an incoming request packet,
+ * copying header and (for writes/locks) payload.  Returns NULL on bad
+ * tcode or allocation failure.  Runs in atomic context (GFP_ATOMIC).
+ */
+static struct fw_request *allocate_request(struct fw_packet *p)
+{
+       struct fw_request *request;
+       u32 *data, length;
+       int request_tcode, t;
+
+       request_tcode = HEADER_GET_TCODE(p->header[0]);
+       switch (request_tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+               /* Quadlet writes carry their datum in the header. */
+               data = &p->header[3];
+               length = 4;
+               break;
+
+       case TCODE_WRITE_BLOCK_REQUEST:
+       case TCODE_LOCK_REQUEST:
+               data = p->payload;
+               length = HEADER_GET_DATA_LENGTH(p->header[3]);
+               break;
+
+       case TCODE_READ_QUADLET_REQUEST:
+               data = NULL;
+               length = 4;
+               break;
+
+       case TCODE_READ_BLOCK_REQUEST:
+               data = NULL;
+               length = HEADER_GET_DATA_LENGTH(p->header[3]);
+               break;
+
+       default:
+               fw_error("ERROR - corrupt request received - %08x %08x %08x\n",
+                        p->header[0], p->header[1], p->header[2]);
+               return NULL;
+       }
+
+       request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
+       if (request == NULL)
+               return NULL;
+
+       /*
+        * Derive the response timestamp: add 4000 to the low 13 bits
+        * (cycle count) of the request timestamp, carrying 0x2000 into
+        * the upper (seconds) field on overflow past 8000 cycles.
+        * NOTE(review): presumably this implements the split-transaction
+        * timing offset for the controller's timestamp format — confirm
+        * against the OHCI cycle timer definition.
+        */
+       t = (p->timestamp & 0x1fff) + 4000;
+       if (t >= 8000)
+               t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
+       else
+               t = (p->timestamp & ~0x1fff) + t;
+
+       request->response.speed = p->speed;
+       request->response.timestamp = t;
+       request->response.generation = p->generation;
+       request->response.ack = 0;
+       request->response.callback = free_response_callback;
+       request->ack = p->ack;
+       request->length = length;
+       if (data)
+               memcpy(request->data, data, length);
+
+       memcpy(request->request_header, p->header, sizeof(p->header));
+
+       return request;
+}
+
+/*
+ * Send the response for @request with result code @rcode.  Consumes
+ * @request in all cases: freed here if no response is due, otherwise
+ * freed by free_response_callback() once the packet was transmitted.
+ */
+void fw_send_response(struct fw_card *card,
+                     struct fw_request *request, int rcode)
+{
+       /* unified transaction or broadcast transaction: don't respond */
+       if (request->ack != ACK_PENDING ||
+           HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
+               kfree(request);
+               return;
+       }
+
+       /* Only a successful read/lock carries payload back. */
+       if (rcode == RCODE_COMPLETE)
+               fw_fill_response(&request->response, request->request_header,
+                                rcode, request->data, request->length);
+       else
+               fw_fill_response(&request->response, request->request_header,
+                                rcode, NULL, 0);
+
+       card->driver->send_response(card, &request->response);
+}
+EXPORT_SYMBOL(fw_send_response);
+
+/*
+ * Entry point for incoming request packets from the low-level driver.
+ * Dispatches to the registered address handler enclosing the request's
+ * offset range, or responds with RCODE_ADDRESS_ERROR.
+ */
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+{
+       struct fw_address_handler *handler;
+       struct fw_request *request;
+       unsigned long long offset;
+       unsigned long flags;
+       int tcode, destination, source;
+
+       /* Ignore packets that were not successfully acked. */
+       if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
+               return;
+
+       request = allocate_request(p);
+       if (request == NULL) {
+               /* FIXME: send statically allocated busy packet. */
+               return;
+       }
+
+       /* 48-bit destination offset spans header quadlets 1 and 2. */
+       offset      =
+               ((unsigned long long)
+                HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
+       tcode       = HEADER_GET_TCODE(p->header[0]);
+       destination = HEADER_GET_DESTINATION(p->header[0]);
+       source      = HEADER_GET_SOURCE(p->header[1]);
+
+       spin_lock_irqsave(&address_handler_lock, flags);
+       handler = lookup_enclosing_address_handler(&address_handler_list,
+                                                  offset, request->length);
+       spin_unlock_irqrestore(&address_handler_lock, flags);
+
+       /*
+        * FIXME: lookup the fw_node corresponding to the sender of
+        * this request and pass that to the address handler instead
+        * of the node ID.  We may also want to move the address
+        * allocations to fw_node so we only do this callback if the
+        * upper layers registered it for this node.
+        */
+
+       if (handler == NULL)
+               fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+       else
+               handler->address_callback(card, request,
+                                         tcode, destination, source,
+                                         p->generation, p->speed, offset,
+                                         request->data, request->length,
+                                         handler->callback_data);
+}
+EXPORT_SYMBOL(fw_core_handle_request);
+
+/*
+ * Entry point for incoming response packets.  Matches the response to a
+ * pending transaction by (source node, tlabel), removes it from the
+ * card's transaction list, and invokes its completion callback.
+ */
+void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
+{
+       struct fw_transaction *t;
+       unsigned long flags;
+       u32 *data;
+       size_t data_length;
+       int tcode, tlabel, destination, source, rcode;
+
+       tcode       = HEADER_GET_TCODE(p->header[0]);
+       tlabel      = HEADER_GET_TLABEL(p->header[0]);
+       destination = HEADER_GET_DESTINATION(p->header[0]);
+       source      = HEADER_GET_SOURCE(p->header[1]);
+       rcode       = HEADER_GET_RCODE(p->header[1]);
+
+       spin_lock_irqsave(&card->lock, flags);
+       list_for_each_entry(t, &card->transaction_list, link) {
+               if (t->node_id == source && t->tlabel == tlabel) {
+                       list_del(&t->link);
+                       card->tlabel_mask &= ~(1 << t->tlabel);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       /*
+        * If the loop ran to completion, t's link member aliases the
+        * list head itself - i.e. no matching transaction was found.
+        */
+       if (&t->link == &card->transaction_list) {
+               fw_notify("Unsolicited response (source %x, tlabel %x)\n",
+                         source, tlabel);
+               return;
+       }
+
+       /*
+        * FIXME: sanity check packet, is length correct, does tcodes
+        * and addresses match.
+        */
+
+       switch (tcode) {
+       case TCODE_READ_QUADLET_RESPONSE:
+               /* Quadlet reads return their datum in the header. */
+               data = (u32 *) &p->header[3];
+               data_length = 4;
+               break;
+
+       case TCODE_WRITE_RESPONSE:
+               data = NULL;
+               data_length = 0;
+               break;
+
+       case TCODE_READ_BLOCK_RESPONSE:
+       case TCODE_LOCK_RESPONSE:
+               data = p->payload;
+               data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
+               break;
+
+       default:
+               /* Should never happen, this is just to shut up gcc. */
+               data = NULL;
+               data_length = 0;
+               break;
+       }
+
+       /*
+        * The response handler may be executed while the request handler
+        * is still pending.  Cancel the request handler.
+        */
+       card->driver->cancel_packet(card, &t->packet);
+
+       t->callback(card, rcode, data, data_length, t->callback_data);
+}
+EXPORT_SYMBOL(fw_core_handle_response);
+
+static const struct fw_address_region topology_map_region =
+       { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
+         .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
+
+/*
+ * Serve read requests on the CSR TOPOLOGY_MAP register range from the
+ * card's cached topology map.  Writes get RCODE_TYPE_ERROR, unaligned
+ * accesses RCODE_ADDRESS_ERROR.
+ */
+static void handle_topology_map(struct fw_card *card, struct fw_request *request,
+               int tcode, int destination, int source, int generation,
+               int speed, unsigned long long offset,
+               void *payload, size_t length, void *callback_data)
+{
+       int i, start, end;
+       __be32 *map;
+
+       if (!TCODE_IS_READ_REQUEST(tcode)) {
+               fw_send_response(card, request, RCODE_TYPE_ERROR);
+               return;
+       }
+
+       /* Only quadlet-aligned, quadlet-sized accesses are valid. */
+       if ((offset & 3) > 0 || (length & 3) > 0) {
+               fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+               return;
+       }
+
+       start = (offset - topology_map_region.start) / 4;
+       end = start + length / 4;
+       map = payload;
+
+       /* Topology map quadlets go out in big-endian bus order. */
+       for (i = 0; i < length / 4; i++)
+               map[i] = cpu_to_be32(card->topology_map[start + i]);
+
+       fw_send_response(card, request, RCODE_COMPLETE);
+}
+
+static struct fw_address_handler topology_map = {
+       .length                 = 0x200,
+       .address_callback       = handle_topology_map,
+};
+
+static const struct fw_address_region registers_region =
+       { .start = CSR_REGISTER_BASE,
+         .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
+
+/*
+ * Serve requests to the core CSR registers that are not handled by the
+ * OHCI hardware itself (cycle/bus time, broadcast channel, ...).
+ */
+static void handle_registers(struct fw_card *card, struct fw_request *request,
+               int tcode, int destination, int source, int generation,
+               int speed, unsigned long long offset,
+               void *payload, size_t length, void *callback_data)
+{
+       int reg = offset & ~CSR_REGISTER_BASE;
+       unsigned long long bus_time;
+       __be32 *data = payload;
+       int rcode = RCODE_COMPLETE;
+
+       switch (reg) {
+       case CSR_CYCLE_TIME:
+       case CSR_BUS_TIME:
+               if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
+                       rcode = RCODE_TYPE_ERROR;
+                       break;
+               }
+
+               /* BUS_TIME is the upper part of the same counter. */
+               bus_time = card->driver->get_bus_time(card);
+               if (reg == CSR_CYCLE_TIME)
+                       *data = cpu_to_be32(bus_time);
+               else
+                       *data = cpu_to_be32(bus_time >> 25);
+               break;
+
+       case CSR_BROADCAST_CHANNEL:
+               if (tcode == TCODE_READ_QUADLET_REQUEST)
+                       *data = cpu_to_be32(card->broadcast_channel);
+               else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
+                       card->broadcast_channel =
+                           (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
+                           BROADCAST_CHANNEL_INITIAL;
+               else
+                       rcode = RCODE_TYPE_ERROR;
+               break;
+
+       case CSR_BUS_MANAGER_ID:
+       case CSR_BANDWIDTH_AVAILABLE:
+       case CSR_CHANNELS_AVAILABLE_HI:
+       case CSR_CHANNELS_AVAILABLE_LO:
+               /*
+                * FIXME: these are handled by the OHCI hardware and
+                * the stack never sees these request. If we add
+                * support for a new type of controller that doesn't
+                * handle this in hardware we need to deal with these
+                * transactions.
+                */
+               BUG();
+               break;
+
+       case CSR_BUSY_TIMEOUT:
+               /* FIXME: Implement this. */
+               /* fall through */
+
+       default:
+               rcode = RCODE_ADDRESS_ERROR;
+               break;
+       }
+
+       fw_send_response(card, request, rcode);
+}
+
+static struct fw_address_handler registers = {
+       .length                 = 0x400,
+       .address_callback       = handle_registers,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
+MODULE_LICENSE("GPL");
+
+/* Config ROM textual descriptor leaf: "Linux Firewire" (vendor). */
+static const u32 vendor_textual_descriptor[] = {
+       /* textual descriptor leaf () */
+       0x00060000,
+       0x00000000,
+       0x00000000,
+       0x4c696e75,             /* L i n u */
+       0x78204669,             /* x   F i */
+       0x72657769,             /* r e w i */
+       0x72650000,             /* r e     */
+};
+
+/* Config ROM textual descriptor leaf: "Juju" (the driver's model name). */
+static const u32 model_textual_descriptor[] = {
+       /* model descriptor leaf () */
+       0x00030000,
+       0x00000000,
+       0x00000000,
+       0x4a756a75,             /* J u j u */
+};
+
+static struct fw_descriptor vendor_id_descriptor = {
+       .length = ARRAY_SIZE(vendor_textual_descriptor),
+       .immediate = 0x03d00d1e,        /* vendor ID immediate entry */
+       .key = 0x81000000,              /* textual descriptor leaf pointer */
+       .data = vendor_textual_descriptor,
+};
+
+static struct fw_descriptor model_id_descriptor = {
+       .length = ARRAY_SIZE(model_textual_descriptor),
+       .immediate = 0x17000001,        /* model ID immediate entry */
+       .key = 0x81000000,              /* textual descriptor leaf pointer */
+       .data = model_textual_descriptor,
+};
+
+/*
+ * Module init: register the firewire bus type and character device,
+ * then install the core CSR address handlers and config ROM entries.
+ * NOTE(review): the add_address_handler/add_descriptor return values
+ * are ignored here.
+ */
+static int __init fw_core_init(void)
+{
+       int ret;
+
+       ret = bus_register(&fw_bus_type);
+       if (ret < 0)
+               return ret;
+
+       /* Dynamically allocated major for the /dev/fw* character devices. */
+       fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
+       if (fw_cdev_major < 0) {
+               bus_unregister(&fw_bus_type);
+               return fw_cdev_major;
+       }
+
+       fw_core_add_address_handler(&topology_map, &topology_map_region);
+       fw_core_add_address_handler(&registers, &registers_region);
+       fw_core_add_descriptor(&vendor_id_descriptor);
+       fw_core_add_descriptor(&model_id_descriptor);
+
+       return 0;
+}
+
+/* Module exit: tear down in reverse order of fw_core_init(). */
+static void __exit fw_core_cleanup(void)
+{
+       unregister_chrdev(fw_cdev_major, "firewire");
+       bus_unregister(&fw_bus_type);
+       idr_destroy(&fw_device_idr);
+}
+
+module_init(fw_core_init);
+module_exit(fw_core_cleanup);
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
deleted file mode 100644 (file)
index ba6cd70..0000000
+++ /dev/null
@@ -1,567 +0,0 @@
-/*
- * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/bug.h>
-#include <linux/completion.h>
-#include <linux/crc-itu-t.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-
-#include <asm/atomic.h>
-#include <asm/byteorder.h>
-
-#include "core.h"
-
-int fw_compute_block_crc(u32 *block)
-{
-       __be32 be32_block[256];
-       int i, length;
-
-       length = (*block >> 16) & 0xff;
-       for (i = 0; i < length; i++)
-               be32_block[i] = cpu_to_be32(block[i + 1]);
-       *block |= crc_itu_t(0, (u8 *) be32_block, length * 4);
-
-       return length;
-}
-
-static DEFINE_MUTEX(card_mutex);
-static LIST_HEAD(card_list);
-
-static LIST_HEAD(descriptor_list);
-static int descriptor_count;
-
-#define BIB_CRC(v)             ((v) <<  0)
-#define BIB_CRC_LENGTH(v)      ((v) << 16)
-#define BIB_INFO_LENGTH(v)     ((v) << 24)
-
-#define BIB_LINK_SPEED(v)      ((v) <<  0)
-#define BIB_GENERATION(v)      ((v) <<  4)
-#define BIB_MAX_ROM(v)         ((v) <<  8)
-#define BIB_MAX_RECEIVE(v)     ((v) << 12)
-#define BIB_CYC_CLK_ACC(v)     ((v) << 16)
-#define BIB_PMC                        ((1) << 27)
-#define BIB_BMC                        ((1) << 28)
-#define BIB_ISC                        ((1) << 29)
-#define BIB_CMC                        ((1) << 30)
-#define BIB_IMC                        ((1) << 31)
-
-static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
-{
-       struct fw_descriptor *desc;
-       static u32 config_rom[256];
-       int i, j, length;
-
-       /*
-        * Initialize contents of config rom buffer.  On the OHCI
-        * controller, block reads to the config rom accesses the host
-        * memory, but quadlet read access the hardware bus info block
-        * registers.  That's just crack, but it means we should make
-        * sure the contents of bus info block in host memory matches
-        * the version stored in the OHCI registers.
-        */
-
-       memset(config_rom, 0, sizeof(config_rom));
-       config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
-       config_rom[1] = 0x31333934;
-
-       config_rom[2] =
-               BIB_LINK_SPEED(card->link_speed) |
-               BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
-               BIB_MAX_ROM(2) |
-               BIB_MAX_RECEIVE(card->max_receive) |
-               BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
-       config_rom[3] = card->guid >> 32;
-       config_rom[4] = card->guid;
-
-       /* Generate root directory. */
-       i = 5;
-       config_rom[i++] = 0;
-       config_rom[i++] = 0x0c0083c0; /* node capabilities */
-       j = i + descriptor_count;
-
-       /* Generate root directory entries for descriptors. */
-       list_for_each_entry (desc, &descriptor_list, link) {
-               if (desc->immediate > 0)
-                       config_rom[i++] = desc->immediate;
-               config_rom[i] = desc->key | (j - i);
-               i++;
-               j += desc->length;
-       }
-
-       /* Update root directory length. */
-       config_rom[5] = (i - 5 - 1) << 16;
-
-       /* End of root directory, now copy in descriptors. */
-       list_for_each_entry (desc, &descriptor_list, link) {
-               memcpy(&config_rom[i], desc->data, desc->length * 4);
-               i += desc->length;
-       }
-
-       /* Calculate CRCs for all blocks in the config rom.  This
-        * assumes that CRC length and info length are identical for
-        * the bus info block, which is always the case for this
-        * implementation. */
-       for (i = 0; i < j; i += length + 1)
-               length = fw_compute_block_crc(config_rom + i);
-
-       *config_rom_length = j;
-
-       return config_rom;
-}
-
-static void update_config_roms(void)
-{
-       struct fw_card *card;
-       u32 *config_rom;
-       size_t length;
-
-       list_for_each_entry (card, &card_list, link) {
-               config_rom = generate_config_rom(card, &length);
-               card->driver->set_config_rom(card, config_rom, length);
-       }
-}
-
-int fw_core_add_descriptor(struct fw_descriptor *desc)
-{
-       size_t i;
-
-       /*
-        * Check descriptor is valid; the length of all blocks in the
-        * descriptor has to add up to exactly the length of the
-        * block.
-        */
-       i = 0;
-       while (i < desc->length)
-               i += (desc->data[i] >> 16) + 1;
-
-       if (i != desc->length)
-               return -EINVAL;
-
-       mutex_lock(&card_mutex);
-
-       list_add_tail(&desc->link, &descriptor_list);
-       descriptor_count++;
-       if (desc->immediate > 0)
-               descriptor_count++;
-       update_config_roms();
-
-       mutex_unlock(&card_mutex);
-
-       return 0;
-}
-
-void fw_core_remove_descriptor(struct fw_descriptor *desc)
-{
-       mutex_lock(&card_mutex);
-
-       list_del(&desc->link);
-       descriptor_count--;
-       if (desc->immediate > 0)
-               descriptor_count--;
-       update_config_roms();
-
-       mutex_unlock(&card_mutex);
-}
-
-static int set_broadcast_channel(struct device *dev, void *data)
-{
-       fw_device_set_broadcast_channel(fw_device(dev), (long)data);
-       return 0;
-}
-
-static void allocate_broadcast_channel(struct fw_card *card, int generation)
-{
-       int channel, bandwidth = 0;
-
-       fw_iso_resource_manage(card, generation, 1ULL << 31,
-                              &channel, &bandwidth, true);
-       if (channel == 31) {
-               card->broadcast_channel_allocated = true;
-               device_for_each_child(card->device, (void *)(long)generation,
-                                     set_broadcast_channel);
-       }
-}
-
-static const char gap_count_table[] = {
-       63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
-};
-
-void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
-{
-       int scheduled;
-
-       fw_card_get(card);
-       scheduled = schedule_delayed_work(&card->work, delay);
-       if (!scheduled)
-               fw_card_put(card);
-}
-
-static void fw_card_bm_work(struct work_struct *work)
-{
-       struct fw_card *card = container_of(work, struct fw_card, work.work);
-       struct fw_device *root_device;
-       struct fw_node *root_node;
-       unsigned long flags;
-       int root_id, new_root_id, irm_id, local_id;
-       int gap_count, generation, grace, rcode;
-       bool do_reset = false;
-       bool root_device_is_running;
-       bool root_device_is_cmc;
-       __be32 lock_data[2];
-
-       spin_lock_irqsave(&card->lock, flags);
-
-       if (card->local_node == NULL) {
-               spin_unlock_irqrestore(&card->lock, flags);
-               goto out_put_card;
-       }
-
-       generation = card->generation;
-       root_node = card->root_node;
-       fw_node_get(root_node);
-       root_device = root_node->data;
-       root_device_is_running = root_device &&
-                       atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
-       root_device_is_cmc = root_device && root_device->cmc;
-       root_id  = root_node->node_id;
-       irm_id   = card->irm_node->node_id;
-       local_id = card->local_node->node_id;
-
-       grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
-
-       if (is_next_generation(generation, card->bm_generation) ||
-           (card->bm_generation != generation && grace)) {
-               /*
-                * This first step is to figure out who is IRM and
-                * then try to become bus manager.  If the IRM is not
-                * well defined (e.g. does not have an active link
-                * layer or does not responds to our lock request, we
-                * will have to do a little vigilante bus management.
-                * In that case, we do a goto into the gap count logic
-                * so that when we do the reset, we still optimize the
-                * gap count.  That could well save a reset in the
-                * next generation.
-                */
-
-               if (!card->irm_node->link_on) {
-                       new_root_id = local_id;
-                       fw_notify("IRM has link off, making local node (%02x) root.\n",
-                                 new_root_id);
-                       goto pick_me;
-               }
-
-               lock_data[0] = cpu_to_be32(0x3f);
-               lock_data[1] = cpu_to_be32(local_id);
-
-               spin_unlock_irqrestore(&card->lock, flags);
-
-               rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
-                               irm_id, generation, SCODE_100,
-                               CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
-                               lock_data, sizeof(lock_data));
-
-               if (rcode == RCODE_GENERATION)
-                       /* Another bus reset, BM work has been rescheduled. */
-                       goto out;
-
-               if (rcode == RCODE_COMPLETE &&
-                   lock_data[0] != cpu_to_be32(0x3f)) {
-
-                       /* Somebody else is BM.  Only act as IRM. */
-                       if (local_id == irm_id)
-                               allocate_broadcast_channel(card, generation);
-
-                       goto out;
-               }
-
-               spin_lock_irqsave(&card->lock, flags);
-
-               if (rcode != RCODE_COMPLETE) {
-                       /*
-                        * The lock request failed, maybe the IRM
-                        * isn't really IRM capable after all. Let's
-                        * do a bus reset and pick the local node as
-                        * root, and thus, IRM.
-                        */
-                       new_root_id = local_id;
-                       fw_notify("BM lock failed, making local node (%02x) root.\n",
-                                 new_root_id);
-                       goto pick_me;
-               }
-       } else if (card->bm_generation != generation) {
-               /*
-                * We weren't BM in the last generation, and the last
-                * bus reset is less than 125ms ago.  Reschedule this job.
-                */
-               spin_unlock_irqrestore(&card->lock, flags);
-               fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
-               goto out;
-       }
-
-       /*
-        * We're bus manager for this generation, so next step is to
-        * make sure we have an active cycle master and do gap count
-        * optimization.
-        */
-       card->bm_generation = generation;
-
-       if (root_device == NULL) {
-               /*
-                * Either link_on is false, or we failed to read the
-                * config rom.  In either case, pick another root.
-                */
-               new_root_id = local_id;
-       } else if (!root_device_is_running) {
-               /*
-                * If we haven't probed this device yet, bail out now
-                * and let's try again once that's done.
-                */
-               spin_unlock_irqrestore(&card->lock, flags);
-               goto out;
-       } else if (root_device_is_cmc) {
-               /*
-                * FIXME: I suppose we should set the cmstr bit in the
-                * STATE_CLEAR register of this node, as described in
-                * 1394-1995, 8.4.2.6.  Also, send out a force root
-                * packet for this node.
-                */
-               new_root_id = root_id;
-       } else {
-               /*
-                * Current root has an active link layer and we
-                * successfully read the config rom, but it's not
-                * cycle master capable.
-                */
-               new_root_id = local_id;
-       }
-
- pick_me:
-       /*
-        * Pick a gap count from 1394a table E-1.  The table doesn't cover
-        * the typically much larger 1394b beta repeater delays though.
-        */
-       if (!card->beta_repeaters_present &&
-           root_node->max_hops < ARRAY_SIZE(gap_count_table))
-               gap_count = gap_count_table[root_node->max_hops];
-       else
-               gap_count = 63;
-
-       /*
-        * Finally, figure out if we should do a reset or not.  If we have
-        * done less than 5 resets with the same physical topology and we
-        * have either a new root or a new gap count setting, let's do it.
-        */
-
-       if (card->bm_retries++ < 5 &&
-           (card->gap_count != gap_count || new_root_id != root_id))
-               do_reset = true;
-
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       if (do_reset) {
-               fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
-                         card->index, new_root_id, gap_count);
-               fw_send_phy_config(card, new_root_id, generation, gap_count);
-               fw_core_initiate_bus_reset(card, 1);
-               /* Will allocate broadcast channel after the reset. */
-       } else {
-               if (local_id == irm_id)
-                       allocate_broadcast_channel(card, generation);
-       }
-
- out:
-       fw_node_put(root_node);
- out_put_card:
-       fw_card_put(card);
-}
-
-static void flush_timer_callback(unsigned long data)
-{
-       struct fw_card *card = (struct fw_card *)data;
-
-       fw_flush_transactions(card);
-}
-
-void fw_card_initialize(struct fw_card *card,
-                       const struct fw_card_driver *driver,
-                       struct device *device)
-{
-       static atomic_t index = ATOMIC_INIT(-1);
-
-       card->index = atomic_inc_return(&index);
-       card->driver = driver;
-       card->device = device;
-       card->current_tlabel = 0;
-       card->tlabel_mask = 0;
-       card->color = 0;
-       card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
-
-       kref_init(&card->kref);
-       init_completion(&card->done);
-       INIT_LIST_HEAD(&card->transaction_list);
-       spin_lock_init(&card->lock);
-       setup_timer(&card->flush_timer,
-                   flush_timer_callback, (unsigned long)card);
-
-       card->local_node = NULL;
-
-       INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
-}
-EXPORT_SYMBOL(fw_card_initialize);
-
-int fw_card_add(struct fw_card *card,
-               u32 max_receive, u32 link_speed, u64 guid)
-{
-       u32 *config_rom;
-       size_t length;
-       int ret;
-
-       card->max_receive = max_receive;
-       card->link_speed = link_speed;
-       card->guid = guid;
-
-       mutex_lock(&card_mutex);
-       config_rom = generate_config_rom(card, &length);
-       list_add_tail(&card->link, &card_list);
-       mutex_unlock(&card_mutex);
-
-       ret = card->driver->enable(card, config_rom, length);
-       if (ret < 0) {
-               mutex_lock(&card_mutex);
-               list_del(&card->link);
-               mutex_unlock(&card_mutex);
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(fw_card_add);
-
-
-/*
- * The next few functions implements a dummy driver that use once a
- * card driver shuts down an fw_card.  This allows the driver to
- * cleanly unload, as all IO to the card will be handled by the dummy
- * driver instead of calling into the (possibly) unloaded module.  The
- * dummy driver just fails all IO.
- */
-
-static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
-{
-       BUG();
-       return -1;
-}
-
-static int dummy_update_phy_reg(struct fw_card *card, int address,
-                               int clear_bits, int set_bits)
-{
-       return -ENODEV;
-}
-
-static int dummy_set_config_rom(struct fw_card *card,
-                               u32 *config_rom, size_t length)
-{
-       /*
-        * We take the card out of card_list before setting the dummy
-        * driver, so this should never get called.
-        */
-       BUG();
-       return -1;
-}
-
-static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
-{
-       packet->callback(packet, card, -ENODEV);
-}
-
-static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
-{
-       packet->callback(packet, card, -ENODEV);
-}
-
-static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
-{
-       return -ENOENT;
-}
-
-static int dummy_enable_phys_dma(struct fw_card *card,
-                                int node_id, int generation)
-{
-       return -ENODEV;
-}
-
-static struct fw_card_driver dummy_driver = {
-       .enable          = dummy_enable,
-       .update_phy_reg  = dummy_update_phy_reg,
-       .set_config_rom  = dummy_set_config_rom,
-       .send_request    = dummy_send_request,
-       .cancel_packet   = dummy_cancel_packet,
-       .send_response   = dummy_send_response,
-       .enable_phys_dma = dummy_enable_phys_dma,
-};
-
-void fw_card_release(struct kref *kref)
-{
-       struct fw_card *card = container_of(kref, struct fw_card, kref);
-
-       complete(&card->done);
-}
-
-void fw_core_remove_card(struct fw_card *card)
-{
-       card->driver->update_phy_reg(card, 4,
-                                    PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
-       fw_core_initiate_bus_reset(card, 1);
-
-       mutex_lock(&card_mutex);
-       list_del_init(&card->link);
-       mutex_unlock(&card_mutex);
-
-       /* Set up the dummy driver. */
-       card->driver = &dummy_driver;
-
-       fw_destroy_nodes(card);
-
-       /* Wait for all users, especially device workqueue jobs, to finish. */
-       fw_card_put(card);
-       wait_for_completion(&card->done);
-
-       WARN_ON(!list_empty(&card->transaction_list));
-       del_timer_sync(&card->flush_timer);
-}
-EXPORT_SYMBOL(fw_core_remove_card);
-
-int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
-{
-       int reg = short_reset ? 5 : 1;
-       int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
-
-       return card->driver->update_phy_reg(card, reg, 0, bit);
-}
-EXPORT_SYMBOL(fw_core_initiate_bus_reset);
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
deleted file mode 100644 (file)
index 042c045..0000000
+++ /dev/null
@@ -1,1458 +0,0 @@
-/*
- * Char device for device raw access
- *
- * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/compat.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/firewire.h>
-#include <linux/firewire-cdev.h>
-#include <linux/idr.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/poll.h>
-#include <linux/preempt.h>
-#include <linux/spinlock.h>
-#include <linux/time.h>
-#include <linux/vmalloc.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#include "core.h"
-
-struct client {
-       u32 version;
-       struct fw_device *device;
-
-       spinlock_t lock;
-       bool in_shutdown;
-       struct idr resource_idr;
-       struct list_head event_list;
-       wait_queue_head_t wait;
-       u64 bus_reset_closure;
-
-       struct fw_iso_context *iso_context;
-       u64 iso_closure;
-       struct fw_iso_buffer buffer;
-       unsigned long vm_start;
-
-       struct list_head link;
-       struct kref kref;
-};
-
-static inline void client_get(struct client *client)
-{
-       kref_get(&client->kref);
-}
-
-static void client_release(struct kref *kref)
-{
-       struct client *client = container_of(kref, struct client, kref);
-
-       fw_device_put(client->device);
-       kfree(client);
-}
-
-static void client_put(struct client *client)
-{
-       kref_put(&client->kref, client_release);
-}
-
-struct client_resource;
-typedef void (*client_resource_release_fn_t)(struct client *,
-                                            struct client_resource *);
-struct client_resource {
-       client_resource_release_fn_t release;
-       int handle;
-};
-
-struct address_handler_resource {
-       struct client_resource resource;
-       struct fw_address_handler handler;
-       __u64 closure;
-       struct client *client;
-};
-
-struct outbound_transaction_resource {
-       struct client_resource resource;
-       struct fw_transaction transaction;
-};
-
-struct inbound_transaction_resource {
-       struct client_resource resource;
-       struct fw_request *request;
-       void *data;
-       size_t length;
-};
-
-struct descriptor_resource {
-       struct client_resource resource;
-       struct fw_descriptor descriptor;
-       u32 data[0];
-};
-
-struct iso_resource {
-       struct client_resource resource;
-       struct client *client;
-       /* Schedule work and access todo only with client->lock held. */
-       struct delayed_work work;
-       enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
-             ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
-       int generation;
-       u64 channels;
-       s32 bandwidth;
-       struct iso_resource_event *e_alloc, *e_dealloc;
-};
-
-static void schedule_iso_resource(struct iso_resource *);
-static void release_iso_resource(struct client *, struct client_resource *);
-
-/*
- * dequeue_event() just kfree()'s the event, so the event has to be
- * the first field in a struct XYZ_event.
- */
-struct event {
-       struct { void *data; size_t size; } v[2];
-       struct list_head link;
-};
-
-struct bus_reset_event {
-       struct event event;
-       struct fw_cdev_event_bus_reset reset;
-};
-
-struct outbound_transaction_event {
-       struct event event;
-       struct client *client;
-       struct outbound_transaction_resource r;
-       struct fw_cdev_event_response response;
-};
-
-struct inbound_transaction_event {
-       struct event event;
-       struct fw_cdev_event_request request;
-};
-
-struct iso_interrupt_event {
-       struct event event;
-       struct fw_cdev_event_iso_interrupt interrupt;
-};
-
-struct iso_resource_event {
-       struct event event;
-       struct fw_cdev_event_iso_resource resource;
-};
-
-static inline void __user *u64_to_uptr(__u64 value)
-{
-       return (void __user *)(unsigned long)value;
-}
-
-static inline __u64 uptr_to_u64(void __user *ptr)
-{
-       return (__u64)(unsigned long)ptr;
-}
-
-static int fw_device_op_open(struct inode *inode, struct file *file)
-{
-       struct fw_device *device;
-       struct client *client;
-
-       device = fw_device_get_by_devt(inode->i_rdev);
-       if (device == NULL)
-               return -ENODEV;
-
-       if (fw_device_is_shutdown(device)) {
-               fw_device_put(device);
-               return -ENODEV;
-       }
-
-       client = kzalloc(sizeof(*client), GFP_KERNEL);
-       if (client == NULL) {
-               fw_device_put(device);
-               return -ENOMEM;
-       }
-
-       client->device = device;
-       spin_lock_init(&client->lock);
-       idr_init(&client->resource_idr);
-       INIT_LIST_HEAD(&client->event_list);
-       init_waitqueue_head(&client->wait);
-       kref_init(&client->kref);
-
-       file->private_data = client;
-
-       mutex_lock(&device->client_list_mutex);
-       list_add_tail(&client->link, &device->client_list);
-       mutex_unlock(&device->client_list_mutex);
-
-       return 0;
-}
-
-static void queue_event(struct client *client, struct event *event,
-                       void *data0, size_t size0, void *data1, size_t size1)
-{
-       unsigned long flags;
-
-       event->v[0].data = data0;
-       event->v[0].size = size0;
-       event->v[1].data = data1;
-       event->v[1].size = size1;
-
-       spin_lock_irqsave(&client->lock, flags);
-       if (client->in_shutdown)
-               kfree(event);
-       else
-               list_add_tail(&event->link, &client->event_list);
-       spin_unlock_irqrestore(&client->lock, flags);
-
-       wake_up_interruptible(&client->wait);
-}
-
-static int dequeue_event(struct client *client,
-                        char __user *buffer, size_t count)
-{
-       struct event *event;
-       size_t size, total;
-       int i, ret;
-
-       ret = wait_event_interruptible(client->wait,
-                       !list_empty(&client->event_list) ||
-                       fw_device_is_shutdown(client->device));
-       if (ret < 0)
-               return ret;
-
-       if (list_empty(&client->event_list) &&
-                      fw_device_is_shutdown(client->device))
-               return -ENODEV;
-
-       spin_lock_irq(&client->lock);
-       event = list_first_entry(&client->event_list, struct event, link);
-       list_del(&event->link);
-       spin_unlock_irq(&client->lock);
-
-       total = 0;
-       for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
-               size = min(event->v[i].size, count - total);
-               if (copy_to_user(buffer + total, event->v[i].data, size)) {
-                       ret = -EFAULT;
-                       goto out;
-               }
-               total += size;
-       }
-       ret = total;
-
- out:
-       kfree(event);
-
-       return ret;
-}
-
-static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
-                                size_t count, loff_t *offset)
-{
-       struct client *client = file->private_data;
-
-       return dequeue_event(client, buffer, count);
-}
-
-static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
-                                struct client *client)
-{
-       struct fw_card *card = client->device->card;
-
-       spin_lock_irq(&card->lock);
-
-       event->closure       = client->bus_reset_closure;
-       event->type          = FW_CDEV_EVENT_BUS_RESET;
-       event->generation    = client->device->generation;
-       event->node_id       = client->device->node_id;
-       event->local_node_id = card->local_node->node_id;
-       event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
-       event->irm_node_id   = card->irm_node->node_id;
-       event->root_node_id  = card->root_node->node_id;
-
-       spin_unlock_irq(&card->lock);
-}
-
-static void for_each_client(struct fw_device *device,
-                           void (*callback)(struct client *client))
-{
-       struct client *c;
-
-       mutex_lock(&device->client_list_mutex);
-       list_for_each_entry(c, &device->client_list, link)
-               callback(c);
-       mutex_unlock(&device->client_list_mutex);
-}
-
-static int schedule_reallocations(int id, void *p, void *data)
-{
-       struct client_resource *r = p;
-
-       if (r->release == release_iso_resource)
-               schedule_iso_resource(container_of(r,
-                                       struct iso_resource, resource));
-       return 0;
-}
-
-static void queue_bus_reset_event(struct client *client)
-{
-       struct bus_reset_event *e;
-
-       e = kzalloc(sizeof(*e), GFP_KERNEL);
-       if (e == NULL) {
-               fw_notify("Out of memory when allocating bus reset event\n");
-               return;
-       }
-
-       fill_bus_reset_event(&e->reset, client);
-
-       queue_event(client, &e->event,
-                   &e->reset, sizeof(e->reset), NULL, 0);
-
-       spin_lock_irq(&client->lock);
-       idr_for_each(&client->resource_idr, schedule_reallocations, client);
-       spin_unlock_irq(&client->lock);
-}
-
-void fw_device_cdev_update(struct fw_device *device)
-{
-       for_each_client(device, queue_bus_reset_event);
-}
-
-static void wake_up_client(struct client *client)
-{
-       wake_up_interruptible(&client->wait);
-}
-
-void fw_device_cdev_remove(struct fw_device *device)
-{
-       for_each_client(device, wake_up_client);
-}
-
-static int ioctl_get_info(struct client *client, void *buffer)
-{
-       struct fw_cdev_get_info *get_info = buffer;
-       struct fw_cdev_event_bus_reset bus_reset;
-       unsigned long ret = 0;
-
-       client->version = get_info->version;
-       get_info->version = FW_CDEV_VERSION;
-       get_info->card = client->device->card->index;
-
-       down_read(&fw_device_rwsem);
-
-       if (get_info->rom != 0) {
-               void __user *uptr = u64_to_uptr(get_info->rom);
-               size_t want = get_info->rom_length;
-               size_t have = client->device->config_rom_length * 4;
-
-               ret = copy_to_user(uptr, client->device->config_rom,
-                                  min(want, have));
-       }
-       get_info->rom_length = client->device->config_rom_length * 4;
-
-       up_read(&fw_device_rwsem);
-
-       if (ret != 0)
-               return -EFAULT;
-
-       client->bus_reset_closure = get_info->bus_reset_closure;
-       if (get_info->bus_reset != 0) {
-               void __user *uptr = u64_to_uptr(get_info->bus_reset);
-
-               fill_bus_reset_event(&bus_reset, client);
-               if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
-                       return -EFAULT;
-       }
-
-       return 0;
-}
-
-static int add_client_resource(struct client *client,
-                              struct client_resource *resource, gfp_t gfp_mask)
-{
-       unsigned long flags;
-       int ret;
-
- retry:
-       if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
-               return -ENOMEM;
-
-       spin_lock_irqsave(&client->lock, flags);
-       if (client->in_shutdown)
-               ret = -ECANCELED;
-       else
-               ret = idr_get_new(&client->resource_idr, resource,
-                                 &resource->handle);
-       if (ret >= 0) {
-               client_get(client);
-               if (resource->release == release_iso_resource)
-                       schedule_iso_resource(container_of(resource,
-                                               struct iso_resource, resource));
-       }
-       spin_unlock_irqrestore(&client->lock, flags);
-
-       if (ret == -EAGAIN)
-               goto retry;
-
-       return ret < 0 ? ret : 0;
-}
-
-static int release_client_resource(struct client *client, u32 handle,
-                                  client_resource_release_fn_t release,
-                                  struct client_resource **resource)
-{
-       struct client_resource *r;
-
-       spin_lock_irq(&client->lock);
-       if (client->in_shutdown)
-               r = NULL;
-       else
-               r = idr_find(&client->resource_idr, handle);
-       if (r && r->release == release)
-               idr_remove(&client->resource_idr, handle);
-       spin_unlock_irq(&client->lock);
-
-       if (!(r && r->release == release))
-               return -EINVAL;
-
-       if (resource)
-               *resource = r;
-       else
-               r->release(client, r);
-
-       client_put(client);
-
-       return 0;
-}
-
-static void release_transaction(struct client *client,
-                               struct client_resource *resource)
-{
-       struct outbound_transaction_resource *r = container_of(resource,
-                       struct outbound_transaction_resource, resource);
-
-       fw_cancel_transaction(client->device->card, &r->transaction);
-}
-
-static void complete_transaction(struct fw_card *card, int rcode,
-                                void *payload, size_t length, void *data)
-{
-       struct outbound_transaction_event *e = data;
-       struct fw_cdev_event_response *rsp = &e->response;
-       struct client *client = e->client;
-       unsigned long flags;
-
-       if (length < rsp->length)
-               rsp->length = length;
-       if (rcode == RCODE_COMPLETE)
-               memcpy(rsp->data, payload, rsp->length);
-
-       spin_lock_irqsave(&client->lock, flags);
-       /*
-        * 1. If called while in shutdown, the idr tree must be left untouched.
-        *    The idr handle will be removed and the client reference will be
-        *    dropped later.
-        * 2. If the call chain was release_client_resource ->
-        *    release_transaction -> complete_transaction (instead of a normal
-        *    conclusion of the transaction), i.e. if this resource was already
-        *    unregistered from the idr, the client reference will be dropped
-        *    by release_client_resource and we must not drop it here.
-        */
-       if (!client->in_shutdown &&
-           idr_find(&client->resource_idr, e->r.resource.handle)) {
-               idr_remove(&client->resource_idr, e->r.resource.handle);
-               /* Drop the idr's reference */
-               client_put(client);
-       }
-       spin_unlock_irqrestore(&client->lock, flags);
-
-       rsp->type = FW_CDEV_EVENT_RESPONSE;
-       rsp->rcode = rcode;
-
-       /*
-        * In the case that sizeof(*rsp) doesn't align with the position of the
-        * data, and the read is short, preserve an extra copy of the data
-        * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
-        * for short reads and some apps depended on it, this is both safe
-        * and prudent for compatibility.
-        */
-       if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
-               queue_event(client, &e->event, rsp, sizeof(*rsp),
-                           rsp->data, rsp->length);
-       else
-               queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
-                           NULL, 0);
-
-       /* Drop the transaction callback's reference */
-       client_put(client);
-}
-
-static int init_request(struct client *client,
-                       struct fw_cdev_send_request *request,
-                       int destination_id, int speed)
-{
-       struct outbound_transaction_event *e;
-       int ret;
-
-       if (request->tcode != TCODE_STREAM_DATA &&
-           (request->length > 4096 || request->length > 512 << speed))
-               return -EIO;
-
-       e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
-       if (e == NULL)
-               return -ENOMEM;
-
-       e->client = client;
-       e->response.length = request->length;
-       e->response.closure = request->closure;
-
-       if (request->data &&
-           copy_from_user(e->response.data,
-                          u64_to_uptr(request->data), request->length)) {
-               ret = -EFAULT;
-               goto failed;
-       }
-
-       e->r.resource.release = release_transaction;
-       ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
-       if (ret < 0)
-               goto failed;
-
-       /* Get a reference for the transaction callback */
-       client_get(client);
-
-       fw_send_request(client->device->card, &e->r.transaction,
-                       request->tcode, destination_id, request->generation,
-                       speed, request->offset, e->response.data,
-                       request->length, complete_transaction, e);
-       return 0;
-
- failed:
-       kfree(e);
-
-       return ret;
-}
-
-static int ioctl_send_request(struct client *client, void *buffer)
-{
-       struct fw_cdev_send_request *request = buffer;
-
-       switch (request->tcode) {
-       case TCODE_WRITE_QUADLET_REQUEST:
-       case TCODE_WRITE_BLOCK_REQUEST:
-       case TCODE_READ_QUADLET_REQUEST:
-       case TCODE_READ_BLOCK_REQUEST:
-       case TCODE_LOCK_MASK_SWAP:
-       case TCODE_LOCK_COMPARE_SWAP:
-       case TCODE_LOCK_FETCH_ADD:
-       case TCODE_LOCK_LITTLE_ADD:
-       case TCODE_LOCK_BOUNDED_ADD:
-       case TCODE_LOCK_WRAP_ADD:
-       case TCODE_LOCK_VENDOR_DEPENDENT:
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return init_request(client, request, client->device->node_id,
-                           client->device->max_speed);
-}
-
-static void release_request(struct client *client,
-                           struct client_resource *resource)
-{
-       struct inbound_transaction_resource *r = container_of(resource,
-                       struct inbound_transaction_resource, resource);
-
-       fw_send_response(client->device->card, r->request,
-                        RCODE_CONFLICT_ERROR);
-       kfree(r);
-}
-
-static void handle_request(struct fw_card *card, struct fw_request *request,
-                          int tcode, int destination, int source,
-                          int generation, int speed,
-                          unsigned long long offset,
-                          void *payload, size_t length, void *callback_data)
-{
-       struct address_handler_resource *handler = callback_data;
-       struct inbound_transaction_resource *r;
-       struct inbound_transaction_event *e;
-       int ret;
-
-       r = kmalloc(sizeof(*r), GFP_ATOMIC);
-       e = kmalloc(sizeof(*e), GFP_ATOMIC);
-       if (r == NULL || e == NULL)
-               goto failed;
-
-       r->request = request;
-       r->data    = payload;
-       r->length  = length;
-
-       r->resource.release = release_request;
-       ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
-       if (ret < 0)
-               goto failed;
-
-       e->request.type    = FW_CDEV_EVENT_REQUEST;
-       e->request.tcode   = tcode;
-       e->request.offset  = offset;
-       e->request.length  = length;
-       e->request.handle  = r->resource.handle;
-       e->request.closure = handler->closure;
-
-       queue_event(handler->client, &e->event,
-                   &e->request, sizeof(e->request), payload, length);
-       return;
-
- failed:
-       kfree(r);
-       kfree(e);
-       fw_send_response(card, request, RCODE_CONFLICT_ERROR);
-}
-
-static void release_address_handler(struct client *client,
-                                   struct client_resource *resource)
-{
-       struct address_handler_resource *r =
-           container_of(resource, struct address_handler_resource, resource);
-
-       fw_core_remove_address_handler(&r->handler);
-       kfree(r);
-}
-
-static int ioctl_allocate(struct client *client, void *buffer)
-{
-       struct fw_cdev_allocate *request = buffer;
-       struct address_handler_resource *r;
-       struct fw_address_region region;
-       int ret;
-
-       r = kmalloc(sizeof(*r), GFP_KERNEL);
-       if (r == NULL)
-               return -ENOMEM;
-
-       region.start = request->offset;
-       region.end = request->offset + request->length;
-       r->handler.length = request->length;
-       r->handler.address_callback = handle_request;
-       r->handler.callback_data = r;
-       r->closure = request->closure;
-       r->client = client;
-
-       ret = fw_core_add_address_handler(&r->handler, &region);
-       if (ret < 0) {
-               kfree(r);
-               return ret;
-       }
-
-       r->resource.release = release_address_handler;
-       ret = add_client_resource(client, &r->resource, GFP_KERNEL);
-       if (ret < 0) {
-               release_address_handler(client, &r->resource);
-               return ret;
-       }
-       request->handle = r->resource.handle;
-
-       return 0;
-}
-
-static int ioctl_deallocate(struct client *client, void *buffer)
-{
-       struct fw_cdev_deallocate *request = buffer;
-
-       return release_client_resource(client, request->handle,
-                                      release_address_handler, NULL);
-}
-
-static int ioctl_send_response(struct client *client, void *buffer)
-{
-       struct fw_cdev_send_response *request = buffer;
-       struct client_resource *resource;
-       struct inbound_transaction_resource *r;
-
-       if (release_client_resource(client, request->handle,
-                                   release_request, &resource) < 0)
-               return -EINVAL;
-
-       r = container_of(resource, struct inbound_transaction_resource,
-                        resource);
-       if (request->length < r->length)
-               r->length = request->length;
-       if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
-               return -EFAULT;
-
-       fw_send_response(client->device->card, r->request, request->rcode);
-       kfree(r);
-
-       return 0;
-}
-
-static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
-{
-       struct fw_cdev_initiate_bus_reset *request = buffer;
-       int short_reset;
-
-       short_reset = (request->type == FW_CDEV_SHORT_RESET);
-
-       return fw_core_initiate_bus_reset(client->device->card, short_reset);
-}
-
-static void release_descriptor(struct client *client,
-                              struct client_resource *resource)
-{
-       struct descriptor_resource *r =
-               container_of(resource, struct descriptor_resource, resource);
-
-       fw_core_remove_descriptor(&r->descriptor);
-       kfree(r);
-}
-
-static int ioctl_add_descriptor(struct client *client, void *buffer)
-{
-       struct fw_cdev_add_descriptor *request = buffer;
-       struct descriptor_resource *r;
-       int ret;
-
-       /* Access policy: Allow this ioctl only on local nodes' device files. */
-       if (!client->device->is_local)
-               return -ENOSYS;
-
-       if (request->length > 256)
-               return -EINVAL;
-
-       r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
-       if (r == NULL)
-               return -ENOMEM;
-
-       if (copy_from_user(r->data,
-                          u64_to_uptr(request->data), request->length * 4)) {
-               ret = -EFAULT;
-               goto failed;
-       }
-
-       r->descriptor.length    = request->length;
-       r->descriptor.immediate = request->immediate;
-       r->descriptor.key       = request->key;
-       r->descriptor.data      = r->data;
-
-       ret = fw_core_add_descriptor(&r->descriptor);
-       if (ret < 0)
-               goto failed;
-
-       r->resource.release = release_descriptor;
-       ret = add_client_resource(client, &r->resource, GFP_KERNEL);
-       if (ret < 0) {
-               fw_core_remove_descriptor(&r->descriptor);
-               goto failed;
-       }
-       request->handle = r->resource.handle;
-
-       return 0;
- failed:
-       kfree(r);
-
-       return ret;
-}
-
-static int ioctl_remove_descriptor(struct client *client, void *buffer)
-{
-       struct fw_cdev_remove_descriptor *request = buffer;
-
-       return release_client_resource(client, request->handle,
-                                      release_descriptor, NULL);
-}
-
-static void iso_callback(struct fw_iso_context *context, u32 cycle,
-                        size_t header_length, void *header, void *data)
-{
-       struct client *client = data;
-       struct iso_interrupt_event *e;
-
-       e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
-       if (e == NULL)
-               return;
-
-       e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
-       e->interrupt.closure   = client->iso_closure;
-       e->interrupt.cycle     = cycle;
-       e->interrupt.header_length = header_length;
-       memcpy(e->interrupt.header, header, header_length);
-       queue_event(client, &e->event, &e->interrupt,
-                   sizeof(e->interrupt) + header_length, NULL, 0);
-}
-
-static int ioctl_create_iso_context(struct client *client, void *buffer)
-{
-       struct fw_cdev_create_iso_context *request = buffer;
-       struct fw_iso_context *context;
-
-       /* We only support one context at this time. */
-       if (client->iso_context != NULL)
-               return -EBUSY;
-
-       if (request->channel > 63)
-               return -EINVAL;
-
-       switch (request->type) {
-       case FW_ISO_CONTEXT_RECEIVE:
-               if (request->header_size < 4 || (request->header_size & 3))
-                       return -EINVAL;
-
-               break;
-
-       case FW_ISO_CONTEXT_TRANSMIT:
-               if (request->speed > SCODE_3200)
-                       return -EINVAL;
-
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       context =  fw_iso_context_create(client->device->card,
-                                        request->type,
-                                        request->channel,
-                                        request->speed,
-                                        request->header_size,
-                                        iso_callback, client);
-       if (IS_ERR(context))
-               return PTR_ERR(context);
-
-       client->iso_closure = request->closure;
-       client->iso_context = context;
-
-       /* We only support one context at this time. */
-       request->handle = 0;
-
-       return 0;
-}
-
-/* Macros for decoding the iso packet control header. */
-#define GET_PAYLOAD_LENGTH(v)  ((v) & 0xffff)
-#define GET_INTERRUPT(v)       (((v) >> 16) & 0x01)
-#define GET_SKIP(v)            (((v) >> 17) & 0x01)
-#define GET_TAG(v)             (((v) >> 18) & 0x03)
-#define GET_SY(v)              (((v) >> 20) & 0x0f)
-#define GET_HEADER_LENGTH(v)   (((v) >> 24) & 0xff)
-
-static int ioctl_queue_iso(struct client *client, void *buffer)
-{
-       struct fw_cdev_queue_iso *request = buffer;
-       struct fw_cdev_iso_packet __user *p, *end, *next;
-       struct fw_iso_context *ctx = client->iso_context;
-       unsigned long payload, buffer_end, header_length;
-       u32 control;
-       int count;
-       struct {
-               struct fw_iso_packet packet;
-               u8 header[256];
-       } u;
-
-       if (ctx == NULL || request->handle != 0)
-               return -EINVAL;
-
-       /*
-        * If the user passes a non-NULL data pointer, has mmap()'ed
-        * the iso buffer, and the pointer points inside the buffer,
-        * we setup the payload pointers accordingly.  Otherwise we
-        * set them both to 0, which will still let packets with
-        * payload_length == 0 through.  In other words, if no packets
-        * use the indirect payload, the iso buffer need not be mapped
-        * and the request->data pointer is ignored.
-        */
-
-       payload = (unsigned long)request->data - client->vm_start;
-       buffer_end = client->buffer.page_count << PAGE_SHIFT;
-       if (request->data == 0 || client->buffer.pages == NULL ||
-           payload >= buffer_end) {
-               payload = 0;
-               buffer_end = 0;
-       }
-
-       p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
-
-       if (!access_ok(VERIFY_READ, p, request->size))
-               return -EFAULT;
-
-       end = (void __user *)p + request->size;
-       count = 0;
-       while (p < end) {
-               if (get_user(control, &p->control))
-                       return -EFAULT;
-               u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
-               u.packet.interrupt = GET_INTERRUPT(control);
-               u.packet.skip = GET_SKIP(control);
-               u.packet.tag = GET_TAG(control);
-               u.packet.sy = GET_SY(control);
-               u.packet.header_length = GET_HEADER_LENGTH(control);
-
-               if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
-                       header_length = u.packet.header_length;
-               } else {
-                       /*
-                        * We require that header_length is a multiple of
-                        * the fixed header size, ctx->header_size.
-                        */
-                       if (ctx->header_size == 0) {
-                               if (u.packet.header_length > 0)
-                                       return -EINVAL;
-                       } else if (u.packet.header_length % ctx->header_size != 0) {
-                               return -EINVAL;
-                       }
-                       header_length = 0;
-               }
-
-               next = (struct fw_cdev_iso_packet __user *)
-                       &p->header[header_length / 4];
-               if (next > end)
-                       return -EINVAL;
-               if (__copy_from_user
-                   (u.packet.header, p->header, header_length))
-                       return -EFAULT;
-               if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
-                   u.packet.header_length + u.packet.payload_length > 0)
-                       return -EINVAL;
-               if (payload + u.packet.payload_length > buffer_end)
-                       return -EINVAL;
-
-               if (fw_iso_context_queue(ctx, &u.packet,
-                                        &client->buffer, payload))
-                       break;
-
-               p = next;
-               payload += u.packet.payload_length;
-               count++;
-       }
-
-       request->size    -= uptr_to_u64(p) - request->packets;
-       request->packets  = uptr_to_u64(p);
-       request->data     = client->vm_start + payload;
-
-       return count;
-}
-
-static int ioctl_start_iso(struct client *client, void *buffer)
-{
-       struct fw_cdev_start_iso *request = buffer;
-
-       if (client->iso_context == NULL || request->handle != 0)
-               return -EINVAL;
-
-       if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
-               if (request->tags == 0 || request->tags > 15)
-                       return -EINVAL;
-
-               if (request->sync > 15)
-                       return -EINVAL;
-       }
-
-       return fw_iso_context_start(client->iso_context, request->cycle,
-                                   request->sync, request->tags);
-}
-
-static int ioctl_stop_iso(struct client *client, void *buffer)
-{
-       struct fw_cdev_stop_iso *request = buffer;
-
-       if (client->iso_context == NULL || request->handle != 0)
-               return -EINVAL;
-
-       return fw_iso_context_stop(client->iso_context);
-}
-
-static int ioctl_get_cycle_timer(struct client *client, void *buffer)
-{
-       struct fw_cdev_get_cycle_timer *request = buffer;
-       struct fw_card *card = client->device->card;
-       unsigned long long bus_time;
-       struct timeval tv;
-       unsigned long flags;
-
-       preempt_disable();
-       local_irq_save(flags);
-
-       bus_time = card->driver->get_bus_time(card);
-       do_gettimeofday(&tv);
-
-       local_irq_restore(flags);
-       preempt_enable();
-
-       request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
-       request->cycle_timer = bus_time & 0xffffffff;
-       return 0;
-}
-
-static void iso_resource_work(struct work_struct *work)
-{
-       struct iso_resource_event *e;
-       struct iso_resource *r =
-                       container_of(work, struct iso_resource, work.work);
-       struct client *client = r->client;
-       int generation, channel, bandwidth, todo;
-       bool skip, free, success;
-
-       spin_lock_irq(&client->lock);
-       generation = client->device->generation;
-       todo = r->todo;
-       /* Allow 1000ms grace period for other reallocations. */
-       if (todo == ISO_RES_ALLOC &&
-           time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
-               if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
-                       client_get(client);
-               skip = true;
-       } else {
-               /* We could be called twice within the same generation. */
-               skip = todo == ISO_RES_REALLOC &&
-                      r->generation == generation;
-       }
-       free = todo == ISO_RES_DEALLOC ||
-              todo == ISO_RES_ALLOC_ONCE ||
-              todo == ISO_RES_DEALLOC_ONCE;
-       r->generation = generation;
-       spin_unlock_irq(&client->lock);
-
-       if (skip)
-               goto out;
-
-       bandwidth = r->bandwidth;
-
-       fw_iso_resource_manage(client->device->card, generation,
-                       r->channels, &channel, &bandwidth,
-                       todo == ISO_RES_ALLOC ||
-                       todo == ISO_RES_REALLOC ||
-                       todo == ISO_RES_ALLOC_ONCE);
-       /*
-        * Is this generation outdated already?  As long as this resource sticks
-        * in the idr, it will be scheduled again for a newer generation or at
-        * shutdown.
-        */
-       if (channel == -EAGAIN &&
-           (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
-               goto out;
-
-       success = channel >= 0 || bandwidth > 0;
-
-       spin_lock_irq(&client->lock);
-       /*
-        * Transit from allocation to reallocation, except if the client
-        * requested deallocation in the meantime.
-        */
-       if (r->todo == ISO_RES_ALLOC)
-               r->todo = ISO_RES_REALLOC;
-       /*
-        * Allocation or reallocation failure?  Pull this resource out of the
-        * idr and prepare for deletion, unless the client is shutting down.
-        */
-       if (r->todo == ISO_RES_REALLOC && !success &&
-           !client->in_shutdown &&
-           idr_find(&client->resource_idr, r->resource.handle)) {
-               idr_remove(&client->resource_idr, r->resource.handle);
-               client_put(client);
-               free = true;
-       }
-       spin_unlock_irq(&client->lock);
-
-       if (todo == ISO_RES_ALLOC && channel >= 0)
-               r->channels = 1ULL << channel;
-
-       if (todo == ISO_RES_REALLOC && success)
-               goto out;
-
-       if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
-               e = r->e_alloc;
-               r->e_alloc = NULL;
-       } else {
-               e = r->e_dealloc;
-               r->e_dealloc = NULL;
-       }
-       e->resource.handle      = r->resource.handle;
-       e->resource.channel     = channel;
-       e->resource.bandwidth   = bandwidth;
-
-       queue_event(client, &e->event,
-                   &e->resource, sizeof(e->resource), NULL, 0);
-
-       if (free) {
-               cancel_delayed_work(&r->work);
-               kfree(r->e_alloc);
-               kfree(r->e_dealloc);
-               kfree(r);
-       }
- out:
-       client_put(client);
-}
-
-static void schedule_iso_resource(struct iso_resource *r)
-{
-       client_get(r->client);
-       if (!schedule_delayed_work(&r->work, 0))
-               client_put(r->client);
-}
-
-static void release_iso_resource(struct client *client,
-                                struct client_resource *resource)
-{
-       struct iso_resource *r =
-               container_of(resource, struct iso_resource, resource);
-
-       spin_lock_irq(&client->lock);
-       r->todo = ISO_RES_DEALLOC;
-       schedule_iso_resource(r);
-       spin_unlock_irq(&client->lock);
-}
-
-static int init_iso_resource(struct client *client,
-               struct fw_cdev_allocate_iso_resource *request, int todo)
-{
-       struct iso_resource_event *e1, *e2;
-       struct iso_resource *r;
-       int ret;
-
-       if ((request->channels == 0 && request->bandwidth == 0) ||
-           request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
-           request->bandwidth < 0)
-               return -EINVAL;
-
-       r  = kmalloc(sizeof(*r), GFP_KERNEL);
-       e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
-       e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
-       if (r == NULL || e1 == NULL || e2 == NULL) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       INIT_DELAYED_WORK(&r->work, iso_resource_work);
-       r->client       = client;
-       r->todo         = todo;
-       r->generation   = -1;
-       r->channels     = request->channels;
-       r->bandwidth    = request->bandwidth;
-       r->e_alloc      = e1;
-       r->e_dealloc    = e2;
-
-       e1->resource.closure    = request->closure;
-       e1->resource.type       = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
-       e2->resource.closure    = request->closure;
-       e2->resource.type       = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
-
-       if (todo == ISO_RES_ALLOC) {
-               r->resource.release = release_iso_resource;
-               ret = add_client_resource(client, &r->resource, GFP_KERNEL);
-               if (ret < 0)
-                       goto fail;
-       } else {
-               r->resource.release = NULL;
-               r->resource.handle = -1;
-               schedule_iso_resource(r);
-       }
-       request->handle = r->resource.handle;
-
-       return 0;
- fail:
-       kfree(r);
-       kfree(e1);
-       kfree(e2);
-
-       return ret;
-}
-
-static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
-{
-       struct fw_cdev_allocate_iso_resource *request = buffer;
-
-       return init_iso_resource(client, request, ISO_RES_ALLOC);
-}
-
-static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
-{
-       struct fw_cdev_deallocate *request = buffer;
-
-       return release_client_resource(client, request->handle,
-                                      release_iso_resource, NULL);
-}
-
-static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
-{
-       struct fw_cdev_allocate_iso_resource *request = buffer;
-
-       return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
-}
-
-static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
-{
-       struct fw_cdev_allocate_iso_resource *request = buffer;
-
-       return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
-}
-
-/*
- * Returns a speed code:  Maximum speed to or from this device,
- * limited by the device's link speed, the local node's link speed,
- * and all PHY port speeds between the two links.
- */
-static int ioctl_get_speed(struct client *client, void *buffer)
-{
-       return client->device->max_speed;
-}
-
-static int ioctl_send_broadcast_request(struct client *client, void *buffer)
-{
-       struct fw_cdev_send_request *request = buffer;
-
-       switch (request->tcode) {
-       case TCODE_WRITE_QUADLET_REQUEST:
-       case TCODE_WRITE_BLOCK_REQUEST:
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       /* Security policy: Only allow accesses to Units Space. */
-       if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
-               return -EACCES;
-
-       return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
-}
-
-static int ioctl_send_stream_packet(struct client *client, void *buffer)
-{
-       struct fw_cdev_send_stream_packet *p = buffer;
-       struct fw_cdev_send_request request;
-       int dest;
-
-       if (p->speed > client->device->card->link_speed ||
-           p->length > 1024 << p->speed)
-               return -EIO;
-
-       if (p->tag > 3 || p->channel > 63 || p->sy > 15)
-               return -EINVAL;
-
-       dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
-       request.tcode           = TCODE_STREAM_DATA;
-       request.length          = p->length;
-       request.closure         = p->closure;
-       request.data            = p->data;
-       request.generation      = p->generation;
-
-       return init_request(client, &request, dest, p->speed);
-}
-
-static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
-       ioctl_get_info,
-       ioctl_send_request,
-       ioctl_allocate,
-       ioctl_deallocate,
-       ioctl_send_response,
-       ioctl_initiate_bus_reset,
-       ioctl_add_descriptor,
-       ioctl_remove_descriptor,
-       ioctl_create_iso_context,
-       ioctl_queue_iso,
-       ioctl_start_iso,
-       ioctl_stop_iso,
-       ioctl_get_cycle_timer,
-       ioctl_allocate_iso_resource,
-       ioctl_deallocate_iso_resource,
-       ioctl_allocate_iso_resource_once,
-       ioctl_deallocate_iso_resource_once,
-       ioctl_get_speed,
-       ioctl_send_broadcast_request,
-       ioctl_send_stream_packet,
-};
-
-static int dispatch_ioctl(struct client *client,
-                         unsigned int cmd, void __user *arg)
-{
-       char buffer[256];
-       int ret;
-
-       if (_IOC_TYPE(cmd) != '#' ||
-           _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
-               return -EINVAL;
-
-       if (_IOC_DIR(cmd) & _IOC_WRITE) {
-               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-                   copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
-                       return -EFAULT;
-       }
-
-       ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
-       if (ret < 0)
-               return ret;
-
-       if (_IOC_DIR(cmd) & _IOC_READ) {
-               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-                   copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
-                       return -EFAULT;
-       }
-
-       return ret;
-}
-
-static long fw_device_op_ioctl(struct file *file,
-                              unsigned int cmd, unsigned long arg)
-{
-       struct client *client = file->private_data;
-
-       if (fw_device_is_shutdown(client->device))
-               return -ENODEV;
-
-       return dispatch_ioctl(client, cmd, (void __user *) arg);
-}
-
-#ifdef CONFIG_COMPAT
-static long fw_device_op_compat_ioctl(struct file *file,
-                                     unsigned int cmd, unsigned long arg)
-{
-       struct client *client = file->private_data;
-
-       if (fw_device_is_shutdown(client->device))
-               return -ENODEV;
-
-       return dispatch_ioctl(client, cmd, compat_ptr(arg));
-}
-#endif
-
-static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       struct client *client = file->private_data;
-       enum dma_data_direction direction;
-       unsigned long size;
-       int page_count, ret;
-
-       if (fw_device_is_shutdown(client->device))
-               return -ENODEV;
-
-       /* FIXME: We could support multiple buffers, but we don't. */
-       if (client->buffer.pages != NULL)
-               return -EBUSY;
-
-       if (!(vma->vm_flags & VM_SHARED))
-               return -EINVAL;
-
-       if (vma->vm_start & ~PAGE_MASK)
-               return -EINVAL;
-
-       client->vm_start = vma->vm_start;
-       size = vma->vm_end - vma->vm_start;
-       page_count = size >> PAGE_SHIFT;
-       if (size & ~PAGE_MASK)
-               return -EINVAL;
-
-       if (vma->vm_flags & VM_WRITE)
-               direction = DMA_TO_DEVICE;
-       else
-               direction = DMA_FROM_DEVICE;
-
-       ret = fw_iso_buffer_init(&client->buffer, client->device->card,
-                                page_count, direction);
-       if (ret < 0)
-               return ret;
-
-       ret = fw_iso_buffer_map(&client->buffer, vma);
-       if (ret < 0)
-               fw_iso_buffer_destroy(&client->buffer, client->device->card);
-
-       return ret;
-}
-
-static int shutdown_resource(int id, void *p, void *data)
-{
-       struct client_resource *r = p;
-       struct client *client = data;
-
-       r->release(client, r);
-       client_put(client);
-
-       return 0;
-}
-
-static int fw_device_op_release(struct inode *inode, struct file *file)
-{
-       struct client *client = file->private_data;
-       struct event *e, *next_e;
-
-       mutex_lock(&client->device->client_list_mutex);
-       list_del(&client->link);
-       mutex_unlock(&client->device->client_list_mutex);
-
-       if (client->iso_context)
-               fw_iso_context_destroy(client->iso_context);
-
-       if (client->buffer.pages)
-               fw_iso_buffer_destroy(&client->buffer, client->device->card);
-
-       /* Freeze client->resource_idr and client->event_list */
-       spin_lock_irq(&client->lock);
-       client->in_shutdown = true;
-       spin_unlock_irq(&client->lock);
-
-       idr_for_each(&client->resource_idr, shutdown_resource, client);
-       idr_remove_all(&client->resource_idr);
-       idr_destroy(&client->resource_idr);
-
-       list_for_each_entry_safe(e, next_e, &client->event_list, link)
-               kfree(e);
-
-       client_put(client);
-
-       return 0;
-}
-
-static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
-{
-       struct client *client = file->private_data;
-       unsigned int mask = 0;
-
-       poll_wait(file, &client->wait, pt);
-
-       if (fw_device_is_shutdown(client->device))
-               mask |= POLLHUP | POLLERR;
-       if (!list_empty(&client->event_list))
-               mask |= POLLIN | POLLRDNORM;
-
-       return mask;
-}
-
-const struct file_operations fw_device_ops = {
-       .owner          = THIS_MODULE,
-       .open           = fw_device_op_open,
-       .read           = fw_device_op_read,
-       .unlocked_ioctl = fw_device_op_ioctl,
-       .poll           = fw_device_op_poll,
-       .release        = fw_device_op_release,
-       .mmap           = fw_device_op_mmap,
-
-#ifdef CONFIG_COMPAT
-       .compat_ioctl   = fw_device_op_compat_ioctl,
-#endif
-};
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
deleted file mode 100644 (file)
index 65d84dd..0000000
+++ /dev/null
@@ -1,1196 +0,0 @@
-/*
- * Device probing and sysfs code.
- *
- * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/ctype.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/idr.h>
-#include <linux/jiffies.h>
-#include <linux/kobject.h>
-#include <linux/list.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/rwsem.h>
-#include <linux/semaphore.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/workqueue.h>
-
-#include <asm/atomic.h>
-#include <asm/byteorder.h>
-#include <asm/system.h>
-
-#include "core.h"
-
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
-{
-       ci->p = p + 1;
-       ci->end = ci->p + (p[0] >> 16);
-}
-EXPORT_SYMBOL(fw_csr_iterator_init);
-
-int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
-{
-       *key = *ci->p >> 24;
-       *value = *ci->p & 0xffffff;
-
-       return ci->p++ < ci->end;
-}
-EXPORT_SYMBOL(fw_csr_iterator_next);
-
-static int is_fw_unit(struct device *dev);
-
-static int match_unit_directory(u32 *directory, u32 match_flags,
-                               const struct ieee1394_device_id *id)
-{
-       struct fw_csr_iterator ci;
-       int key, value, match;
-
-       match = 0;
-       fw_csr_iterator_init(&ci, directory);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               if (key == CSR_VENDOR && value == id->vendor_id)
-                       match |= IEEE1394_MATCH_VENDOR_ID;
-               if (key == CSR_MODEL && value == id->model_id)
-                       match |= IEEE1394_MATCH_MODEL_ID;
-               if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
-                       match |= IEEE1394_MATCH_SPECIFIER_ID;
-               if (key == CSR_VERSION && value == id->version)
-                       match |= IEEE1394_MATCH_VERSION;
-       }
-
-       return (match & match_flags) == match_flags;
-}
-
-static int fw_unit_match(struct device *dev, struct device_driver *drv)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       struct fw_device *device;
-       const struct ieee1394_device_id *id;
-
-       /* We only allow binding to fw_units. */
-       if (!is_fw_unit(dev))
-               return 0;
-
-       device = fw_device(unit->device.parent);
-       id = container_of(drv, struct fw_driver, driver)->id_table;
-
-       for (; id->match_flags != 0; id++) {
-               if (match_unit_directory(unit->directory, id->match_flags, id))
-                       return 1;
-
-               /* Also check vendor ID in the root directory. */
-               if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
-                   match_unit_directory(&device->config_rom[5],
-                               IEEE1394_MATCH_VENDOR_ID, id) &&
-                   match_unit_directory(unit->directory, id->match_flags
-                               & ~IEEE1394_MATCH_VENDOR_ID, id))
-                       return 1;
-       }
-
-       return 0;
-}
-
-static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
-{
-       struct fw_device *device = fw_device(unit->device.parent);
-       struct fw_csr_iterator ci;
-
-       int key, value;
-       int vendor = 0;
-       int model = 0;
-       int specifier_id = 0;
-       int version = 0;
-
-       fw_csr_iterator_init(&ci, &device->config_rom[5]);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               switch (key) {
-               case CSR_VENDOR:
-                       vendor = value;
-                       break;
-               case CSR_MODEL:
-                       model = value;
-                       break;
-               }
-       }
-
-       fw_csr_iterator_init(&ci, unit->directory);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               switch (key) {
-               case CSR_SPECIFIER_ID:
-                       specifier_id = value;
-                       break;
-               case CSR_VERSION:
-                       version = value;
-                       break;
-               }
-       }
-
-       return snprintf(buffer, buffer_size,
-                       "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
-                       vendor, model, specifier_id, version);
-}
-
-static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       char modalias[64];
-
-       get_modalias(unit, modalias, sizeof(modalias));
-
-       if (add_uevent_var(env, "MODALIAS=%s", modalias))
-               return -ENOMEM;
-
-       return 0;
-}
-
-struct bus_type fw_bus_type = {
-       .name = "firewire",
-       .match = fw_unit_match,
-};
-EXPORT_SYMBOL(fw_bus_type);
-
-int fw_device_enable_phys_dma(struct fw_device *device)
-{
-       int generation = device->generation;
-
-       /* device->node_id, accessed below, must not be older than generation */
-       smp_rmb();
-
-       return device->card->driver->enable_phys_dma(device->card,
-                                                    device->node_id,
-                                                    generation);
-}
-EXPORT_SYMBOL(fw_device_enable_phys_dma);
-
-struct config_rom_attribute {
-       struct device_attribute attr;
-       u32 key;
-};
-
-static ssize_t show_immediate(struct device *dev,
-                             struct device_attribute *dattr, char *buf)
-{
-       struct config_rom_attribute *attr =
-               container_of(dattr, struct config_rom_attribute, attr);
-       struct fw_csr_iterator ci;
-       u32 *dir;
-       int key, value, ret = -ENOENT;
-
-       down_read(&fw_device_rwsem);
-
-       if (is_fw_unit(dev))
-               dir = fw_unit(dev)->directory;
-       else
-               dir = fw_device(dev)->config_rom + 5;
-
-       fw_csr_iterator_init(&ci, dir);
-       while (fw_csr_iterator_next(&ci, &key, &value))
-               if (attr->key == key) {
-                       ret = snprintf(buf, buf ? PAGE_SIZE : 0,
-                                      "0x%06x\n", value);
-                       break;
-               }
-
-       up_read(&fw_device_rwsem);
-
-       return ret;
-}
-
-#define IMMEDIATE_ATTR(name, key)                              \
-       { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
-
-static ssize_t show_text_leaf(struct device *dev,
-                             struct device_attribute *dattr, char *buf)
-{
-       struct config_rom_attribute *attr =
-               container_of(dattr, struct config_rom_attribute, attr);
-       struct fw_csr_iterator ci;
-       u32 *dir, *block = NULL, *p, *end;
-       int length, key, value, last_key = 0, ret = -ENOENT;
-       char *b;
-
-       down_read(&fw_device_rwsem);
-
-       if (is_fw_unit(dev))
-               dir = fw_unit(dev)->directory;
-       else
-               dir = fw_device(dev)->config_rom + 5;
-
-       fw_csr_iterator_init(&ci, dir);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               if (attr->key == last_key &&
-                   key == (CSR_DESCRIPTOR | CSR_LEAF))
-                       block = ci.p - 1 + value;
-               last_key = key;
-       }
-
-       if (block == NULL)
-               goto out;
-
-       length = min(block[0] >> 16, 256U);
-       if (length < 3)
-               goto out;
-
-       if (block[1] != 0 || block[2] != 0)
-               /* Unknown encoding. */
-               goto out;
-
-       if (buf == NULL) {
-               ret = length * 4;
-               goto out;
-       }
-
-       b = buf;
-       end = &block[length + 1];
-       for (p = &block[3]; p < end; p++, b += 4)
-               * (u32 *) b = (__force u32) __cpu_to_be32(*p);
-
-       /* Strip trailing whitespace and add newline. */
-       while (b--, (isspace(*b) || *b == '\0') && b > buf);
-       strcpy(b + 1, "\n");
-       ret = b + 2 - buf;
- out:
-       up_read(&fw_device_rwsem);
-
-       return ret;
-}
-
-#define TEXT_LEAF_ATTR(name, key)                              \
-       { __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
-
-static struct config_rom_attribute config_rom_attributes[] = {
-       IMMEDIATE_ATTR(vendor, CSR_VENDOR),
-       IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
-       IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
-       IMMEDIATE_ATTR(version, CSR_VERSION),
-       IMMEDIATE_ATTR(model, CSR_MODEL),
-       TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
-       TEXT_LEAF_ATTR(model_name, CSR_MODEL),
-       TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
-};
-
-static void init_fw_attribute_group(struct device *dev,
-                                   struct device_attribute *attrs,
-                                   struct fw_attribute_group *group)
-{
-       struct device_attribute *attr;
-       int i, j;
-
-       for (j = 0; attrs[j].attr.name != NULL; j++)
-               group->attrs[j] = &attrs[j].attr;
-
-       for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
-               attr = &config_rom_attributes[i].attr;
-               if (attr->show(dev, attr, NULL) < 0)
-                       continue;
-               group->attrs[j++] = &attr->attr;
-       }
-
-       group->attrs[j] = NULL;
-       group->groups[0] = &group->group;
-       group->groups[1] = NULL;
-       group->group.attrs = group->attrs;
-       dev->groups = group->groups;
-}
-
-static ssize_t modalias_show(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       int length;
-
-       length = get_modalias(unit, buf, PAGE_SIZE);
-       strcpy(buf + length, "\n");
-
-       return length + 1;
-}
-
-static ssize_t rom_index_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       struct fw_device *device = fw_device(dev->parent);
-       struct fw_unit *unit = fw_unit(dev);
-
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-                       (int)(unit->directory - device->config_rom));
-}
-
-static struct device_attribute fw_unit_attributes[] = {
-       __ATTR_RO(modalias),
-       __ATTR_RO(rom_index),
-       __ATTR_NULL,
-};
-
-static ssize_t config_rom_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       struct fw_device *device = fw_device(dev);
-       size_t length;
-
-       down_read(&fw_device_rwsem);
-       length = device->config_rom_length * 4;
-       memcpy(buf, device->config_rom, length);
-       up_read(&fw_device_rwsem);
-
-       return length;
-}
-
-static ssize_t guid_show(struct device *dev,
-                        struct device_attribute *attr, char *buf)
-{
-       struct fw_device *device = fw_device(dev);
-       int ret;
-
-       down_read(&fw_device_rwsem);
-       ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
-                      device->config_rom[3], device->config_rom[4]);
-       up_read(&fw_device_rwsem);
-
-       return ret;
-}
-
-static int units_sprintf(char *buf, u32 *directory)
-{
-       struct fw_csr_iterator ci;
-       int key, value;
-       int specifier_id = 0;
-       int version = 0;
-
-       fw_csr_iterator_init(&ci, directory);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               switch (key) {
-               case CSR_SPECIFIER_ID:
-                       specifier_id = value;
-                       break;
-               case CSR_VERSION:
-                       version = value;
-                       break;
-               }
-       }
-
-       return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
-}
-
-static ssize_t units_show(struct device *dev,
-                         struct device_attribute *attr, char *buf)
-{
-       struct fw_device *device = fw_device(dev);
-       struct fw_csr_iterator ci;
-       int key, value, i = 0;
-
-       down_read(&fw_device_rwsem);
-       fw_csr_iterator_init(&ci, &device->config_rom[5]);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               if (key != (CSR_UNIT | CSR_DIRECTORY))
-                       continue;
-               i += units_sprintf(&buf[i], ci.p + value - 1);
-               if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
-                       break;
-       }
-       up_read(&fw_device_rwsem);
-
-       if (i)
-               buf[i - 1] = '\n';
-
-       return i;
-}
-
-static struct device_attribute fw_device_attributes[] = {
-       __ATTR_RO(config_rom),
-       __ATTR_RO(guid),
-       __ATTR_RO(units),
-       __ATTR_NULL,
-};
-
-static int read_rom(struct fw_device *device,
-                   int generation, int index, u32 *data)
-{
-       int rcode;
-
-       /* device->node_id, accessed below, must not be older than generation */
-       smp_rmb();
-
-       rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
-                       device->node_id, generation, device->max_speed,
-                       (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
-                       data, 4);
-       be32_to_cpus(data);
-
-       return rcode;
-}
-
-#define READ_BIB_ROM_SIZE      256
-#define READ_BIB_STACK_SIZE    16
-
-/*
- * Read the bus info block, perform a speed probe, and read all of the rest of
- * the config ROM.  We do all this with a cached bus generation.  If the bus
- * generation changes under us, read_bus_info_block will fail and get retried.
- * It's better to start all over in this case because the node from which we
- * are reading the ROM may have changed the ROM during the reset.
- */
-static int read_bus_info_block(struct fw_device *device, int generation)
-{
-       u32 *rom, *stack, *old_rom, *new_rom;
-       u32 sp, key;
-       int i, end, length, ret = -1;
-
-       rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
-                     sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
-       if (rom == NULL)
-               return -ENOMEM;
-
-       stack = &rom[READ_BIB_ROM_SIZE];
-
-       device->max_speed = SCODE_100;
-
-       /* First read the bus info block. */
-       for (i = 0; i < 5; i++) {
-               if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
-                       goto out;
-               /*
-                * As per IEEE1212 7.2, during power-up, devices can
-                * reply with a 0 for the first quadlet of the config
-                * rom to indicate that they are booting (for example,
-                * if the firmware is on the disk of a external
-                * harddisk).  In that case we just fail, and the
-                * retry mechanism will try again later.
-                */
-               if (i == 0 && rom[i] == 0)
-                       goto out;
-       }
-
-       device->max_speed = device->node->max_speed;
-
-       /*
-        * Determine the speed of
-        *   - devices with link speed less than PHY speed,
-        *   - devices with 1394b PHY (unless only connected to 1394a PHYs),
-        *   - all devices if there are 1394b repeaters.
-        * Note, we cannot use the bus info block's link_spd as starting point
-        * because some buggy firmwares set it lower than necessary and because
-        * 1394-1995 nodes do not have the field.
-        */
-       if ((rom[2] & 0x7) < device->max_speed ||
-           device->max_speed == SCODE_BETA ||
-           device->card->beta_repeaters_present) {
-               u32 dummy;
-
-               /* for S1600 and S3200 */
-               if (device->max_speed == SCODE_BETA)
-                       device->max_speed = device->card->link_speed;
-
-               while (device->max_speed > SCODE_100) {
-                       if (read_rom(device, generation, 0, &dummy) ==
-                           RCODE_COMPLETE)
-                               break;
-                       device->max_speed--;
-               }
-       }
-
-       /*
-        * Now parse the config rom.  The config rom is a recursive
-        * directory structure so we parse it using a stack of
-        * references to the blocks that make up the structure.  We
-        * push a reference to the root directory on the stack to
-        * start things off.
-        */
-       length = i;
-       sp = 0;
-       stack[sp++] = 0xc0000005;
-       while (sp > 0) {
-               /*
-                * Pop the next block reference of the stack.  The
-                * lower 24 bits is the offset into the config rom,
-                * the upper 8 bits are the type of the reference the
-                * block.
-                */
-               key = stack[--sp];
-               i = key & 0xffffff;
-               if (i >= READ_BIB_ROM_SIZE)
-                       /*
-                        * The reference points outside the standard
-                        * config rom area, something's fishy.
-                        */
-                       goto out;
-
-               /* Read header quadlet for the block to get the length. */
-               if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
-                       goto out;
-               end = i + (rom[i] >> 16) + 1;
-               i++;
-               if (end > READ_BIB_ROM_SIZE)
-                       /*
-                        * This block extends outside standard config
-                        * area (and the array we're reading it
-                        * into).  That's broken, so ignore this
-                        * device.
-                        */
-                       goto out;
-
-               /*
-                * Now read in the block.  If this is a directory
-                * block, check the entries as we read them to see if
-                * it references another block, and push it in that case.
-                */
-               while (i < end) {
-                       if (read_rom(device, generation, i, &rom[i]) !=
-                           RCODE_COMPLETE)
-                               goto out;
-                       if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
-                           sp < READ_BIB_STACK_SIZE)
-                               stack[sp++] = i + rom[i];
-                       i++;
-               }
-               if (length < i)
-                       length = i;
-       }
-
-       old_rom = device->config_rom;
-       new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
-       if (new_rom == NULL)
-               goto out;
-
-       down_write(&fw_device_rwsem);
-       device->config_rom = new_rom;
-       device->config_rom_length = length;
-       up_write(&fw_device_rwsem);
-
-       kfree(old_rom);
-       ret = 0;
-       device->cmc = rom[2] >> 30 & 1;
- out:
-       kfree(rom);
-
-       return ret;
-}
-
-static void fw_unit_release(struct device *dev)
-{
-       struct fw_unit *unit = fw_unit(dev);
-
-       kfree(unit);
-}
-
-static struct device_type fw_unit_type = {
-       .uevent         = fw_unit_uevent,
-       .release        = fw_unit_release,
-};
-
-static int is_fw_unit(struct device *dev)
-{
-       return dev->type == &fw_unit_type;
-}
-
-static void create_units(struct fw_device *device)
-{
-       struct fw_csr_iterator ci;
-       struct fw_unit *unit;
-       int key, value, i;
-
-       i = 0;
-       fw_csr_iterator_init(&ci, &device->config_rom[5]);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               if (key != (CSR_UNIT | CSR_DIRECTORY))
-                       continue;
-
-               /*
-                * Get the address of the unit directory and try to
-                * match the drivers id_tables against it.
-                */
-               unit = kzalloc(sizeof(*unit), GFP_KERNEL);
-               if (unit == NULL) {
-                       fw_error("failed to allocate memory for unit\n");
-                       continue;
-               }
-
-               unit->directory = ci.p + value - 1;
-               unit->device.bus = &fw_bus_type;
-               unit->device.type = &fw_unit_type;
-               unit->device.parent = &device->device;
-               dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
-
-               BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
-                               ARRAY_SIZE(fw_unit_attributes) +
-                               ARRAY_SIZE(config_rom_attributes));
-               init_fw_attribute_group(&unit->device,
-                                       fw_unit_attributes,
-                                       &unit->attribute_group);
-
-               if (device_register(&unit->device) < 0)
-                       goto skip_unit;
-
-               continue;
-
-       skip_unit:
-               kfree(unit);
-       }
-}
-
-static int shutdown_unit(struct device *device, void *data)
-{
-       device_unregister(device);
-
-       return 0;
-}
-
-/*
- * fw_device_rwsem acts as dual purpose mutex:
- *   - serializes accesses to fw_device_idr,
- *   - serializes accesses to fw_device.config_rom/.config_rom_length and
- *     fw_unit.directory, unless those accesses happen at safe occasions
- */
-DECLARE_RWSEM(fw_device_rwsem);
-
-DEFINE_IDR(fw_device_idr);
-int fw_cdev_major;
-
-struct fw_device *fw_device_get_by_devt(dev_t devt)
-{
-       struct fw_device *device;
-
-       down_read(&fw_device_rwsem);
-       device = idr_find(&fw_device_idr, MINOR(devt));
-       if (device)
-               fw_device_get(device);
-       up_read(&fw_device_rwsem);
-
-       return device;
-}
-
-/*
- * These defines control the retry behavior for reading the config
- * rom.  It shouldn't be necessary to tweak these; if the device
- * doesn't respond to a config rom read within 10 seconds, it's not
- * going to respond at all.  As for the initial delay, a lot of
- * devices will be able to respond within half a second after bus
- * reset.  On the other hand, it's not really worth being more
- * aggressive than that, since it scales pretty well; if 10 devices
- * are plugged in, they're all getting read within one second.
- */
-
-#define MAX_RETRIES    10
-#define RETRY_DELAY    (3 * HZ)
-#define INITIAL_DELAY  (HZ / 2)
-#define SHUTDOWN_DELAY (2 * HZ)
-
-static void fw_device_shutdown(struct work_struct *work)
-{
-       struct fw_device *device =
-               container_of(work, struct fw_device, work.work);
-       int minor = MINOR(device->device.devt);
-
-       if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
-           && !list_empty(&device->card->link)) {
-               schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
-               return;
-       }
-
-       if (atomic_cmpxchg(&device->state,
-                          FW_DEVICE_GONE,
-                          FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
-               return;
-
-       fw_device_cdev_remove(device);
-       device_for_each_child(&device->device, NULL, shutdown_unit);
-       device_unregister(&device->device);
-
-       down_write(&fw_device_rwsem);
-       idr_remove(&fw_device_idr, minor);
-       up_write(&fw_device_rwsem);
-
-       fw_device_put(device);
-}
-
-static void fw_device_release(struct device *dev)
-{
-       struct fw_device *device = fw_device(dev);
-       struct fw_card *card = device->card;
-       unsigned long flags;
-
-       /*
-        * Take the card lock so we don't set this to NULL while a
-        * FW_NODE_UPDATED callback is being handled or while the
-        * bus manager work looks at this node.
-        */
-       spin_lock_irqsave(&card->lock, flags);
-       device->node->data = NULL;
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       fw_node_put(device->node);
-       kfree(device->config_rom);
-       kfree(device);
-       fw_card_put(card);
-}
-
-static struct device_type fw_device_type = {
-       .release = fw_device_release,
-};
-
-static int update_unit(struct device *dev, void *data)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       struct fw_driver *driver = (struct fw_driver *)dev->driver;
-
-       if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
-               down(&dev->sem);
-               driver->update(unit);
-               up(&dev->sem);
-       }
-
-       return 0;
-}
-
-static void fw_device_update(struct work_struct *work)
-{
-       struct fw_device *device =
-               container_of(work, struct fw_device, work.work);
-
-       fw_device_cdev_update(device);
-       device_for_each_child(&device->device, NULL, update_unit);
-}
-
-/*
- * If a device was pending for deletion because its node went away but its
- * bus info block and root directory header matches that of a newly discovered
- * device, revive the existing fw_device.
- * The newly allocated fw_device becomes obsolete instead.
- */
-static int lookup_existing_device(struct device *dev, void *data)
-{
-       struct fw_device *old = fw_device(dev);
-       struct fw_device *new = data;
-       struct fw_card *card = new->card;
-       int match = 0;
-
-       down_read(&fw_device_rwsem); /* serialize config_rom access */
-       spin_lock_irq(&card->lock);  /* serialize node access */
-
-       if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
-           atomic_cmpxchg(&old->state,
-                          FW_DEVICE_GONE,
-                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
-               struct fw_node *current_node = new->node;
-               struct fw_node *obsolete_node = old->node;
-
-               new->node = obsolete_node;
-               new->node->data = new;
-               old->node = current_node;
-               old->node->data = old;
-
-               old->max_speed = new->max_speed;
-               old->node_id = current_node->node_id;
-               smp_wmb();  /* update node_id before generation */
-               old->generation = card->generation;
-               old->config_rom_retries = 0;
-               fw_notify("rediscovered device %s\n", dev_name(dev));
-
-               PREPARE_DELAYED_WORK(&old->work, fw_device_update);
-               schedule_delayed_work(&old->work, 0);
-
-               if (current_node == card->root_node)
-                       fw_schedule_bm_work(card, 0);
-
-               match = 1;
-       }
-
-       spin_unlock_irq(&card->lock);
-       up_read(&fw_device_rwsem);
-
-       return match;
-}
-
-enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
-
-void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
-{
-       struct fw_card *card = device->card;
-       __be32 data;
-       int rcode;
-
-       if (!card->broadcast_channel_allocated)
-               return;
-
-       if (device->bc_implemented == BC_UNKNOWN) {
-               rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
-                               device->node_id, generation, device->max_speed,
-                               CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
-                               &data, 4);
-               switch (rcode) {
-               case RCODE_COMPLETE:
-                       if (data & cpu_to_be32(1 << 31)) {
-                               device->bc_implemented = BC_IMPLEMENTED;
-                               break;
-                       }
-                       /* else fall through to case address error */
-               case RCODE_ADDRESS_ERROR:
-                       device->bc_implemented = BC_UNIMPLEMENTED;
-               }
-       }
-
-       if (device->bc_implemented == BC_IMPLEMENTED) {
-               data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
-                                  BROADCAST_CHANNEL_VALID);
-               fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
-                               device->node_id, generation, device->max_speed,
-                               CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
-                               &data, 4);
-       }
-}
-
-static void fw_device_init(struct work_struct *work)
-{
-       struct fw_device *device =
-               container_of(work, struct fw_device, work.work);
-       struct device *revived_dev;
-       int minor, ret;
-
-       /*
-        * All failure paths here set node->data to NULL, so that we
-        * don't try to do device_for_each_child() on a kfree()'d
-        * device.
-        */
-
-       if (read_bus_info_block(device, device->generation) < 0) {
-               if (device->config_rom_retries < MAX_RETRIES &&
-                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
-                       device->config_rom_retries++;
-                       schedule_delayed_work(&device->work, RETRY_DELAY);
-               } else {
-                       fw_notify("giving up on config rom for node id %x\n",
-                                 device->node_id);
-                       if (device->node == device->card->root_node)
-                               fw_schedule_bm_work(device->card, 0);
-                       fw_device_release(&device->device);
-               }
-               return;
-       }
-
-       revived_dev = device_find_child(device->card->device,
-                                       device, lookup_existing_device);
-       if (revived_dev) {
-               put_device(revived_dev);
-               fw_device_release(&device->device);
-
-               return;
-       }
-
-       device_initialize(&device->device);
-
-       fw_device_get(device);
-       down_write(&fw_device_rwsem);
-       ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
-             idr_get_new(&fw_device_idr, device, &minor) :
-             -ENOMEM;
-       up_write(&fw_device_rwsem);
-
-       if (ret < 0)
-               goto error;
-
-       device->device.bus = &fw_bus_type;
-       device->device.type = &fw_device_type;
-       device->device.parent = device->card->device;
-       device->device.devt = MKDEV(fw_cdev_major, minor);
-       dev_set_name(&device->device, "fw%d", minor);
-
-       BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
-                       ARRAY_SIZE(fw_device_attributes) +
-                       ARRAY_SIZE(config_rom_attributes));
-       init_fw_attribute_group(&device->device,
-                               fw_device_attributes,
-                               &device->attribute_group);
-
-       if (device_add(&device->device)) {
-               fw_error("Failed to add device.\n");
-               goto error_with_cdev;
-       }
-
-       create_units(device);
-
-       /*
-        * Transition the device to running state.  If it got pulled
-        * out from under us while we did the intialization work, we
-        * have to shut down the device again here.  Normally, though,
-        * fw_node_event will be responsible for shutting it down when
-        * necessary.  We have to use the atomic cmpxchg here to avoid
-        * racing with the FW_NODE_DESTROYED case in
-        * fw_node_event().
-        */
-       if (atomic_cmpxchg(&device->state,
-                          FW_DEVICE_INITIALIZING,
-                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
-               PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-               schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
-       } else {
-               if (device->config_rom_retries)
-                       fw_notify("created device %s: GUID %08x%08x, S%d00, "
-                                 "%d config ROM retries\n",
-                                 dev_name(&device->device),
-                                 device->config_rom[3], device->config_rom[4],
-                                 1 << device->max_speed,
-                                 device->config_rom_retries);
-               else
-                       fw_notify("created device %s: GUID %08x%08x, S%d00\n",
-                                 dev_name(&device->device),
-                                 device->config_rom[3], device->config_rom[4],
-                                 1 << device->max_speed);
-               device->config_rom_retries = 0;
-
-               fw_device_set_broadcast_channel(device, device->generation);
-       }
-
-       /*
-        * Reschedule the IRM work if we just finished reading the
-        * root node config rom.  If this races with a bus reset we
-        * just end up running the IRM work a couple of extra times -
-        * pretty harmless.
-        */
-       if (device->node == device->card->root_node)
-               fw_schedule_bm_work(device->card, 0);
-
-       return;
-
- error_with_cdev:
-       down_write(&fw_device_rwsem);
-       idr_remove(&fw_device_idr, minor);
-       up_write(&fw_device_rwsem);
- error:
-       fw_device_put(device);          /* fw_device_idr's reference */
-
-       put_device(&device->device);    /* our reference */
-}
-
-enum {
-       REREAD_BIB_ERROR,
-       REREAD_BIB_GONE,
-       REREAD_BIB_UNCHANGED,
-       REREAD_BIB_CHANGED,
-};
-
-/* Reread and compare bus info block and header of root directory */
-static int reread_bus_info_block(struct fw_device *device, int generation)
-{
-       u32 q;
-       int i;
-
-       for (i = 0; i < 6; i++) {
-               if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
-                       return REREAD_BIB_ERROR;
-
-               if (i == 0 && q == 0)
-                       return REREAD_BIB_GONE;
-
-               if (q != device->config_rom[i])
-                       return REREAD_BIB_CHANGED;
-       }
-
-       return REREAD_BIB_UNCHANGED;
-}
-
-static void fw_device_refresh(struct work_struct *work)
-{
-       struct fw_device *device =
-               container_of(work, struct fw_device, work.work);
-       struct fw_card *card = device->card;
-       int node_id = device->node_id;
-
-       switch (reread_bus_info_block(device, device->generation)) {
-       case REREAD_BIB_ERROR:
-               if (device->config_rom_retries < MAX_RETRIES / 2 &&
-                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
-                       device->config_rom_retries++;
-                       schedule_delayed_work(&device->work, RETRY_DELAY / 2);
-
-                       return;
-               }
-               goto give_up;
-
-       case REREAD_BIB_GONE:
-               goto gone;
-
-       case REREAD_BIB_UNCHANGED:
-               if (atomic_cmpxchg(&device->state,
-                                  FW_DEVICE_INITIALIZING,
-                                  FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
-                       goto gone;
-
-               fw_device_update(work);
-               device->config_rom_retries = 0;
-               goto out;
-
-       case REREAD_BIB_CHANGED:
-               break;
-       }
-
-       /*
-        * Something changed.  We keep things simple and don't investigate
-        * further.  We just destroy all previous units and create new ones.
-        */
-       device_for_each_child(&device->device, NULL, shutdown_unit);
-
-       if (read_bus_info_block(device, device->generation) < 0) {
-               if (device->config_rom_retries < MAX_RETRIES &&
-                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
-                       device->config_rom_retries++;
-                       schedule_delayed_work(&device->work, RETRY_DELAY);
-
-                       return;
-               }
-               goto give_up;
-       }
-
-       create_units(device);
-
-       /* Userspace may want to re-read attributes. */
-       kobject_uevent(&device->device.kobj, KOBJ_CHANGE);
-
-       if (atomic_cmpxchg(&device->state,
-                          FW_DEVICE_INITIALIZING,
-                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
-               goto gone;
-
-       fw_notify("refreshed device %s\n", dev_name(&device->device));
-       device->config_rom_retries = 0;
-       goto out;
-
- give_up:
-       fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
- gone:
-       atomic_set(&device->state, FW_DEVICE_GONE);
-       PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-       schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
- out:
-       if (node_id == card->root_node->node_id)
-               fw_schedule_bm_work(card, 0);
-}
-
-void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
-{
-       struct fw_device *device;
-
-       switch (event) {
-       case FW_NODE_CREATED:
-       case FW_NODE_LINK_ON:
-               if (!node->link_on)
-                       break;
- create:
-               device = kzalloc(sizeof(*device), GFP_ATOMIC);
-               if (device == NULL)
-                       break;
-
-               /*
-                * Do minimal intialization of the device here, the
-                * rest will happen in fw_device_init().
-                *
-                * Attention:  A lot of things, even fw_device_get(),
-                * cannot be done before fw_device_init() finished!
-                * You can basically just check device->state and
-                * schedule work until then, but only while holding
-                * card->lock.
-                */
-               atomic_set(&device->state, FW_DEVICE_INITIALIZING);
-               device->card = fw_card_get(card);
-               device->node = fw_node_get(node);
-               device->node_id = node->node_id;
-               device->generation = card->generation;
-               device->is_local = node == card->local_node;
-               mutex_init(&device->client_list_mutex);
-               INIT_LIST_HEAD(&device->client_list);
-
-               /*
-                * Set the node data to point back to this device so
-                * FW_NODE_UPDATED callbacks can update the node_id
-                * and generation for the device.
-                */
-               node->data = device;
-
-               /*
-                * Many devices are slow to respond after bus resets,
-                * especially if they are bus powered and go through
-                * power-up after getting plugged in.  We schedule the
-                * first config rom scan half a second after bus reset.
-                */
-               INIT_DELAYED_WORK(&device->work, fw_device_init);
-               schedule_delayed_work(&device->work, INITIAL_DELAY);
-               break;
-
-       case FW_NODE_INITIATED_RESET:
-               device = node->data;
-               if (device == NULL)
-                       goto create;
-
-               device->node_id = node->node_id;
-               smp_wmb();  /* update node_id before generation */
-               device->generation = card->generation;
-               if (atomic_cmpxchg(&device->state,
-                           FW_DEVICE_RUNNING,
-                           FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
-                       PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
-                       schedule_delayed_work(&device->work,
-                               device->is_local ? 0 : INITIAL_DELAY);
-               }
-               break;
-
-       case FW_NODE_UPDATED:
-               if (!node->link_on || node->data == NULL)
-                       break;
-
-               device = node->data;
-               device->node_id = node->node_id;
-               smp_wmb();  /* update node_id before generation */
-               device->generation = card->generation;
-               if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
-                       PREPARE_DELAYED_WORK(&device->work, fw_device_update);
-                       schedule_delayed_work(&device->work, 0);
-               }
-               break;
-
-       case FW_NODE_DESTROYED:
-       case FW_NODE_LINK_OFF:
-               if (!node->data)
-                       break;
-
-               /*
-                * Destroy the device associated with the node.  There
-                * are two cases here: either the device is fully
-                * initialized (FW_DEVICE_RUNNING) or we're in the
-                * process of reading its config rom
-                * (FW_DEVICE_INITIALIZING).  If it is fully
-                * initialized we can reuse device->work to schedule a
-                * full fw_device_shutdown().  If not, there's work
-                * scheduled to read it's config rom, and we just put
-                * the device in shutdown state to have that code fail
-                * to create the device.
-                */
-               device = node->data;
-               if (atomic_xchg(&device->state,
-                               FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
-                       PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-                       schedule_delayed_work(&device->work,
-                               list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
-               }
-               break;
-       }
-}
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
deleted file mode 100644 (file)
index 28076c8..0000000
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Isochronous I/O functionality:
- *   - Isochronous DMA context management
- *   - Isochronous bus resource management (channels, bandwidth), client side
- *
- * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/errno.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/vmalloc.h>
-
-#include <asm/byteorder.h>
-
-#include "core.h"
-
-/*
- * Isochronous DMA context management
- */
-
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
-                      int page_count, enum dma_data_direction direction)
-{
-       int i, j;
-       dma_addr_t address;
-
-       buffer->page_count = page_count;
-       buffer->direction = direction;
-
-       buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
-                               GFP_KERNEL);
-       if (buffer->pages == NULL)
-               goto out;
-
-       for (i = 0; i < buffer->page_count; i++) {
-               buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
-               if (buffer->pages[i] == NULL)
-                       goto out_pages;
-
-               address = dma_map_page(card->device, buffer->pages[i],
-                                      0, PAGE_SIZE, direction);
-               if (dma_mapping_error(card->device, address)) {
-                       __free_page(buffer->pages[i]);
-                       goto out_pages;
-               }
-               set_page_private(buffer->pages[i], address);
-       }
-
-       return 0;
-
- out_pages:
-       for (j = 0; j < i; j++) {
-               address = page_private(buffer->pages[j]);
-               dma_unmap_page(card->device, address,
-                              PAGE_SIZE, DMA_TO_DEVICE);
-               __free_page(buffer->pages[j]);
-       }
-       kfree(buffer->pages);
- out:
-       buffer->pages = NULL;
-
-       return -ENOMEM;
-}
-
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
-{
-       unsigned long uaddr;
-       int i, err;
-
-       uaddr = vma->vm_start;
-       for (i = 0; i < buffer->page_count; i++) {
-               err = vm_insert_page(vma, uaddr, buffer->pages[i]);
-               if (err)
-                       return err;
-
-               uaddr += PAGE_SIZE;
-       }
-
-       return 0;
-}
-
-void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
-                          struct fw_card *card)
-{
-       int i;
-       dma_addr_t address;
-
-       for (i = 0; i < buffer->page_count; i++) {
-               address = page_private(buffer->pages[i]);
-               dma_unmap_page(card->device, address,
-                              PAGE_SIZE, DMA_TO_DEVICE);
-               __free_page(buffer->pages[i]);
-       }
-
-       kfree(buffer->pages);
-       buffer->pages = NULL;
-}
-
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
-               int type, int channel, int speed, size_t header_size,
-               fw_iso_callback_t callback, void *callback_data)
-{
-       struct fw_iso_context *ctx;
-
-       ctx = card->driver->allocate_iso_context(card,
-                                                type, channel, header_size);
-       if (IS_ERR(ctx))
-               return ctx;
-
-       ctx->card = card;
-       ctx->type = type;
-       ctx->channel = channel;
-       ctx->speed = speed;
-       ctx->header_size = header_size;
-       ctx->callback = callback;
-       ctx->callback_data = callback_data;
-
-       return ctx;
-}
-
-void fw_iso_context_destroy(struct fw_iso_context *ctx)
-{
-       struct fw_card *card = ctx->card;
-
-       card->driver->free_iso_context(ctx);
-}
-
-int fw_iso_context_start(struct fw_iso_context *ctx,
-                        int cycle, int sync, int tags)
-{
-       return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
-}
-
-int fw_iso_context_queue(struct fw_iso_context *ctx,
-                        struct fw_iso_packet *packet,
-                        struct fw_iso_buffer *buffer,
-                        unsigned long payload)
-{
-       struct fw_card *card = ctx->card;
-
-       return card->driver->queue_iso(ctx, packet, buffer, payload);
-}
-
-int fw_iso_context_stop(struct fw_iso_context *ctx)
-{
-       return ctx->card->driver->stop_iso(ctx);
-}
-
-/*
- * Isochronous bus resource management (channels, bandwidth), client side
- */
-
-static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
-                           int bandwidth, bool allocate)
-{
-       __be32 data[2];
-       int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
-
-       /*
-        * On a 1394a IRM with low contention, try < 1 is enough.
-        * On a 1394-1995 IRM, we need at least try < 2.
-        * Let's just do try < 5.
-        */
-       for (try = 0; try < 5; try++) {
-               new = allocate ? old - bandwidth : old + bandwidth;
-               if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
-                       break;
-
-               data[0] = cpu_to_be32(old);
-               data[1] = cpu_to_be32(new);
-               switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
-                               irm_id, generation, SCODE_100,
-                               CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
-                               data, sizeof(data))) {
-               case RCODE_GENERATION:
-                       /* A generation change frees all bandwidth. */
-                       return allocate ? -EAGAIN : bandwidth;
-
-               case RCODE_COMPLETE:
-                       if (be32_to_cpup(data) == old)
-                               return bandwidth;
-
-                       old = be32_to_cpup(data);
-                       /* Fall through. */
-               }
-       }
-
-       return -EIO;
-}
-
-static int manage_channel(struct fw_card *card, int irm_id, int generation,
-                         u32 channels_mask, u64 offset, bool allocate)
-{
-       __be32 data[2], c, all, old;
-       int i, retry = 5;
-
-       old = all = allocate ? cpu_to_be32(~0) : 0;
-
-       for (i = 0; i < 32; i++) {
-               if (!(channels_mask & 1 << i))
-                       continue;
-
-               c = cpu_to_be32(1 << (31 - i));
-               if ((old & c) != (all & c))
-                       continue;
-
-               data[0] = old;
-               data[1] = old ^ c;
-               switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
-                                          irm_id, generation, SCODE_100,
-                                          offset, data, sizeof(data))) {
-               case RCODE_GENERATION:
-                       /* A generation change frees all channels. */
-                       return allocate ? -EAGAIN : i;
-
-               case RCODE_COMPLETE:
-                       if (data[0] == old)
-                               return i;
-
-                       old = data[0];
-
-                       /* Is the IRM 1394a-2000 compliant? */
-                       if ((data[0] & c) == (data[1] & c))
-                               continue;
-
-                       /* 1394-1995 IRM, fall through to retry. */
-               default:
-                       if (retry--)
-                               i--;
-               }
-       }
-
-       return -EIO;
-}
-
-static void deallocate_channel(struct fw_card *card, int irm_id,
-                              int generation, int channel)
-{
-       u32 mask;
-       u64 offset;
-
-       mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
-       offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
-                               CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
-
-       manage_channel(card, irm_id, generation, mask, offset, false);
-}
-
-/**
- * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
- *
- * In parameters: card, generation, channels_mask, bandwidth, allocate
- * Out parameters: channel, bandwidth
- * This function blocks (sleeps) during communication with the IRM.
- *
- * Allocates or deallocates at most one channel out of channels_mask.
- * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
- * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
- * channel 0 and LSB for channel 63.)
- * Allocates or deallocates as many bandwidth allocation units as specified.
- *
- * Returns channel < 0 if no channel was allocated or deallocated.
- * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
- *
- * If generation is stale, deallocations succeed but allocations fail with
- * channel = -EAGAIN.
- *
- * If channel allocation fails, no bandwidth will be allocated either.
- * If bandwidth allocation fails, no channel will be allocated either.
- * But deallocations of channel and bandwidth are tried independently
- * of each other's success.
- */
-void fw_iso_resource_manage(struct fw_card *card, int generation,
-                           u64 channels_mask, int *channel, int *bandwidth,
-                           bool allocate)
-{
-       u32 channels_hi = channels_mask;        /* channels 31...0 */
-       u32 channels_lo = channels_mask >> 32;  /* channels 63...32 */
-       int irm_id, ret, c = -EINVAL;
-
-       spin_lock_irq(&card->lock);
-       irm_id = card->irm_node->node_id;
-       spin_unlock_irq(&card->lock);
-
-       if (channels_hi)
-               c = manage_channel(card, irm_id, generation, channels_hi,
-                   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
-       if (channels_lo && c < 0) {
-               c = manage_channel(card, irm_id, generation, channels_lo,
-                   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
-               if (c >= 0)
-                       c += 32;
-       }
-       *channel = c;
-
-       if (allocate && channels_mask != 0 && c < 0)
-               *bandwidth = 0;
-
-       if (*bandwidth == 0)
-               return;
-
-       ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
-       if (ret < 0)
-               *bandwidth = 0;
-
-       if (allocate && ret < 0 && c >= 0) {
-               deallocate_channel(card, irm_id, generation, c);
-               *channel = ret;
-       }
-}
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
deleted file mode 100644 (file)
index ecddd11..0000000
+++ /dev/null
@@ -1,2636 +0,0 @@
-/*
- * Driver for OHCI 1394 controllers
- *
- * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/compiler.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-
-#include <asm/atomic.h>
-#include <asm/byteorder.h>
-#include <asm/page.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_PPC_PMAC
-#include <asm/pmac_feature.h>
-#endif
-
-#include "core.h"
-#include "ohci.h"
-
-#define DESCRIPTOR_OUTPUT_MORE         0
-#define DESCRIPTOR_OUTPUT_LAST         (1 << 12)
-#define DESCRIPTOR_INPUT_MORE          (2 << 12)
-#define DESCRIPTOR_INPUT_LAST          (3 << 12)
-#define DESCRIPTOR_STATUS              (1 << 11)
-#define DESCRIPTOR_KEY_IMMEDIATE       (2 << 8)
-#define DESCRIPTOR_PING                        (1 << 7)
-#define DESCRIPTOR_YY                  (1 << 6)
-#define DESCRIPTOR_NO_IRQ              (0 << 4)
-#define DESCRIPTOR_IRQ_ERROR           (1 << 4)
-#define DESCRIPTOR_IRQ_ALWAYS          (3 << 4)
-#define DESCRIPTOR_BRANCH_ALWAYS       (3 << 2)
-#define DESCRIPTOR_WAIT                        (3 << 0)
-
-struct descriptor {
-       __le16 req_count;
-       __le16 control;
-       __le32 data_address;
-       __le32 branch_address;
-       __le16 res_count;
-       __le16 transfer_status;
-} __attribute__((aligned(16)));
-
-struct db_descriptor {
-       __le16 first_size;
-       __le16 control;
-       __le16 second_req_count;
-       __le16 first_req_count;
-       __le32 branch_address;
-       __le16 second_res_count;
-       __le16 first_res_count;
-       __le32 reserved0;
-       __le32 first_buffer;
-       __le32 second_buffer;
-       __le32 reserved1;
-} __attribute__((aligned(16)));
-
-#define CONTROL_SET(regs)      (regs)
-#define CONTROL_CLEAR(regs)    ((regs) + 4)
-#define COMMAND_PTR(regs)      ((regs) + 12)
-#define CONTEXT_MATCH(regs)    ((regs) + 16)
-
-struct ar_buffer {
-       struct descriptor descriptor;
-       struct ar_buffer *next;
-       __le32 data[0];
-};
-
-struct ar_context {
-       struct fw_ohci *ohci;
-       struct ar_buffer *current_buffer;
-       struct ar_buffer *last_buffer;
-       void *pointer;
-       u32 regs;
-       struct tasklet_struct tasklet;
-};
-
-struct context;
-
-typedef int (*descriptor_callback_t)(struct context *ctx,
-                                    struct descriptor *d,
-                                    struct descriptor *last);
-
-/*
- * A buffer that contains a block of DMA-able coherent memory used for
- * storing a portion of a DMA descriptor program.
- */
-struct descriptor_buffer {
-       struct list_head list;
-       dma_addr_t buffer_bus;
-       size_t buffer_size;
-       size_t used;
-       struct descriptor buffer[0];
-};
-
-struct context {
-       struct fw_ohci *ohci;
-       u32 regs;
-       int total_allocation;
-
-       /*
-        * List of page-sized buffers for storing DMA descriptors.
-        * Head of list contains buffers in use and tail of list contains
-        * free buffers.
-        */
-       struct list_head buffer_list;
-
-       /*
-        * Pointer to a buffer inside buffer_list that contains the tail
-        * end of the current DMA program.
-        */
-       struct descriptor_buffer *buffer_tail;
-
-       /*
-        * The descriptor containing the branch address of the first
-        * descriptor that has not yet been filled by the device.
-        */
-       struct descriptor *last;
-
-       /*
-        * The last descriptor in the DMA program.  It contains the branch
-        * address that must be updated upon appending a new descriptor.
-        */
-       struct descriptor *prev;
-
-       descriptor_callback_t callback;
-
-       struct tasklet_struct tasklet;
-};
-
-#define IT_HEADER_SY(v)          ((v) <<  0)
-#define IT_HEADER_TCODE(v)       ((v) <<  4)
-#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
-#define IT_HEADER_TAG(v)         ((v) << 14)
-#define IT_HEADER_SPEED(v)       ((v) << 16)
-#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
-
-struct iso_context {
-       struct fw_iso_context base;
-       struct context context;
-       int excess_bytes;
-       void *header;
-       size_t header_length;
-};
-
-#define CONFIG_ROM_SIZE 1024
-
-struct fw_ohci {
-       struct fw_card card;
-
-       __iomem char *registers;
-       dma_addr_t self_id_bus;
-       __le32 *self_id_cpu;
-       struct tasklet_struct bus_reset_tasklet;
-       int node_id;
-       int generation;
-       int request_generation; /* for timestamping incoming requests */
-       atomic_t bus_seconds;
-
-       bool use_dualbuffer;
-       bool old_uninorth;
-       bool bus_reset_packet_quirk;
-
-       /*
-        * Spinlock for accessing fw_ohci data.  Never call out of
-        * this driver with this lock held.
-        */
-       spinlock_t lock;
-       u32 self_id_buffer[512];
-
-       /* Config rom buffers */
-       __be32 *config_rom;
-       dma_addr_t config_rom_bus;
-       __be32 *next_config_rom;
-       dma_addr_t next_config_rom_bus;
-       u32 next_header;
-
-       struct ar_context ar_request_ctx;
-       struct ar_context ar_response_ctx;
-       struct context at_request_ctx;
-       struct context at_response_ctx;
-
-       u32 it_context_mask;
-       struct iso_context *it_context_list;
-       u64 ir_context_channels;
-       u32 ir_context_mask;
-       struct iso_context *ir_context_list;
-};
-
-static inline struct fw_ohci *fw_ohci(struct fw_card *card)
-{
-       return container_of(card, struct fw_ohci, card);
-}
-
-#define IT_CONTEXT_CYCLE_MATCH_ENABLE  0x80000000
-#define IR_CONTEXT_BUFFER_FILL         0x80000000
-#define IR_CONTEXT_ISOCH_HEADER                0x40000000
-#define IR_CONTEXT_CYCLE_MATCH_ENABLE  0x20000000
-#define IR_CONTEXT_MULTI_CHANNEL_MODE  0x10000000
-#define IR_CONTEXT_DUAL_BUFFER_MODE    0x08000000
-
-#define CONTEXT_RUN    0x8000
-#define CONTEXT_WAKE   0x1000
-#define CONTEXT_DEAD   0x0800
-#define CONTEXT_ACTIVE 0x0400
-
-#define OHCI1394_MAX_AT_REQ_RETRIES    0xf
-#define OHCI1394_MAX_AT_RESP_RETRIES   0x2
-#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
-
-#define OHCI1394_REGISTER_SIZE         0x800
-#define OHCI_LOOP_COUNT                        500
-#define OHCI1394_PCI_HCI_Control       0x40
-#define SELF_ID_BUF_SIZE               0x800
-#define OHCI_TCODE_PHY_PACKET          0x0e
-#define OHCI_VERSION_1_1               0x010010
-
-static char ohci_driver_name[] = KBUILD_MODNAME;
-
-#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
-
-#define OHCI_PARAM_DEBUG_AT_AR         1
-#define OHCI_PARAM_DEBUG_SELFIDS       2
-#define OHCI_PARAM_DEBUG_IRQS          4
-#define OHCI_PARAM_DEBUG_BUSRESETS     8 /* only effective before chip init */
-
-static int param_debug;
-module_param_named(debug, param_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
-       ", AT/AR events = "     __stringify(OHCI_PARAM_DEBUG_AT_AR)
-       ", self-IDs = "         __stringify(OHCI_PARAM_DEBUG_SELFIDS)
-       ", IRQs = "             __stringify(OHCI_PARAM_DEBUG_IRQS)
-       ", busReset events = "  __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
-       ", or a combination, or all = -1)");
-
-static void log_irqs(u32 evt)
-{
-       if (likely(!(param_debug &
-                       (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
-               return;
-
-       if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
-           !(evt & OHCI1394_busReset))
-               return;
-
-       fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
-           evt & OHCI1394_selfIDComplete       ? " selfID"             : "",
-           evt & OHCI1394_RQPkt                ? " AR_req"             : "",
-           evt & OHCI1394_RSPkt                ? " AR_resp"            : "",
-           evt & OHCI1394_reqTxComplete        ? " AT_req"             : "",
-           evt & OHCI1394_respTxComplete       ? " AT_resp"            : "",
-           evt & OHCI1394_isochRx              ? " IR"                 : "",
-           evt & OHCI1394_isochTx              ? " IT"                 : "",
-           evt & OHCI1394_postedWriteErr       ? " postedWriteErr"     : "",
-           evt & OHCI1394_cycleTooLong         ? " cycleTooLong"       : "",
-           evt & OHCI1394_cycle64Seconds       ? " cycle64Seconds"     : "",
-           evt & OHCI1394_regAccessFail        ? " regAccessFail"      : "",
-           evt & OHCI1394_busReset             ? " busReset"           : "",
-           evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
-                   OHCI1394_RSPkt | OHCI1394_reqTxComplete |
-                   OHCI1394_respTxComplete | OHCI1394_isochRx |
-                   OHCI1394_isochTx | OHCI1394_postedWriteErr |
-                   OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
-                   OHCI1394_regAccessFail | OHCI1394_busReset)
-                                               ? " ?"                  : "");
-}
-
-static const char *speed[] = {
-       [0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
-};
-static const char *power[] = {
-       [0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
-       [4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
-};
-static const char port[] = { '.', '-', 'p', 'c', };
-
-static char _p(u32 *s, int shift)
-{
-       return port[*s >> shift & 3];
-}
-
/*
 * Dump the self-ID quadlets gathered after a bus reset, one line per
 * quadlet, decoding phy ID, port states, speed, gap count, power class
 * and the link/contender/initiated-reset bits.
 * No-op unless the OHCI_PARAM_DEBUG_SELFIDS debug flag is set.
 */
static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
		  self_id_count, generation, node_id);

	for (; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			/* bit 23 clear: first self-ID packet of a phy */
			fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
			    "%s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			/* extended self-ID packet: eight more port fields */
			fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
}
-
/*
 * Names for the 5-bit evt/ack code in AT/AR completion status.
 * Index 0x20 lies outside the 5-bit hardware field — presumably a
 * software-synthesized state; confirm against the cancel path.
 */
static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
/* IEEE 1394 transaction code names (0xa/0xe are link-layer specific). */
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};
/* PHY packet type, indexed by the top two bits of the first quadlet. */
static const char *phys[] = {
	[0x0] = "phy config packet",	[0x1] = "link-on packet",
	[0x2] = "self-id packet",	[0x3] = "-reserved-",
};
-
/*
 * Log one asynchronous packet event.  @dir is 'R' (AR, received) or
 * 'T' (AT, transmitted); @header points at the packet header quadlets;
 * @evt is the evt/ack code from the completion status.
 * No-op unless the OHCI_PARAM_DEBUG_AT_AR debug flag is set.
 */
static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
{
	int tcode = header[0] >> 4 & 0xf;
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;	/* clamp out-of-range codes to "-reserved-" */

	if (evt == OHCI1394_evt_bus_reset) {
		fw_notify("A%c evt_bus_reset, generation %d\n",
		    dir, (header[2] >> 16) & 0xff);
		return;
	}

	/* Second quadlet being the complement of the first marks a PHY packet. */
	if (header[0] == ~header[1]) {
		fw_notify("A%c %s, %s, %08x\n",
		    dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
		return;
	}

	/* Build a tcode-specific trailer: quadlet data, or length/ext_tcode. */
	switch (tcode) {
	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case 0xe: case 0xa:
		/* link-internal and async stream packets carry no addresses */
		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
		break;
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		/* request tcodes: also print the 48-bit destination offset */
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s, %04x%08x%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], header[1] & 0xffff, header[2], specific);
		break;
	default:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], specific);
	}
}
-
-#else
-
-#define log_irqs(evt)
-#define log_selfids(node_id, generation, self_id_count, sid)
-#define log_ar_at_event(dir, speed, header, evt)
-
-#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
-
/* MMIO write of one OHCI register at byte @offset into the register window. */
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}
-
/* MMIO read of one OHCI register at byte @offset into the register window. */
static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}
-
/* Force posted MMIO writes out to the controller. */
static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}
-
/*
 * Read-modify-write PHY register @addr: clear @clear_bits, set @set_bits.
 * Triggers a PHY read via PhyControl, waits a fixed 2 ms, and writes the
 * modified value back.  Returns 0 on success, -EBUSY if the read had not
 * completed after the sleep.
 *
 * NOTE(review): a single fixed sleep rather than polling ReadDone, and
 * the write-back completion is not awaited — presumably adequate on real
 * hardware; confirm before reusing this pattern.
 */
static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	flush_writes(ohci);
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}
-
/*
 * Allocate one DMA-coherent page, set it up as an INPUT_MORE descriptor
 * followed by payload space, and append it to the AR context's buffer
 * chain: the old tail's branch_address is pointed at the new page
 * (low bit = Z=1) and the context is woken so the controller uses it.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t uninitialized_var(ab_bus);
	size_t offset;

	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab->next = NULL;
	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						    DESCRIPTOR_STATUS |
						    DESCRIPTOR_BRANCH_ALWAYS);
	/* Data area starts after the CPU-side bookkeeping at the page head. */
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}
-
-static void ar_context_release(struct ar_context *ctx)
-{
-       struct ar_buffer *ab, *ab_next;
-       size_t offset;
-       dma_addr_t ab_bus;
-
-       for (ab = ctx->current_buffer; ab; ab = ab_next) {
-               ab_next = ab->next;
-               offset = offsetof(struct ar_buffer, data);
-               ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-               dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
-                                 ab, ab_bus);
-       }
-}
-
/*
 * Old UniNorth controllers on 32-bit PowerMacs deliver quadlets that must
 * not be byte-swapped; everywhere else convert from little endian.  The
 * macro relies on a local variable named `ohci` being in scope.
 */
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
-
/*
 * Parse one received packet at @buffer, build a struct fw_packet from it,
 * and hand it to the core as a request or response.  Returns a pointer
 * just past the packet (header + payload + one trailing status quadlet),
 * i.e. where the next packet starts.
 */
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		/* quadlet data is kept in bus order, hence the __force copy */
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST :
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		/* data_length lives in the upper half of the fourth quadlet */
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		/* FIXME: Stop context, discard everything, and restart? */
		p.header_length = 0;
		p.payload_length = 0;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt    = (status >> 16) & 0x1f;

	p.ack        = evt - 16;	/* evt 0x11.. map to ack codes - 0x10 */
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event('R', p.speed, p.header, evt);

	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_tasklet).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!ohci->bus_reset_packet_quirk)
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}
-
/*
 * AR bottom half: dispatch all completely received packets.  When the
 * current buffer's res_count reaches zero the buffer is full and a
 * packet may straddle into the next page; the finished page is then
 * reused as scratch space to reassemble the split packet before being
 * freed and replaced with a fresh page.
 */
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;
		dma_addr_t start_bus;
		void *start;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer. We
		 * reuse the page for reassembling the split packet.
		 */

		offset = offsetof(struct ar_buffer, data);
		start = buffer = ab;
		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

		ab = ab->next;
		d = &ab->descriptor;
		/* size: unconsumed tail of the old page;
		 * rest: bytes already received into the next page */
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		dma_free_coherent(ohci->card.device, PAGE_SIZE,
				  start, start_bus);
		/* NOTE(review): an -ENOMEM here is ignored; reception
		 * would stall until memory frees up — confirm intent. */
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}
-
-static int ar_context_init(struct ar_context *ctx,
-                          struct fw_ohci *ohci, u32 regs)
-{
-       struct ar_buffer ab;
-
-       ctx->regs        = regs;
-       ctx->ohci        = ohci;
-       ctx->last_buffer = &ab;
-       tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
-
-       ar_context_add_page(ctx);
-       ar_context_add_page(ctx);
-       ctx->current_buffer = ab.next;
-       ctx->pointer = ctx->current_buffer->data;
-
-       return 0;
-}
-
/*
 * Start AR DMA: point the context's CommandPtr at the current buffer's
 * descriptor (low bit = Z=1) and set the RUN bit.
 */
static void ar_context_run(struct ar_context *ctx)
{
	struct ar_buffer *ab = ctx->current_buffer;
	dma_addr_t ab_bus;
	size_t offset;

	/* Recover the page's bus address from the descriptor's data_address. */
	offset = offsetof(struct ar_buffer, data);
	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}
-
-static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
-{
-       int b, key;
-
-       b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
-       key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
-
-       /* figure out which descriptor the branch address goes in */
-       if (z == 2 && (b == 3 || key == 2))
-               return d;
-       else
-               return d + z - 1;
-}
-
/*
 * Generic DMA-context bottom half: follow the chain of branch addresses
 * from the last completed descriptor, invoke the context's callback for
 * each descriptor block (stopping when the callback returns 0), and
 * recycle fully consumed descriptor buffers onto the free list.
 */
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;	/* low nibble encodes block size Z */
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
				address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}
-
/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 * Returns 0 on success, -ENOMEM on allocation failure or when the
 * context already holds 16 MB of descriptor memory.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
			&bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	/* Descriptors start after the bookkeeping fields at the page head. */
	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}
-
/*
 * Initialize a DMA context: allocate the first descriptor buffer, set up
 * the tasklet, and install a dummy already-completed OUTPUT_LAST
 * descriptor to serve as the anchor for appended DMA programs.
 * Returns 0 on success, -ENOMEM if no buffer could be allocated.
 */
static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;

	return 0;
}
-
-static void context_release(struct context *ctx)
-{
-       struct fw_card *card = &ctx->ohci->card;
-       struct descriptor_buffer *desc, *tmp;
-
-       list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
-               dma_free_coherent(card->device, PAGE_SIZE, desc,
-                       desc->buffer_bus -
-                       ((void *)&desc->buffer - (void *)desc));
-}
-
/*
 * Reserve room for @z contiguous descriptors in the context's tail
 * buffer, advancing to (or allocating) the next free buffer when the
 * current one is full.  Returns a zeroed descriptor array with its bus
 * address in *d_bus, or NULL if @z can never fit in one buffer or
 * allocation fails.  Must be called with ohci->lock held.
 */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}
-
/*
 * Start the context: load CommandPtr from the last descriptor's branch
 * address, clear all control bits, then set RUN plus any @extra flags.
 */
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}
-
/*
 * Commit a block of @z descriptors (plus @extra reserved ones) that was
 * just written at @d: account for the space in the tail buffer, link the
 * previous program's branch address to the new block (low nibble = Z),
 * and wake the context so the controller fetches it.
 */
static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);
	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev = find_branch_descriptor(d, z);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}
-
/*
 * Stop the context: clear the RUN bit, then poll up to ~10 ms (10 x 1 ms)
 * for the controller to deassert ACTIVE.  Logs an error with the last
 * control register value if the context refuses to stop.
 */
static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		mdelay(1);	/* busy-wait; callers may be in atomic context */
	}
	fw_error("Error: DMA context still active (0x%08x)\n", reg);
}
-
/*
 * Private per-packet data embedded in the fourth descriptor (d[3]) of an
 * AT DMA program, linking the program back to its struct fw_packet.
 */
struct driver_data {
	struct fw_packet *packet;
};
-
/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 * Returns 0 on success; on failure returns -1 with packet->ack set to an
 * RCODE_* error for the caller to report through the packet callback.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;
	u32 reg;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	/* d[0] carries the immediate header; the timestamp rides in res_count. */
	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
	 */

	header = (__le32 *) &d[1];
	switch (packet->header_length) {
	case 16:
	case 12:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			/* quadlet data stays in bus order */
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;

	case 8:
		/* PHY packet: synthesize the leading quadlet */
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0]);
		header[2] = cpu_to_le32(packet->header[1]);
		d[0].req_count = cpu_to_le16(12);
		break;

	case 4:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		/* BUG(); */
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	/* d[3] is reserved as per-packet driver data, see struct driver_data. */
	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		payload_bus =
			dma_map_single(ohci->card.device, packet->payload,
				       packet->payload_length, DMA_TO_DEVICE);
		if (dma_mapping_error(ohci->card.device, payload_bus)) {
			packet->ack = RCODE_SEND_ERROR;
			return -1;
		}
		packet->payload_bus = payload_bus;

		d[2].req_count    = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/*
	 * If the controller and packet generations don't match, we need to
	 * bail out and try again.  If IntEvent.busReset is set, the AT context
	 * is halted, so appending to the context and trying to run it is
	 * futile.  Most controllers do the right thing and just flush the AT
	 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
	 * up stalling out.  So we just bail out in software and try again
	 * later, and everyone is happy.
	 * FIXME: Document how the locking works.
	 */
	if (ohci->generation != packet->generation ||
	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
		if (packet->payload_length > 0)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	/* If the context isn't already running, start it up. */
	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
	if ((reg & CONTEXT_RUN) == 0)
		context_run(ctx, 0);

	return 0;
}
-
/*
 * Context callback for AT contexts: complete one transmitted packet.
 * Returns 0 if @last is not yet done (stop iterating), 1 to continue.
 * Unmaps the payload, translates the hardware evt/ack code into
 * packet->ack, and invokes the packet's completion callback.
 */
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	/* NOTE(review): a payload mapped at bus address 0 would be
	 * skipped here; assumes 0 is never a valid mapping — confirm. */
	if (packet->payload_bus)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event('T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A flushed packet should give the same error as when
		 * we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/*
		 * Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks.
		 */
		packet->ack = RCODE_NO_ACK;
		break;

	/* Hardware reports IEEE 1394 ack codes offset by 0x10 in evt. */
	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}
-
/* Field extractors for IEEE 1394 asynchronous packet header quadlets. */
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
-
/*
 * Serve a request addressed to our own config ROM locally (loopback),
 * without going out on the bus.  Only read requests within
 * CONFIG_ROM_SIZE succeed; others get RCODE_TYPE_ERROR or
 * RCODE_ADDRESS_ERROR.  The response is fed straight back to the core.
 */
static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;	/* byte offset into the config ROM */
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}
-
-static void handle_local_lock(struct fw_ohci *ohci,
-                             struct fw_packet *packet, u32 csr)
-{
-       struct fw_packet response;
-       int tcode, length, ext_tcode, sel;
-       __be32 *payload, lock_old;
-       u32 lock_arg, lock_data;
-
-       tcode = HEADER_GET_TCODE(packet->header[0]);
-       length = HEADER_GET_DATA_LENGTH(packet->header[3]);
-       payload = packet->payload;
-       ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
-
-       if (tcode == TCODE_LOCK_REQUEST &&
-           ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
-               lock_arg = be32_to_cpu(payload[0]);
-               lock_data = be32_to_cpu(payload[1]);
-       } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
-               lock_arg = 0;
-               lock_data = 0;
-       } else {
-               fw_fill_response(&response, packet->header,
-                                RCODE_TYPE_ERROR, NULL, 0);
-               goto out;
-       }
-
-       sel = (csr - CSR_BUS_MANAGER_ID) / 4;
-       reg_write(ohci, OHCI1394_CSRData, lock_data);
-       reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
-       reg_write(ohci, OHCI1394_CSRControl, sel);
-
-       if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
-               lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
-       else
-               fw_notify("swap not done yet\n");
-
-       fw_fill_response(&response, packet->header,
-                        RCODE_COMPLETE, &lock_old, sizeof(lock_old));
- out:
-       fw_core_handle_response(&ohci->card, &response);
-}
-
/*
 * Dispatch a packet whose destination is this node itself.  Config-ROM
 * reads and serialized-CSR locks are handled locally; everything else is
 * passed to the core as a normal request/response.  For the AT request
 * context the ack_pending callback fires before handling; for the AT
 * response context ack_complete fires afterwards.
 */
static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	/* Reassemble the 48-bit destination offset from the header. */
	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}
-
-static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
-{
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&ctx->ohci->lock, flags);
-
-       if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
-           ctx->ohci->generation == packet->generation) {
-               spin_unlock_irqrestore(&ctx->ohci->lock, flags);
-               handle_local_request(ctx, packet);
-               return;
-       }
-
-       ret = at_context_queue_packet(ctx, packet);
-       spin_unlock_irqrestore(&ctx->ohci->lock, flags);
-
-       if (ret < 0)
-               packet->callback(packet, &ctx->ohci->card, packet->ack);
-
-}
-
-static void bus_reset_tasklet(unsigned long data)
-{
-       struct fw_ohci *ohci = (struct fw_ohci *)data;
-       int self_id_count, i, j, reg;
-       int generation, new_generation;
-       unsigned long flags;
-       void *free_rom = NULL;
-       dma_addr_t free_rom_bus = 0;
-
-       reg = reg_read(ohci, OHCI1394_NodeID);
-       if (!(reg & OHCI1394_NodeID_idValid)) {
-               fw_notify("node ID not valid, new bus reset in progress\n");
-               return;
-       }
-       if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
-               fw_notify("malconfigured bus\n");
-               return;
-       }
-       ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
-                              OHCI1394_NodeID_nodeNumber);
-
-       reg = reg_read(ohci, OHCI1394_SelfIDCount);
-       if (reg & OHCI1394_SelfIDCount_selfIDError) {
-               fw_notify("inconsistent self IDs\n");
-               return;
-       }
-       /*
-        * The count in the SelfIDCount register is the number of
-        * bytes in the self ID receive buffer.  Since we also receive
-        * the inverted quadlets and a header quadlet, we shift one
-        * bit extra to get the actual number of self IDs.
-        */
-       self_id_count = (reg >> 3) & 0x3ff;
-       if (self_id_count == 0) {
-               fw_notify("inconsistent self IDs\n");
-               return;
-       }
-       generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
-       rmb();
-
-       for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
-               if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
-                       fw_notify("inconsistent self IDs\n");
-                       return;
-               }
-               ohci->self_id_buffer[j] =
-                               cond_le32_to_cpu(ohci->self_id_cpu[i]);
-       }
-       rmb();
-
-       /*
-        * Check the consistency of the self IDs we just read.  The
-        * problem we face is that a new bus reset can start while we
-        * read out the self IDs from the DMA buffer. If this happens,
-        * the DMA buffer will be overwritten with new self IDs and we
-        * will read out inconsistent data.  The OHCI specification
-        * (section 11.2) recommends a technique similar to
-        * linux/seqlock.h, where we remember the generation of the
-        * self IDs in the buffer before reading them out and compare
-        * it to the current generation after reading them out.  If
-        * the two generations match we know we have a consistent set
-        * of self IDs.
-        */
-
-       new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
-       if (new_generation != generation) {
-               fw_notify("recursive bus reset detected, "
-                         "discarding self ids\n");
-               return;
-       }
-
-       /* FIXME: Document how the locking works. */
-       spin_lock_irqsave(&ohci->lock, flags);
-
-       ohci->generation = generation;
-       context_stop(&ohci->at_request_ctx);
-       context_stop(&ohci->at_response_ctx);
-       reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
-
-       if (ohci->bus_reset_packet_quirk)
-               ohci->request_generation = generation;
-
-       /*
-        * This next bit is unrelated to the AT context stuff but we
-        * have to do it under the spinlock also.  If a new config rom
-        * was set up before this reset, the old one is now no longer
-        * in use and we can free it. Update the config rom pointers
-        * to point to the current config rom and clear the
-        * next_config_rom pointer so a new udpate can take place.
-        */
-
-       if (ohci->next_config_rom != NULL) {
-               if (ohci->next_config_rom != ohci->config_rom) {
-                       free_rom      = ohci->config_rom;
-                       free_rom_bus  = ohci->config_rom_bus;
-               }
-               ohci->config_rom      = ohci->next_config_rom;
-               ohci->config_rom_bus  = ohci->next_config_rom_bus;
-               ohci->next_config_rom = NULL;
-
-               /*
-                * Restore config_rom image and manually update
-                * config_rom registers.  Writing the header quadlet
-                * will indicate that the config rom is ready, so we
-                * do that last.
-                */
-               reg_write(ohci, OHCI1394_BusOptions,
-                         be32_to_cpu(ohci->config_rom[2]));
-               ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
-               reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
-       }
-
-#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
-       reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
-       reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
-#endif
-
-       spin_unlock_irqrestore(&ohci->lock, flags);
-
-       if (free_rom)
-               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                 free_rom, free_rom_bus);
-
-       log_selfids(ohci->node_id, generation,
-                   self_id_count, ohci->self_id_buffer);
-
-       fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
-                                self_id_count, ohci->self_id_buffer);
-}
-
-static irqreturn_t irq_handler(int irq, void *data)
-{
-       struct fw_ohci *ohci = data;
-       u32 event, iso_event, cycle_time;
-       int i;
-
-       event = reg_read(ohci, OHCI1394_IntEventClear);
-
-       if (!event || !~event)
-               return IRQ_NONE;
-
-       /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
-       reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
-       log_irqs(event);
-
-       if (event & OHCI1394_selfIDComplete)
-               tasklet_schedule(&ohci->bus_reset_tasklet);
-
-       if (event & OHCI1394_RQPkt)
-               tasklet_schedule(&ohci->ar_request_ctx.tasklet);
-
-       if (event & OHCI1394_RSPkt)
-               tasklet_schedule(&ohci->ar_response_ctx.tasklet);
-
-       if (event & OHCI1394_reqTxComplete)
-               tasklet_schedule(&ohci->at_request_ctx.tasklet);
-
-       if (event & OHCI1394_respTxComplete)
-               tasklet_schedule(&ohci->at_response_ctx.tasklet);
-
-       iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
-       reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
-
-       while (iso_event) {
-               i = ffs(iso_event) - 1;
-               tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
-               iso_event &= ~(1 << i);
-       }
-
-       iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
-       reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
-
-       while (iso_event) {
-               i = ffs(iso_event) - 1;
-               tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
-               iso_event &= ~(1 << i);
-       }
-
-       if (unlikely(event & OHCI1394_regAccessFail))
-               fw_error("Register access failure - "
-                        "please notify linux1394-devel@lists.sf.net\n");
-
-       if (unlikely(event & OHCI1394_postedWriteErr))
-               fw_error("PCI posted write error\n");
-
-       if (unlikely(event & OHCI1394_cycleTooLong)) {
-               if (printk_ratelimit())
-                       fw_notify("isochronous cycle too long\n");
-               reg_write(ohci, OHCI1394_LinkControlSet,
-                         OHCI1394_LinkControl_cycleMaster);
-       }
-
-       if (event & OHCI1394_cycle64Seconds) {
-               cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
-               if ((cycle_time & 0x80000000) == 0)
-                       atomic_inc(&ohci->bus_seconds);
-       }
-
-       return IRQ_HANDLED;
-}
-
-static int software_reset(struct fw_ohci *ohci)
-{
-       int i;
-
-       reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
-
-       for (i = 0; i < OHCI_LOOP_COUNT; i++) {
-               if ((reg_read(ohci, OHCI1394_HCControlSet) &
-                    OHCI1394_HCControl_softReset) == 0)
-                       return 0;
-               msleep(1);
-       }
-
-       return -EBUSY;
-}
-
-static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-       struct pci_dev *dev = to_pci_dev(card->device);
-       u32 lps;
-       int i;
-
-       if (software_reset(ohci)) {
-               fw_error("Failed to reset ohci card.\n");
-               return -EBUSY;
-       }
-
-       /*
-        * Now enable LPS, which we need in order to start accessing
-        * most of the registers.  In fact, on some cards (ALI M5251),
-        * accessing registers in the SClk domain without LPS enabled
-        * will lock up the machine.  Wait 50msec to make sure we have
-        * full link enabled.  However, with some cards (well, at least
-        * a JMicron PCIe card), we have to try again sometimes.
-        */
-       reg_write(ohci, OHCI1394_HCControlSet,
-                 OHCI1394_HCControl_LPS |
-                 OHCI1394_HCControl_postedWriteEnable);
-       flush_writes(ohci);
-
-       for (lps = 0, i = 0; !lps && i < 3; i++) {
-               msleep(50);
-               lps = reg_read(ohci, OHCI1394_HCControlSet) &
-                     OHCI1394_HCControl_LPS;
-       }
-
-       if (!lps) {
-               fw_error("Failed to set Link Power Status\n");
-               return -EIO;
-       }
-
-       reg_write(ohci, OHCI1394_HCControlClear,
-                 OHCI1394_HCControl_noByteSwapData);
-
-       reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
-       reg_write(ohci, OHCI1394_LinkControlClear,
-                 OHCI1394_LinkControl_rcvPhyPkt);
-       reg_write(ohci, OHCI1394_LinkControlSet,
-                 OHCI1394_LinkControl_rcvSelfID |
-                 OHCI1394_LinkControl_cycleTimerEnable |
-                 OHCI1394_LinkControl_cycleMaster);
-
-       reg_write(ohci, OHCI1394_ATRetries,
-                 OHCI1394_MAX_AT_REQ_RETRIES |
-                 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
-                 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
-
-       ar_context_run(&ohci->ar_request_ctx);
-       ar_context_run(&ohci->ar_response_ctx);
-
-       reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
-       reg_write(ohci, OHCI1394_IntEventClear, ~0);
-       reg_write(ohci, OHCI1394_IntMaskClear, ~0);
-       reg_write(ohci, OHCI1394_IntMaskSet,
-                 OHCI1394_selfIDComplete |
-                 OHCI1394_RQPkt | OHCI1394_RSPkt |
-                 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
-                 OHCI1394_isochRx | OHCI1394_isochTx |
-                 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
-                 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
-                 OHCI1394_masterIntEnable);
-       if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
-               reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
-
-       /* Activate link_on bit and contender bit in our self ID packets.*/
-       if (ohci_update_phy_reg(card, 4, 0,
-                               PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
-               return -EIO;
-
-       /*
-        * When the link is not yet enabled, the atomic config rom
-        * update mechanism described below in ohci_set_config_rom()
-        * is not active.  We have to update ConfigRomHeader and
-        * BusOptions manually, and the write to ConfigROMmap takes
-        * effect immediately.  We tie this to the enabling of the
-        * link, so we have a valid config rom before enabling - the
-        * OHCI requires that ConfigROMhdr and BusOptions have valid
-        * values before enabling.
-        *
-        * However, when the ConfigROMmap is written, some controllers
-        * always read back quadlets 0 and 2 from the config rom to
-        * the ConfigRomHeader and BusOptions registers on bus reset.
-        * They shouldn't do that in this initial case where the link
-        * isn't enabled.  This means we have to use the same
-        * workaround here, setting the bus header to 0 and then write
-        * the right values in the bus reset tasklet.
-        */
-
-       if (config_rom) {
-               ohci->next_config_rom =
-                       dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                          &ohci->next_config_rom_bus,
-                                          GFP_KERNEL);
-               if (ohci->next_config_rom == NULL)
-                       return -ENOMEM;
-
-               memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
-               fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
-       } else {
-               /*
-                * In the suspend case, config_rom is NULL, which
-                * means that we just reuse the old config rom.
-                */
-               ohci->next_config_rom = ohci->config_rom;
-               ohci->next_config_rom_bus = ohci->config_rom_bus;
-       }
-
-       ohci->next_header = be32_to_cpu(ohci->next_config_rom[0]);
-       ohci->next_config_rom[0] = 0;
-       reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
-       reg_write(ohci, OHCI1394_BusOptions,
-                 be32_to_cpu(ohci->next_config_rom[2]));
-       reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
-
-       reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
-
-       if (request_irq(dev->irq, irq_handler,
-                       IRQF_SHARED, ohci_driver_name, ohci)) {
-               fw_error("Failed to allocate shared interrupt %d.\n",
-                        dev->irq);
-               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                 ohci->config_rom, ohci->config_rom_bus);
-               return -EIO;
-       }
-
-       reg_write(ohci, OHCI1394_HCControlSet,
-                 OHCI1394_HCControl_linkEnable |
-                 OHCI1394_HCControl_BIBimageValid);
-       flush_writes(ohci);
-
-       /*
-        * We are ready to go, initiate bus reset to finish the
-        * initialization.
-        */
-
-       fw_core_initiate_bus_reset(&ohci->card, 1);
-
-       return 0;
-}
-
-static int ohci_set_config_rom(struct fw_card *card,
-                              u32 *config_rom, size_t length)
-{
-       struct fw_ohci *ohci;
-       unsigned long flags;
-       int ret = -EBUSY;
-       __be32 *next_config_rom;
-       dma_addr_t uninitialized_var(next_config_rom_bus);
-
-       ohci = fw_ohci(card);
-
-       /*
-        * When the OHCI controller is enabled, the config rom update
-        * mechanism is a bit tricky, but easy enough to use.  See
-        * section 5.5.6 in the OHCI specification.
-        *
-        * The OHCI controller caches the new config rom address in a
-        * shadow register (ConfigROMmapNext) and needs a bus reset
-        * for the changes to take place.  When the bus reset is
-        * detected, the controller loads the new values for the
-        * ConfigRomHeader and BusOptions registers from the specified
-        * config rom and loads ConfigROMmap from the ConfigROMmapNext
-        * shadow register. All automatically and atomically.
-        *
-        * Now, there's a twist to this story.  The automatic load of
-        * ConfigRomHeader and BusOptions doesn't honor the
-        * noByteSwapData bit, so with a be32 config rom, the
-        * controller will load be32 values in to these registers
-        * during the atomic update, even on litte endian
-        * architectures.  The workaround we use is to put a 0 in the
-        * header quadlet; 0 is endian agnostic and means that the
-        * config rom isn't ready yet.  In the bus reset tasklet we
-        * then set up the real values for the two registers.
-        *
-        * We use ohci->lock to avoid racing with the code that sets
-        * ohci->next_config_rom to NULL (see bus_reset_tasklet).
-        */
-
-       next_config_rom =
-               dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                  &next_config_rom_bus, GFP_KERNEL);
-       if (next_config_rom == NULL)
-               return -ENOMEM;
-
-       spin_lock_irqsave(&ohci->lock, flags);
-
-       if (ohci->next_config_rom == NULL) {
-               ohci->next_config_rom = next_config_rom;
-               ohci->next_config_rom_bus = next_config_rom_bus;
-
-               memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
-               fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
-                                 length * 4);
-
-               ohci->next_header = config_rom[0];
-               ohci->next_config_rom[0] = 0;
-
-               reg_write(ohci, OHCI1394_ConfigROMmap,
-                         ohci->next_config_rom_bus);
-               ret = 0;
-       }
-
-       spin_unlock_irqrestore(&ohci->lock, flags);
-
-       /*
-        * Now initiate a bus reset to have the changes take
-        * effect. We clean up the old config rom memory and DMA
-        * mappings in the bus reset tasklet, since the OHCI
-        * controller could need to access it before the bus reset
-        * takes effect.
-        */
-       if (ret == 0)
-               fw_core_initiate_bus_reset(&ohci->card, 1);
-       else
-               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                 next_config_rom, next_config_rom_bus);
-
-       return ret;
-}
-
-static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-
-       at_context_transmit(&ohci->at_request_ctx, packet);
-}
-
-static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-
-       at_context_transmit(&ohci->at_response_ctx, packet);
-}
-
-static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-       struct context *ctx = &ohci->at_request_ctx;
-       struct driver_data *driver_data = packet->driver_data;
-       int ret = -ENOENT;
-
-       tasklet_disable(&ctx->tasklet);
-
-       if (packet->ack != 0)
-               goto out;
-
-       if (packet->payload_bus)
-               dma_unmap_single(ohci->card.device, packet->payload_bus,
-                                packet->payload_length, DMA_TO_DEVICE);
-
-       log_ar_at_event('T', packet->speed, packet->header, 0x20);
-       driver_data->packet = NULL;
-       packet->ack = RCODE_CANCELLED;
-       packet->callback(packet, &ohci->card, packet->ack);
-       ret = 0;
- out:
-       tasklet_enable(&ctx->tasklet);
-
-       return ret;
-}
-
-static int ohci_enable_phys_dma(struct fw_card *card,
-                               int node_id, int generation)
-{
-#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
-       return 0;
-#else
-       struct fw_ohci *ohci = fw_ohci(card);
-       unsigned long flags;
-       int n, ret = 0;
-
-       /*
-        * FIXME:  Make sure this bitmask is cleared when we clear the busReset
-        * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
-        */
-
-       spin_lock_irqsave(&ohci->lock, flags);
-
-       if (ohci->generation != generation) {
-               ret = -ESTALE;
-               goto out;
-       }
-
-       /*
-        * Note, if the node ID contains a non-local bus ID, physical DMA is
-        * enabled for _all_ nodes on remote buses.
-        */
-
-       n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
-       if (n < 32)
-               reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
-       else
-               reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
-
-       flush_writes(ohci);
- out:
-       spin_unlock_irqrestore(&ohci->lock, flags);
-
-       return ret;
-#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
-}
-
-static u64 ohci_get_bus_time(struct fw_card *card)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-       u32 cycle_time;
-       u64 bus_time;
-
-       cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
-       bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time;
-
-       return bus_time;
-}
-
-static void copy_iso_headers(struct iso_context *ctx, void *p)
-{
-       int i = ctx->header_length;
-
-       if (i + ctx->base.header_size > PAGE_SIZE)
-               return;
-
-       /*
-        * The iso header is byteswapped to little endian by
-        * the controller, but the remaining header quadlets
-        * are big endian.  We want to present all the headers
-        * as big endian, so we have to swap the first quadlet.
-        */
-       if (ctx->base.header_size > 0)
-               *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
-       if (ctx->base.header_size > 4)
-               *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
-       if (ctx->base.header_size > 8)
-               memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
-       ctx->header_length += ctx->base.header_size;
-}
-
-static int handle_ir_dualbuffer_packet(struct context *context,
-                                      struct descriptor *d,
-                                      struct descriptor *last)
-{
-       struct iso_context *ctx =
-               container_of(context, struct iso_context, context);
-       struct db_descriptor *db = (struct db_descriptor *) d;
-       __le32 *ir_header;
-       size_t header_length;
-       void *p, *end;
-
-       if (db->first_res_count != 0 && db->second_res_count != 0) {
-               if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
-                       /* This descriptor isn't done yet, stop iteration. */
-                       return 0;
-               }
-               ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
-       }
-
-       header_length = le16_to_cpu(db->first_req_count) -
-               le16_to_cpu(db->first_res_count);
-
-       p = db + 1;
-       end = p + header_length;
-       while (p < end) {
-               copy_iso_headers(ctx, p);
-               ctx->excess_bytes +=
-                       (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
-               p += max(ctx->base.header_size, (size_t)8);
-       }
-
-       ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
-               le16_to_cpu(db->second_res_count);
-
-       if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
-               ir_header = (__le32 *) (db + 1);
-               ctx->base.callback(&ctx->base,
-                                  le32_to_cpu(ir_header[0]) & 0xffff,
-                                  ctx->header_length, ctx->header,
-                                  ctx->base.callback_data);
-               ctx->header_length = 0;
-       }
-
-       return 1;
-}
-
-static int handle_ir_packet_per_buffer(struct context *context,
-                                      struct descriptor *d,
-                                      struct descriptor *last)
-{
-       struct iso_context *ctx =
-               container_of(context, struct iso_context, context);
-       struct descriptor *pd;
-       __le32 *ir_header;
-       void *p;
-
-       for (pd = d; pd <= last; pd++) {
-               if (pd->transfer_status)
-                       break;
-       }
-       if (pd > last)
-               /* Descriptor(s) not done yet, stop iteration */
-               return 0;
-
-       p = last + 1;
-       copy_iso_headers(ctx, p);
-
-       if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
-               ir_header = (__le32 *) p;
-               ctx->base.callback(&ctx->base,
-                                  le32_to_cpu(ir_header[0]) & 0xffff,
-                                  ctx->header_length, ctx->header,
-                                  ctx->base.callback_data);
-               ctx->header_length = 0;
-       }
-
-       return 1;
-}
-
-static int handle_it_packet(struct context *context,
-                           struct descriptor *d,
-                           struct descriptor *last)
-{
-       struct iso_context *ctx =
-               container_of(context, struct iso_context, context);
-
-       if (last->transfer_status == 0)
-               /* This descriptor isn't done yet, stop iteration. */
-               return 0;
-
-       if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
-               ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
-                                  0, NULL, ctx->base.callback_data);
-
-       return 1;
-}
-
-static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
-                               int type, int channel, size_t header_size)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-       struct iso_context *ctx, *list;
-       descriptor_callback_t callback;
-       u64 *channels, dont_care = ~0ULL;
-       u32 *mask, regs;
-       unsigned long flags;
-       int index, ret = -ENOMEM;
-
-       if (type == FW_ISO_CONTEXT_TRANSMIT) {
-               channels = &dont_care;
-               mask = &ohci->it_context_mask;
-               list = ohci->it_context_list;
-               callback = handle_it_packet;
-       } else {
-               channels = &ohci->ir_context_channels;
-               mask = &ohci->ir_context_mask;
-               list = ohci->ir_context_list;
-               if (ohci->use_dualbuffer)
-                       callback = handle_ir_dualbuffer_packet;
-               else
-                       callback = handle_ir_packet_per_buffer;
-       }
-
-       spin_lock_irqsave(&ohci->lock, flags);
-       index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
-       if (index >= 0) {
-               *channels &= ~(1ULL << channel);
-               *mask &= ~(1 << index);
-       }
-       spin_unlock_irqrestore(&ohci->lock, flags);
-
-       if (index < 0)
-               return ERR_PTR(-EBUSY);
-
-       if (type == FW_ISO_CONTEXT_TRANSMIT)
-               regs = OHCI1394_IsoXmitContextBase(index);
-       else
-               regs = OHCI1394_IsoRcvContextBase(index);
-
-       ctx = &list[index];
-       memset(ctx, 0, sizeof(*ctx));
-       ctx->header_length = 0;
-       ctx->header = (void *) __get_free_page(GFP_KERNEL);
-       if (ctx->header == NULL)
-               goto out;
-
-       ret = context_init(&ctx->context, ohci, regs, callback);
-       if (ret < 0)
-               goto out_with_header;
-
-       return &ctx->base;
-
- out_with_header:
-       free_page((unsigned long)ctx->header);
- out:
-       spin_lock_irqsave(&ohci->lock, flags);
-       *mask |= 1 << index;
-       spin_unlock_irqrestore(&ohci->lock, flags);
-
-       return ERR_PTR(ret);
-}
-
-static int ohci_start_iso(struct fw_iso_context *base,
-                         s32 cycle, u32 sync, u32 tags)
-{
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       struct fw_ohci *ohci = ctx->context.ohci;
-       u32 control, match;
-       int index;
-
-       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
-               index = ctx - ohci->it_context_list;
-               match = 0;
-               if (cycle >= 0)
-                       match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
-                               (cycle & 0x7fff) << 16;
-
-               reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
-               reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
-               context_run(&ctx->context, match);
-       } else {
-               index = ctx - ohci->ir_context_list;
-               control = IR_CONTEXT_ISOCH_HEADER;
-               if (ohci->use_dualbuffer)
-                       control |= IR_CONTEXT_DUAL_BUFFER_MODE;
-               match = (tags << 28) | (sync << 8) | ctx->base.channel;
-               if (cycle >= 0) {
-                       match |= (cycle & 0x07fff) << 12;
-                       control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
-               }
-
-               reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
-               reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
-               reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
-               context_run(&ctx->context, control);
-       }
-
-       return 0;
-}
-
-static int ohci_stop_iso(struct fw_iso_context *base)
-{
-       struct fw_ohci *ohci = fw_ohci(base->card);
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       int index;
-
-       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
-               index = ctx - ohci->it_context_list;
-               reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
-       } else {
-               index = ctx - ohci->ir_context_list;
-               reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
-       }
-       flush_writes(ohci);
-       context_stop(&ctx->context);
-
-       return 0;
-}
-
-static void ohci_free_iso_context(struct fw_iso_context *base)
-{
-       struct fw_ohci *ohci = fw_ohci(base->card);
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       unsigned long flags;
-       int index;
-
-       ohci_stop_iso(base);
-       context_release(&ctx->context);
-       free_page((unsigned long)ctx->header);
-
-       spin_lock_irqsave(&ohci->lock, flags);
-
-       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
-               index = ctx - ohci->it_context_list;
-               ohci->it_context_mask |= 1 << index;
-       } else {
-               index = ctx - ohci->ir_context_list;
-               ohci->ir_context_mask |= 1 << index;
-               ohci->ir_context_channels |= 1ULL << base->channel;
-       }
-
-       spin_unlock_irqrestore(&ohci->lock, flags);
-}
-
-static int ohci_queue_iso_transmit(struct fw_iso_context *base,
-                                  struct fw_iso_packet *packet,
-                                  struct fw_iso_buffer *buffer,
-                                  unsigned long payload)
-{
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       struct descriptor *d, *last, *pd;
-       struct fw_iso_packet *p;
-       __le32 *header;
-       dma_addr_t d_bus, page_bus;
-       u32 z, header_z, payload_z, irq;
-       u32 payload_index, payload_end_index, next_page_index;
-       int page, end_page, i, length, offset;
-
-       /*
-        * FIXME: Cycle lost behavior should be configurable: lose
-        * packet, retransmit or terminate..
-        */
-
-       p = packet;
-       payload_index = payload;
-
-       if (p->skip)
-               z = 1;
-       else
-               z = 2;
-       if (p->header_length > 0)
-               z++;
-
-       /* Determine the first page the payload isn't contained in. */
-       end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
-       if (p->payload_length > 0)
-               payload_z = end_page - (payload_index >> PAGE_SHIFT);
-       else
-               payload_z = 0;
-
-       z += payload_z;
-
-       /* Get header size in number of descriptors. */
-       header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
-
-       d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
-       if (d == NULL)
-               return -ENOMEM;
-
-       if (!p->skip) {
-               d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
-               d[0].req_count = cpu_to_le16(8);
-
-               header = (__le32 *) &d[1];
-               header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
-                                       IT_HEADER_TAG(p->tag) |
-                                       IT_HEADER_TCODE(TCODE_STREAM_DATA) |
-                                       IT_HEADER_CHANNEL(ctx->base.channel) |
-                                       IT_HEADER_SPEED(ctx->base.speed));
-               header[1] =
-                       cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
-                                                         p->payload_length));
-       }
-
-       if (p->header_length > 0) {
-               d[2].req_count    = cpu_to_le16(p->header_length);
-               d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
-               memcpy(&d[z], p->header, p->header_length);
-       }
-
-       pd = d + z - payload_z;
-       payload_end_index = payload_index + p->payload_length;
-       for (i = 0; i < payload_z; i++) {
-               page               = payload_index >> PAGE_SHIFT;
-               offset             = payload_index & ~PAGE_MASK;
-               next_page_index    = (page + 1) << PAGE_SHIFT;
-               length             =
-                       min(next_page_index, payload_end_index) - payload_index;
-               pd[i].req_count    = cpu_to_le16(length);
-
-               page_bus = page_private(buffer->pages[page]);
-               pd[i].data_address = cpu_to_le32(page_bus + offset);
-
-               payload_index += length;
-       }
-
-       if (p->interrupt)
-               irq = DESCRIPTOR_IRQ_ALWAYS;
-       else
-               irq = DESCRIPTOR_NO_IRQ;
-
-       last = z == 2 ? d : d + z - 1;
-       last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
-                                    DESCRIPTOR_STATUS |
-                                    DESCRIPTOR_BRANCH_ALWAYS |
-                                    irq);
-
-       context_append(&ctx->context, d, z, header_z);
-
-       return 0;
-}
-
-static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
-                                            struct fw_iso_packet *packet,
-                                            struct fw_iso_buffer *buffer,
-                                            unsigned long payload)
-{
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       struct db_descriptor *db = NULL;
-       struct descriptor *d;
-       struct fw_iso_packet *p;
-       dma_addr_t d_bus, page_bus;
-       u32 z, header_z, length, rest;
-       int page, offset, packet_count, header_size;
-
-       /*
-        * FIXME: Cycle lost behavior should be configurable: lose
-        * packet, retransmit or terminate..
-        */
-
-       p = packet;
-       z = 2;
-
-       /*
-        * The OHCI controller puts the isochronous header and trailer in the
-        * buffer, so we need at least 8 bytes.
-        */
-       packet_count = p->header_length / ctx->base.header_size;
-       header_size = packet_count * max(ctx->base.header_size, (size_t)8);
-
-       /* Get header size in number of descriptors. */
-       header_z = DIV_ROUND_UP(header_size, sizeof(*d));
-       page     = payload >> PAGE_SHIFT;
-       offset   = payload & ~PAGE_MASK;
-       rest     = p->payload_length;
-
-       /* FIXME: make packet-per-buffer/dual-buffer a context option */
-       while (rest > 0) {
-               d = context_get_descriptors(&ctx->context,
-                                           z + header_z, &d_bus);
-               if (d == NULL)
-                       return -ENOMEM;
-
-               db = (struct db_descriptor *) d;
-               db->control = cpu_to_le16(DESCRIPTOR_STATUS |
-                                         DESCRIPTOR_BRANCH_ALWAYS);
-               db->first_size =
-                   cpu_to_le16(max(ctx->base.header_size, (size_t)8));
-               if (p->skip && rest == p->payload_length) {
-                       db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
-                       db->first_req_count = db->first_size;
-               } else {
-                       db->first_req_count = cpu_to_le16(header_size);
-               }
-               db->first_res_count = db->first_req_count;
-               db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
-
-               if (p->skip && rest == p->payload_length)
-                       length = 4;
-               else if (offset + rest < PAGE_SIZE)
-                       length = rest;
-               else
-                       length = PAGE_SIZE - offset;
-
-               db->second_req_count = cpu_to_le16(length);
-               db->second_res_count = db->second_req_count;
-               page_bus = page_private(buffer->pages[page]);
-               db->second_buffer = cpu_to_le32(page_bus + offset);
-
-               if (p->interrupt && length == rest)
-                       db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
-
-               context_append(&ctx->context, d, z, header_z);
-               offset = (offset + length) & ~PAGE_MASK;
-               rest -= length;
-               if (offset == 0)
-                       page++;
-       }
-
-       return 0;
-}
-
-static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
-                                       struct fw_iso_packet *packet,
-                                       struct fw_iso_buffer *buffer,
-                                       unsigned long payload)
-{
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       struct descriptor *d = NULL, *pd = NULL;
-       struct fw_iso_packet *p = packet;
-       dma_addr_t d_bus, page_bus;
-       u32 z, header_z, rest;
-       int i, j, length;
-       int page, offset, packet_count, header_size, payload_per_buffer;
-
-       /*
-        * The OHCI controller puts the isochronous header and trailer in the
-        * buffer, so we need at least 8 bytes.
-        */
-       packet_count = p->header_length / ctx->base.header_size;
-       header_size  = max(ctx->base.header_size, (size_t)8);
-
-       /* Get header size in number of descriptors. */
-       header_z = DIV_ROUND_UP(header_size, sizeof(*d));
-       page     = payload >> PAGE_SHIFT;
-       offset   = payload & ~PAGE_MASK;
-       payload_per_buffer = p->payload_length / packet_count;
-
-       for (i = 0; i < packet_count; i++) {
-               /* d points to the header descriptor */
-               z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
-               d = context_get_descriptors(&ctx->context,
-                               z + header_z, &d_bus);
-               if (d == NULL)
-                       return -ENOMEM;
-
-               d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
-                                             DESCRIPTOR_INPUT_MORE);
-               if (p->skip && i == 0)
-                       d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
-               d->req_count    = cpu_to_le16(header_size);
-               d->res_count    = d->req_count;
-               d->transfer_status = 0;
-               d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
-
-               rest = payload_per_buffer;
-               for (j = 1; j < z; j++) {
-                       pd = d + j;
-                       pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
-                                                 DESCRIPTOR_INPUT_MORE);
-
-                       if (offset + rest < PAGE_SIZE)
-                               length = rest;
-                       else
-                               length = PAGE_SIZE - offset;
-                       pd->req_count = cpu_to_le16(length);
-                       pd->res_count = pd->req_count;
-                       pd->transfer_status = 0;
-
-                       page_bus = page_private(buffer->pages[page]);
-                       pd->data_address = cpu_to_le32(page_bus + offset);
-
-                       offset = (offset + length) & ~PAGE_MASK;
-                       rest -= length;
-                       if (offset == 0)
-                               page++;
-               }
-               pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
-                                         DESCRIPTOR_INPUT_LAST |
-                                         DESCRIPTOR_BRANCH_ALWAYS);
-               if (p->interrupt && i == packet_count - 1)
-                       pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
-
-               context_append(&ctx->context, d, z, header_z);
-       }
-
-       return 0;
-}
-
-static int ohci_queue_iso(struct fw_iso_context *base,
-                         struct fw_iso_packet *packet,
-                         struct fw_iso_buffer *buffer,
-                         unsigned long payload)
-{
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&ctx->context.ohci->lock, flags);
-       if (base->type == FW_ISO_CONTEXT_TRANSMIT)
-               ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
-       else if (ctx->context.ohci->use_dualbuffer)
-               ret = ohci_queue_iso_receive_dualbuffer(base, packet,
-                                                       buffer, payload);
-       else
-               ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
-                                                       buffer, payload);
-       spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
-
-       return ret;
-}
-
-static const struct fw_card_driver ohci_driver = {
-       .enable                 = ohci_enable,
-       .update_phy_reg         = ohci_update_phy_reg,
-       .set_config_rom         = ohci_set_config_rom,
-       .send_request           = ohci_send_request,
-       .send_response          = ohci_send_response,
-       .cancel_packet          = ohci_cancel_packet,
-       .enable_phys_dma        = ohci_enable_phys_dma,
-       .get_bus_time           = ohci_get_bus_time,
-
-       .allocate_iso_context   = ohci_allocate_iso_context,
-       .free_iso_context       = ohci_free_iso_context,
-       .queue_iso              = ohci_queue_iso,
-       .start_iso              = ohci_start_iso,
-       .stop_iso               = ohci_stop_iso,
-};
-
-#ifdef CONFIG_PPC_PMAC
-static void ohci_pmac_on(struct pci_dev *dev)
-{
-       if (machine_is(powermac)) {
-               struct device_node *ofn = pci_device_to_OF_node(dev);
-
-               if (ofn) {
-                       pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
-                       pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
-               }
-       }
-}
-
-static void ohci_pmac_off(struct pci_dev *dev)
-{
-       if (machine_is(powermac)) {
-               struct device_node *ofn = pci_device_to_OF_node(dev);
-
-               if (ofn) {
-                       pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
-                       pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
-               }
-       }
-}
-#else
-#define ohci_pmac_on(dev)
-#define ohci_pmac_off(dev)
-#endif /* CONFIG_PPC_PMAC */
-
-static int __devinit pci_probe(struct pci_dev *dev,
-                              const struct pci_device_id *ent)
-{
-       struct fw_ohci *ohci;
-       u32 bus_options, max_receive, link_speed, version;
-       u64 guid;
-       int err;
-       size_t size;
-
-       ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
-       if (ohci == NULL) {
-               err = -ENOMEM;
-               goto fail;
-       }
-
-       fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
-
-       ohci_pmac_on(dev);
-
-       err = pci_enable_device(dev);
-       if (err) {
-               fw_error("Failed to enable OHCI hardware\n");
-               goto fail_free;
-       }
-
-       pci_set_master(dev);
-       pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
-       pci_set_drvdata(dev, ohci);
-
-       spin_lock_init(&ohci->lock);
-
-       tasklet_init(&ohci->bus_reset_tasklet,
-                    bus_reset_tasklet, (unsigned long)ohci);
-
-       err = pci_request_region(dev, 0, ohci_driver_name);
-       if (err) {
-               fw_error("MMIO resource unavailable\n");
-               goto fail_disable;
-       }
-
-       ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
-       if (ohci->registers == NULL) {
-               fw_error("Failed to remap registers\n");
-               err = -ENXIO;
-               goto fail_iomem;
-       }
-
-       version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
-       ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
-
-/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
-#if !defined(CONFIG_X86_32)
-       /* dual-buffer mode is broken with descriptor addresses above 2G */
-       if (dev->vendor == PCI_VENDOR_ID_TI &&
-           dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
-               ohci->use_dualbuffer = false;
-#endif
-
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
-       ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
-                            dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
-#endif
-       ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
-
-       ar_context_init(&ohci->ar_request_ctx, ohci,
-                       OHCI1394_AsReqRcvContextControlSet);
-
-       ar_context_init(&ohci->ar_response_ctx, ohci,
-                       OHCI1394_AsRspRcvContextControlSet);
-
-       context_init(&ohci->at_request_ctx, ohci,
-                    OHCI1394_AsReqTrContextControlSet, handle_at_packet);
-
-       context_init(&ohci->at_response_ctx, ohci,
-                    OHCI1394_AsRspTrContextControlSet, handle_at_packet);
-
-       reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
-       ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
-       reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
-       size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
-       ohci->it_context_list = kzalloc(size, GFP_KERNEL);
-
-       reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
-       ohci->ir_context_channels = ~0ULL;
-       ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
-       reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
-       size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
-       ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
-
-       if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
-               err = -ENOMEM;
-               goto fail_contexts;
-       }
-
-       /* self-id dma buffer allocation */
-       ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
-                                              SELF_ID_BUF_SIZE,
-                                              &ohci->self_id_bus,
-                                              GFP_KERNEL);
-       if (ohci->self_id_cpu == NULL) {
-               err = -ENOMEM;
-               goto fail_contexts;
-       }
-
-       bus_options = reg_read(ohci, OHCI1394_BusOptions);
-       max_receive = (bus_options >> 12) & 0xf;
-       link_speed = bus_options & 0x7;
-       guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
-               reg_read(ohci, OHCI1394_GUIDLo);
-
-       err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
-       if (err)
-               goto fail_self_id;
-
-       fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
-                 dev_name(&dev->dev), version >> 16, version & 0xff);
-
-       return 0;
-
- fail_self_id:
-       dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
-                         ohci->self_id_cpu, ohci->self_id_bus);
- fail_contexts:
-       kfree(ohci->ir_context_list);
-       kfree(ohci->it_context_list);
-       context_release(&ohci->at_response_ctx);
-       context_release(&ohci->at_request_ctx);
-       ar_context_release(&ohci->ar_response_ctx);
-       ar_context_release(&ohci->ar_request_ctx);
-       pci_iounmap(dev, ohci->registers);
- fail_iomem:
-       pci_release_region(dev, 0);
- fail_disable:
-       pci_disable_device(dev);
- fail_free:
-       kfree(&ohci->card);
-       ohci_pmac_off(dev);
- fail:
-       if (err == -ENOMEM)
-               fw_error("Out of memory\n");
-
-       return err;
-}
-
-static void pci_remove(struct pci_dev *dev)
-{
-       struct fw_ohci *ohci;
-
-       ohci = pci_get_drvdata(dev);
-       reg_write(ohci, OHCI1394_IntMaskClear, ~0);
-       flush_writes(ohci);
-       fw_core_remove_card(&ohci->card);
-
-       /*
-        * FIXME: Fail all pending packets here, now that the upper
-        * layers can't queue any more.
-        */
-
-       software_reset(ohci);
-       free_irq(dev->irq, ohci);
-
-       if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
-               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                 ohci->next_config_rom, ohci->next_config_rom_bus);
-       if (ohci->config_rom)
-               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                 ohci->config_rom, ohci->config_rom_bus);
-       dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
-                         ohci->self_id_cpu, ohci->self_id_bus);
-       ar_context_release(&ohci->ar_request_ctx);
-       ar_context_release(&ohci->ar_response_ctx);
-       context_release(&ohci->at_request_ctx);
-       context_release(&ohci->at_response_ctx);
-       kfree(ohci->it_context_list);
-       kfree(ohci->ir_context_list);
-       pci_iounmap(dev, ohci->registers);
-       pci_release_region(dev, 0);
-       pci_disable_device(dev);
-       kfree(&ohci->card);
-       ohci_pmac_off(dev);
-
-       fw_notify("Removed fw-ohci device.\n");
-}
-
-#ifdef CONFIG_PM
-static int pci_suspend(struct pci_dev *dev, pm_message_t state)
-{
-       struct fw_ohci *ohci = pci_get_drvdata(dev);
-       int err;
-
-       software_reset(ohci);
-       free_irq(dev->irq, ohci);
-       err = pci_save_state(dev);
-       if (err) {
-               fw_error("pci_save_state failed\n");
-               return err;
-       }
-       err = pci_set_power_state(dev, pci_choose_state(dev, state));
-       if (err)
-               fw_error("pci_set_power_state failed with %d\n", err);
-       ohci_pmac_off(dev);
-
-       return 0;
-}
-
-static int pci_resume(struct pci_dev *dev)
-{
-       struct fw_ohci *ohci = pci_get_drvdata(dev);
-       int err;
-
-       ohci_pmac_on(dev);
-       pci_set_power_state(dev, PCI_D0);
-       pci_restore_state(dev);
-       err = pci_enable_device(dev);
-       if (err) {
-               fw_error("pci_enable_device failed\n");
-               return err;
-       }
-
-       return ohci_enable(&ohci->card, NULL, 0);
-}
-#endif
-
-static struct pci_device_id pci_table[] = {
-       { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
-       { }
-};
-
-MODULE_DEVICE_TABLE(pci, pci_table);
-
-static struct pci_driver fw_ohci_pci_driver = {
-       .name           = ohci_driver_name,
-       .id_table       = pci_table,
-       .probe          = pci_probe,
-       .remove         = pci_remove,
-#ifdef CONFIG_PM
-       .resume         = pci_resume,
-       .suspend        = pci_suspend,
-#endif
-};
-
-MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
-MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
-MODULE_LICENSE("GPL");
-
-/* Provide a module alias so root-on-sbp2 initrds don't break. */
-#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
-MODULE_ALIAS("ohci1394");
-#endif
-
-static int __init fw_ohci_init(void)
-{
-       return pci_register_driver(&fw_ohci_pci_driver);
-}
-
-static void __exit fw_ohci_cleanup(void)
-{
-       pci_unregister_driver(&fw_ohci_pci_driver);
-}
-
-module_init(fw_ohci_init);
-module_exit(fw_ohci_cleanup);
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
deleted file mode 100644 (file)
index d41cb6e..0000000
+++ /dev/null
@@ -1,1651 +0,0 @@
-/*
- * SBP2 driver (SCSI over IEEE1394)
- *
- * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- * The basic structure of this driver is based on the old storage driver,
- * drivers/ieee1394/sbp2.c, originally written by
- *     James Goodwin <jamesg@filanet.com>
- * with later contributions and ongoing maintenance from
- *     Ben Collins <bcollins@debian.org>,
- *     Stefan Richter <stefanr@s5r6.in-berlin.de>
- * and many others.
- */
-
-#include <linux/blkdev.h>
-#include <linux/bug.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/stringify.h>
-#include <linux/workqueue.h>
-
-#include <asm/byteorder.h>
-#include <asm/system.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-/*
- * So far only bridges from Oxford Semiconductor are known to support
- * concurrent logins. Depending on firmware, four or two concurrent logins
- * are possible on OXFW911 and newer Oxsemi bridges.
- *
- * Concurrent logins are useful together with cluster filesystems.
- */
-static int sbp2_param_exclusive_login = 1;
-module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
-MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
-                "(default = Y, use N for concurrent initiators)");
-
-/*
- * Flags for firmware oddities
- *
- * - 128kB max transfer
- *   Limit transfer size. Necessary for some old bridges.
- *
- * - 36 byte inquiry
- *   When scsi_mod probes the device, let the inquiry command look like that
- *   from MS Windows.
- *
- * - skip mode page 8
- *   Suppress sending of mode_sense for mode page 8 if the device pretends to
- *   support the SCSI Primary Block commands instead of Reduced Block Commands.
- *
- * - fix capacity
- *   Tell sd_mod to correct the last sector number reported by read_capacity.
- *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
- *   Don't use this with devices which don't have this bug.
- *
- * - delay inquiry
- *   Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
- *
- * - power condition
- *   Set the power condition field in the START STOP UNIT commands sent by
- *   sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
- *   Some disks need this to spin down or to resume properly.
- *
- * - override internal blacklist
- *   Instead of adding to the built-in blacklist, use only the workarounds
- *   specified in the module load parameter.
- *   Useful if a blacklist entry interfered with a non-broken device.
- */
-#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
-#define SBP2_WORKAROUND_INQUIRY_36     0x2
-#define SBP2_WORKAROUND_MODE_SENSE_8   0x4
-#define SBP2_WORKAROUND_FIX_CAPACITY   0x8
-#define SBP2_WORKAROUND_DELAY_INQUIRY  0x10
-#define SBP2_INQUIRY_DELAY             12
-#define SBP2_WORKAROUND_POWER_CONDITION        0x20
-#define SBP2_WORKAROUND_OVERRIDE       0x100
-
-static int sbp2_param_workarounds;
-module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
-MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
-       ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
-       ", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
-       ", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
-       ", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
-       ", delay inquiry = "      __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
-       ", set power condition in start stop unit = "
-                                 __stringify(SBP2_WORKAROUND_POWER_CONDITION)
-       ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
-       ", or a combination)");
-
-/* I don't know why the SCSI stack doesn't define something like this... */
-typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
-
-static const char sbp2_driver_name[] = "sbp2";
-
-/*
- * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
- * and one struct scsi_device per sbp2_logical_unit.
- */
-struct sbp2_logical_unit {
-       struct sbp2_target *tgt;
-       struct list_head link;
-       struct fw_address_handler address_handler;
-       struct list_head orb_list;
-
-       u64 command_block_agent_address;
-       u16 lun;
-       int login_id;
-
-       /*
-        * The generation is updated once we've logged in or reconnected
-        * to the logical unit.  Thus, I/O to the device will automatically
-        * fail and get retried if it happens in a window where the device
-        * is not ready, e.g. after a bus reset but before we reconnect.
-        */
-       int generation;
-       int retries;
-       struct delayed_work work;
-       bool has_sdev;
-       bool blocked;
-};
-
-/*
- * We create one struct sbp2_target per IEEE 1212 Unit Directory
- * and one struct Scsi_Host per sbp2_target.
- */
-struct sbp2_target {
-       struct kref kref;
-       struct fw_unit *unit;
-       const char *bus_id;
-       struct list_head lu_list;
-
-       u64 management_agent_address;
-       u64 guid;
-       int directory_id;
-       int node_id;
-       int address_high;
-       unsigned int workarounds;
-       unsigned int mgt_orb_timeout;
-       unsigned int max_payload;
-
-       int dont_block; /* counter for each logical unit */
-       int blocked;    /* ditto */
-};
-
-/* Impossible login_id, to detect logout attempt before successful login */
-#define INVALID_LOGIN_ID 0x10000
-
-/*
- * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
- * provided in the config rom. Most devices do provide a value, which
- * we'll use for login management orbs, but with some sane limits.
- */
-#define SBP2_MIN_LOGIN_ORB_TIMEOUT     5000U   /* Timeout in ms */
-#define SBP2_MAX_LOGIN_ORB_TIMEOUT     40000U  /* Timeout in ms */
-#define SBP2_ORB_TIMEOUT               2000U   /* Timeout in ms */
-#define SBP2_ORB_NULL                  0x80000000
-#define SBP2_RETRY_LIMIT               0xf             /* 15 retries */
-#define SBP2_CYCLE_LIMIT               (0xc8 << 12)    /* 200 125us cycles */
-
-/*
- * The default maximum s/g segment size of a FireWire controller is
- * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
- * be quadlet-aligned, we set the length limit to 0xffff & ~3.
- */
-#define SBP2_MAX_SEG_SIZE              0xfffc
-
-/* Unit directory keys */
-#define SBP2_CSR_UNIT_CHARACTERISTICS  0x3a
-#define SBP2_CSR_FIRMWARE_REVISION     0x3c
-#define SBP2_CSR_LOGICAL_UNIT_NUMBER   0x14
-#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY        0xd4
-
-/* Management orb opcodes */
-#define SBP2_LOGIN_REQUEST             0x0
-#define SBP2_QUERY_LOGINS_REQUEST      0x1
-#define SBP2_RECONNECT_REQUEST         0x3
-#define SBP2_SET_PASSWORD_REQUEST      0x4
-#define SBP2_LOGOUT_REQUEST            0x7
-#define SBP2_ABORT_TASK_REQUEST                0xb
-#define SBP2_ABORT_TASK_SET            0xc
-#define SBP2_LOGICAL_UNIT_RESET                0xe
-#define SBP2_TARGET_RESET_REQUEST      0xf
-
-/* Offsets for command block agent registers */
-#define SBP2_AGENT_STATE               0x00
-#define SBP2_AGENT_RESET               0x04
-#define SBP2_ORB_POINTER               0x08
-#define SBP2_DOORBELL                  0x10
-#define SBP2_UNSOLICITED_STATUS_ENABLE 0x14
-
-/* Status write response codes */
-#define SBP2_STATUS_REQUEST_COMPLETE   0x0
-#define SBP2_STATUS_TRANSPORT_FAILURE  0x1
-#define SBP2_STATUS_ILLEGAL_REQUEST    0x2
-#define SBP2_STATUS_VENDOR_DEPENDENT   0x3
-
-#define STATUS_GET_ORB_HIGH(v)         ((v).status & 0xffff)
-#define STATUS_GET_SBP_STATUS(v)       (((v).status >> 16) & 0xff)
-#define STATUS_GET_LEN(v)              (((v).status >> 24) & 0x07)
-#define STATUS_GET_DEAD(v)             (((v).status >> 27) & 0x01)
-#define STATUS_GET_RESPONSE(v)         (((v).status >> 28) & 0x03)
-#define STATUS_GET_SOURCE(v)           (((v).status >> 30) & 0x03)
-#define STATUS_GET_ORB_LOW(v)          ((v).orb_low)
-#define STATUS_GET_DATA(v)             ((v).data)
-
-struct sbp2_status {
-       u32 status;
-       u32 orb_low;
-       u8 data[24];
-};
-
-struct sbp2_pointer {
-       __be32 high;
-       __be32 low;
-};
-
-struct sbp2_orb {
-       struct fw_transaction t;
-       struct kref kref;
-       dma_addr_t request_bus;
-       int rcode;
-       struct sbp2_pointer pointer;
-       void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
-       struct list_head link;
-};
-
-#define MANAGEMENT_ORB_LUN(v)                  ((v))
-#define MANAGEMENT_ORB_FUNCTION(v)             ((v) << 16)
-#define MANAGEMENT_ORB_RECONNECT(v)            ((v) << 20)
-#define MANAGEMENT_ORB_EXCLUSIVE(v)            ((v) ? 1 << 28 : 0)
-#define MANAGEMENT_ORB_REQUEST_FORMAT(v)       ((v) << 29)
-#define MANAGEMENT_ORB_NOTIFY                  ((1) << 31)
-
-#define MANAGEMENT_ORB_RESPONSE_LENGTH(v)      ((v))
-#define MANAGEMENT_ORB_PASSWORD_LENGTH(v)      ((v) << 16)
-
-struct sbp2_management_orb {
-       struct sbp2_orb base;
-       struct {
-               struct sbp2_pointer password;
-               struct sbp2_pointer response;
-               __be32 misc;
-               __be32 length;
-               struct sbp2_pointer status_fifo;
-       } request;
-       __be32 response[4];
-       dma_addr_t response_bus;
-       struct completion done;
-       struct sbp2_status status;
-};
-
-struct sbp2_login_response {
-       __be32 misc;
-       struct sbp2_pointer command_block_agent;
-       __be32 reconnect_hold;
-};
-#define COMMAND_ORB_DATA_SIZE(v)       ((v))
-#define COMMAND_ORB_PAGE_SIZE(v)       ((v) << 16)
-#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
-#define COMMAND_ORB_MAX_PAYLOAD(v)     ((v) << 20)
-#define COMMAND_ORB_SPEED(v)           ((v) << 24)
-#define COMMAND_ORB_DIRECTION          ((1) << 27)
-#define COMMAND_ORB_REQUEST_FORMAT(v)  ((v) << 29)
-#define COMMAND_ORB_NOTIFY             ((1) << 31)
-
-struct sbp2_command_orb {
-       struct sbp2_orb base;
-       struct {
-               struct sbp2_pointer next;
-               struct sbp2_pointer data_descriptor;
-               __be32 misc;
-               u8 command_block[12];
-       } request;
-       struct scsi_cmnd *cmd;
-       scsi_done_fn_t done;
-       struct sbp2_logical_unit *lu;
-
-       struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
-       dma_addr_t page_table_bus;
-};
-
-#define SBP2_ROM_VALUE_WILDCARD ~0         /* match all */
-#define SBP2_ROM_VALUE_MISSING  0xff000000 /* not present in the unit dir. */
-
-/*
- * List of devices with known bugs.
- *
- * The firmware_revision field, masked with 0xffff00, is the best
- * indicator for the type of bridge chip of a device.  It yields a few
- * false positives but this did not break correctly behaving devices
- * so far.
- */
-static const struct {
-       u32 firmware_revision;
-       u32 model;
-       unsigned int workarounds;
-} sbp2_workarounds_table[] = {
-       /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
-               .firmware_revision      = 0x002800,
-               .model                  = 0x001010,
-               .workarounds            = SBP2_WORKAROUND_INQUIRY_36 |
-                                         SBP2_WORKAROUND_MODE_SENSE_8 |
-                                         SBP2_WORKAROUND_POWER_CONDITION,
-       },
-       /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
-               .firmware_revision      = 0x002800,
-               .model                  = 0x000000,
-               .workarounds            = SBP2_WORKAROUND_DELAY_INQUIRY |
-                                         SBP2_WORKAROUND_POWER_CONDITION,
-       },
-       /* Initio bridges, actually only needed for some older ones */ {
-               .firmware_revision      = 0x000200,
-               .model                  = SBP2_ROM_VALUE_WILDCARD,
-               .workarounds            = SBP2_WORKAROUND_INQUIRY_36,
-       },
-       /* PL-3507 bridge with Prolific firmware */ {
-               .firmware_revision      = 0x012800,
-               .model                  = SBP2_ROM_VALUE_WILDCARD,
-               .workarounds            = SBP2_WORKAROUND_POWER_CONDITION,
-       },
-       /* Symbios bridge */ {
-               .firmware_revision      = 0xa0b800,
-               .model                  = SBP2_ROM_VALUE_WILDCARD,
-               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
-       },
-       /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
-               .firmware_revision      = 0x002600,
-               .model                  = SBP2_ROM_VALUE_WILDCARD,
-               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
-       },
-       /*
-        * iPod 2nd generation: needs 128k max transfer size workaround
-        * iPod 3rd generation: needs fix capacity workaround
-        */
-       {
-               .firmware_revision      = 0x0a2700,
-               .model                  = 0x000000,
-               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS |
-                                         SBP2_WORKAROUND_FIX_CAPACITY,
-       },
-       /* iPod 4th generation */ {
-               .firmware_revision      = 0x0a2700,
-               .model                  = 0x000021,
-               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
-       },
-       /* iPod mini */ {
-               .firmware_revision      = 0x0a2700,
-               .model                  = 0x000022,
-               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
-       },
-       /* iPod mini */ {
-               .firmware_revision      = 0x0a2700,
-               .model                  = 0x000023,
-               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
-       },
-       /* iPod Photo */ {
-               .firmware_revision      = 0x0a2700,
-               .model                  = 0x00007e,
-               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
-       }
-};
-
-static void free_orb(struct kref *kref)
-{
-       struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
-
-       kfree(orb);
-}
-
-static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
-                             int tcode, int destination, int source,
-                             int generation, int speed,
-                             unsigned long long offset,
-                             void *payload, size_t length, void *callback_data)
-{
-       struct sbp2_logical_unit *lu = callback_data;
-       struct sbp2_orb *orb;
-       struct sbp2_status status;
-       size_t header_size;
-       unsigned long flags;
-
-       if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
-           length == 0 || length > sizeof(status)) {
-               fw_send_response(card, request, RCODE_TYPE_ERROR);
-               return;
-       }
-
-       header_size = min(length, 2 * sizeof(u32));
-       fw_memcpy_from_be32(&status, payload, header_size);
-       if (length > header_size)
-               memcpy(status.data, payload + 8, length - header_size);
-       if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
-               fw_notify("non-orb related status write, not handled\n");
-               fw_send_response(card, request, RCODE_COMPLETE);
-               return;
-       }
-
-       /* Lookup the orb corresponding to this status write. */
-       spin_lock_irqsave(&card->lock, flags);
-       list_for_each_entry(orb, &lu->orb_list, link) {
-               if (STATUS_GET_ORB_HIGH(status) == 0 &&
-                   STATUS_GET_ORB_LOW(status) == orb->request_bus) {
-                       orb->rcode = RCODE_COMPLETE;
-                       list_del(&orb->link);
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       if (&orb->link != &lu->orb_list)
-               orb->callback(orb, &status);
-       else
-               fw_error("status write for unknown orb\n");
-
-       kref_put(&orb->kref, free_orb);
-
-       fw_send_response(card, request, RCODE_COMPLETE);
-}
-
-static void complete_transaction(struct fw_card *card, int rcode,
-                                void *payload, size_t length, void *data)
-{
-       struct sbp2_orb *orb = data;
-       unsigned long flags;
-
-       /*
-        * This is a little tricky.  We can get the status write for
-        * the orb before we get this callback.  The status write
-        * handler above will assume the orb pointer transaction was
-        * successful and set the rcode to RCODE_COMPLETE for the orb.
-        * So this callback only sets the rcode if it hasn't already
-        * been set and only does the cleanup if the transaction
-        * failed and we didn't already get a status write.
-        */
-       spin_lock_irqsave(&card->lock, flags);
-
-       if (orb->rcode == -1)
-               orb->rcode = rcode;
-       if (orb->rcode != RCODE_COMPLETE) {
-               list_del(&orb->link);
-               spin_unlock_irqrestore(&card->lock, flags);
-               orb->callback(orb, NULL);
-       } else {
-               spin_unlock_irqrestore(&card->lock, flags);
-       }
-
-       kref_put(&orb->kref, free_orb);
-}
-
-static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
-                         int node_id, int generation, u64 offset)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       unsigned long flags;
-
-       orb->pointer.high = 0;
-       orb->pointer.low = cpu_to_be32(orb->request_bus);
-
-       spin_lock_irqsave(&device->card->lock, flags);
-       list_add_tail(&orb->link, &lu->orb_list);
-       spin_unlock_irqrestore(&device->card->lock, flags);
-
-       /* Take a ref for the orb list and for the transaction callback. */
-       kref_get(&orb->kref);
-       kref_get(&orb->kref);
-
-       fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
-                       node_id, generation, device->max_speed, offset,
-                       &orb->pointer, sizeof(orb->pointer),
-                       complete_transaction, orb);
-}
-
-static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       struct sbp2_orb *orb, *next;
-       struct list_head list;
-       unsigned long flags;
-       int retval = -ENOENT;
-
-       INIT_LIST_HEAD(&list);
-       spin_lock_irqsave(&device->card->lock, flags);
-       list_splice_init(&lu->orb_list, &list);
-       spin_unlock_irqrestore(&device->card->lock, flags);
-
-       list_for_each_entry_safe(orb, next, &list, link) {
-               retval = 0;
-               if (fw_cancel_transaction(device->card, &orb->t) == 0)
-                       continue;
-
-               orb->rcode = RCODE_CANCELLED;
-               orb->callback(orb, NULL);
-       }
-
-       return retval;
-}
-
-static void complete_management_orb(struct sbp2_orb *base_orb,
-                                   struct sbp2_status *status)
-{
-       struct sbp2_management_orb *orb =
-               container_of(base_orb, struct sbp2_management_orb, base);
-
-       if (status)
-               memcpy(&orb->status, status, sizeof(*status));
-       complete(&orb->done);
-}
-
-static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
-                                   int generation, int function,
-                                   int lun_or_login_id, void *response)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       struct sbp2_management_orb *orb;
-       unsigned int timeout;
-       int retval = -ENOMEM;
-
-       if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
-               return 0;
-
-       orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
-       if (orb == NULL)
-               return -ENOMEM;
-
-       kref_init(&orb->base.kref);
-       orb->response_bus =
-               dma_map_single(device->card->device, &orb->response,
-                              sizeof(orb->response), DMA_FROM_DEVICE);
-       if (dma_mapping_error(device->card->device, orb->response_bus))
-               goto fail_mapping_response;
-
-       orb->request.response.high = 0;
-       orb->request.response.low  = cpu_to_be32(orb->response_bus);
-
-       orb->request.misc = cpu_to_be32(
-               MANAGEMENT_ORB_NOTIFY |
-               MANAGEMENT_ORB_FUNCTION(function) |
-               MANAGEMENT_ORB_LUN(lun_or_login_id));
-       orb->request.length = cpu_to_be32(
-               MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));
-
-       orb->request.status_fifo.high =
-               cpu_to_be32(lu->address_handler.offset >> 32);
-       orb->request.status_fifo.low  =
-               cpu_to_be32(lu->address_handler.offset);
-
-       if (function == SBP2_LOGIN_REQUEST) {
-               /* Ask for 2^2 == 4 seconds reconnect grace period */
-               orb->request.misc |= cpu_to_be32(
-                       MANAGEMENT_ORB_RECONNECT(2) |
-                       MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
-               timeout = lu->tgt->mgt_orb_timeout;
-       } else {
-               timeout = SBP2_ORB_TIMEOUT;
-       }
-
-       init_completion(&orb->done);
-       orb->base.callback = complete_management_orb;
-
-       orb->base.request_bus =
-               dma_map_single(device->card->device, &orb->request,
-                              sizeof(orb->request), DMA_TO_DEVICE);
-       if (dma_mapping_error(device->card->device, orb->base.request_bus))
-               goto fail_mapping_request;
-
-       sbp2_send_orb(&orb->base, lu, node_id, generation,
-                     lu->tgt->management_agent_address);
-
-       wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout));
-
-       retval = -EIO;
-       if (sbp2_cancel_orbs(lu) == 0) {
-               fw_error("%s: orb reply timed out, rcode=0x%02x\n",
-                        lu->tgt->bus_id, orb->base.rcode);
-               goto out;
-       }
-
-       if (orb->base.rcode != RCODE_COMPLETE) {
-               fw_error("%s: management write failed, rcode 0x%02x\n",
-                        lu->tgt->bus_id, orb->base.rcode);
-               goto out;
-       }
-
-       if (STATUS_GET_RESPONSE(orb->status) != 0 ||
-           STATUS_GET_SBP_STATUS(orb->status) != 0) {
-               fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
-                        STATUS_GET_RESPONSE(orb->status),
-                        STATUS_GET_SBP_STATUS(orb->status));
-               goto out;
-       }
-
-       retval = 0;
- out:
-       dma_unmap_single(device->card->device, orb->base.request_bus,
-                        sizeof(orb->request), DMA_TO_DEVICE);
- fail_mapping_request:
-       dma_unmap_single(device->card->device, orb->response_bus,
-                        sizeof(orb->response), DMA_FROM_DEVICE);
- fail_mapping_response:
-       if (response)
-               memcpy(response, orb->response, sizeof(orb->response));
-       kref_put(&orb->base.kref, free_orb);
-
-       return retval;
-}
-
-static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       __be32 d = 0;
-
-       fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
-                          lu->tgt->node_id, lu->generation, device->max_speed,
-                          lu->command_block_agent_address + SBP2_AGENT_RESET,
-                          &d, sizeof(d));
-}
-
-static void complete_agent_reset_write_no_wait(struct fw_card *card,
-               int rcode, void *payload, size_t length, void *data)
-{
-       kfree(data);
-}
-
-static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       struct fw_transaction *t;
-       static __be32 d;
-
-       t = kmalloc(sizeof(*t), GFP_ATOMIC);
-       if (t == NULL)
-               return;
-
-       fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
-                       lu->tgt->node_id, lu->generation, device->max_speed,
-                       lu->command_block_agent_address + SBP2_AGENT_RESET,
-                       &d, sizeof(d), complete_agent_reset_write_no_wait, t);
-}
-
-static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
-{
-       /*
-        * We may access dont_block without taking card->lock here:
-        * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
-        * are currently serialized against each other.
-        * And a wrong result in sbp2_conditionally_block()'s access of
-        * dont_block is rather harmless, it simply misses its first chance.
-        */
-       --lu->tgt->dont_block;
-}
-
-/*
- * Blocks lu->tgt if all of the following conditions are met:
- *   - Login, INQUIRY, and high-level SCSI setup of all of the target's
- *     logical units have been finished (indicated by dont_block == 0).
- *   - lu->generation is stale.
- *
- * Note, scsi_block_requests() must be called while holding card->lock,
- * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
- * unblock the target.
- */
-static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
-{
-       struct sbp2_target *tgt = lu->tgt;
-       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
-       struct Scsi_Host *shost =
-               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->lock, flags);
-       if (!tgt->dont_block && !lu->blocked &&
-           lu->generation != card->generation) {
-               lu->blocked = true;
-               if (++tgt->blocked == 1)
-                       scsi_block_requests(shost);
-       }
-       spin_unlock_irqrestore(&card->lock, flags);
-}
-
-/*
- * Unblocks lu->tgt as soon as all its logical units can be unblocked.
- * Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section.  On the other hand, running it inside
- * the section might clash with shost->host_lock.
- */
-static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
-{
-       struct sbp2_target *tgt = lu->tgt;
-       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
-       struct Scsi_Host *shost =
-               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-       unsigned long flags;
-       bool unblock = false;
-
-       spin_lock_irqsave(&card->lock, flags);
-       if (lu->blocked && lu->generation == card->generation) {
-               lu->blocked = false;
-               unblock = --tgt->blocked == 0;
-       }
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       if (unblock)
-               scsi_unblock_requests(shost);
-}
-
-/*
- * Prevents future blocking of tgt and unblocks it.
- * Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section.  On the other hand, running it inside
- * the section might clash with shost->host_lock.
- */
-static void sbp2_unblock(struct sbp2_target *tgt)
-{
-       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
-       struct Scsi_Host *shost =
-               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->lock, flags);
-       ++tgt->dont_block;
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       scsi_unblock_requests(shost);
-}
-
-static int sbp2_lun2int(u16 lun)
-{
-       struct scsi_lun eight_bytes_lun;
-
-       memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
-       eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff;
-       eight_bytes_lun.scsi_lun[1] = lun & 0xff;
-
-       return scsilun_to_int(&eight_bytes_lun);
-}
-
-static void sbp2_release_target(struct kref *kref)
-{
-       struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
-       struct sbp2_logical_unit *lu, *next;
-       struct Scsi_Host *shost =
-               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-       struct scsi_device *sdev;
-       struct fw_device *device = fw_device(tgt->unit->device.parent);
-
-       /* prevent deadlocks */
-       sbp2_unblock(tgt);
-
-       list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
-               sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
-               if (sdev) {
-                       scsi_remove_device(sdev);
-                       scsi_device_put(sdev);
-               }
-               if (lu->login_id != INVALID_LOGIN_ID) {
-                       int generation, node_id;
-                       /*
-                        * tgt->node_id may be obsolete here if we failed
-                        * during initial login or after a bus reset where
-                        * the topology changed.
-                        */
-                       generation = device->generation;
-                       smp_rmb(); /* node_id vs. generation */
-                       node_id    = device->node_id;
-                       sbp2_send_management_orb(lu, node_id, generation,
-                                                SBP2_LOGOUT_REQUEST,
-                                                lu->login_id, NULL);
-               }
-               fw_core_remove_address_handler(&lu->address_handler);
-               list_del(&lu->link);
-               kfree(lu);
-       }
-       scsi_remove_host(shost);
-       fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
-
-       fw_unit_put(tgt->unit);
-       scsi_host_put(shost);
-       fw_device_put(device);
-}
-
-static struct workqueue_struct *sbp2_wq;
-
-static void sbp2_target_put(struct sbp2_target *tgt)
-{
-       kref_put(&tgt->kref, sbp2_release_target);
-}
-
-/*
- * Always get the target's kref when scheduling work on one its units.
- * Each workqueue job is responsible to call sbp2_target_put() upon return.
- */
-static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
-{
-       kref_get(&lu->tgt->kref);
-       if (!queue_delayed_work(sbp2_wq, &lu->work, delay))
-               sbp2_target_put(lu->tgt);
-}
-
-/*
- * Write retransmit retry values into the BUSY_TIMEOUT register.
- * - The single-phase retry protocol is supported by all SBP-2 devices, but the
- *   default retry_limit value is 0 (i.e. never retry transmission). We write a
- *   saner value after logging into the device.
- * - The dual-phase retry protocol is optional to implement, and if not
- *   supported, writes to the dual-phase portion of the register will be
- *   ignored. We try to write the original 1394-1995 default here.
- * - In the case of devices that are also SBP-3-compliant, all writes are
- *   ignored, as the register is read-only, but contains single-phase retry of
- *   15, which is what we're trying to set for all SBP-2 device anyway, so this
- *   write attempt is safe and yields more consistent behavior for all devices.
- *
- * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec,
- * and section 6.4 of the SBP-3 spec for further details.
- */
-static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
-
-       fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
-                          lu->tgt->node_id, lu->generation, device->max_speed,
-                          CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT,
-                          &d, sizeof(d));
-}
-
-static void sbp2_reconnect(struct work_struct *work);
-
-static void sbp2_login(struct work_struct *work)
-{
-       struct sbp2_logical_unit *lu =
-               container_of(work, struct sbp2_logical_unit, work.work);
-       struct sbp2_target *tgt = lu->tgt;
-       struct fw_device *device = fw_device(tgt->unit->device.parent);
-       struct Scsi_Host *shost;
-       struct scsi_device *sdev;
-       struct sbp2_login_response response;
-       int generation, node_id, local_node_id;
-
-       if (fw_device_is_shutdown(device))
-               goto out;
-
-       generation    = device->generation;
-       smp_rmb();    /* node IDs must not be older than generation */
-       node_id       = device->node_id;
-       local_node_id = device->card->node_id;
-
-       /* If this is a re-login attempt, log out, or we might be rejected. */
-       if (lu->has_sdev)
-               sbp2_send_management_orb(lu, device->node_id, generation,
-                               SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
-
-       if (sbp2_send_management_orb(lu, node_id, generation,
-                               SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
-               if (lu->retries++ < 5) {
-                       sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
-               } else {
-                       fw_error("%s: failed to login to LUN %04x\n",
-                                tgt->bus_id, lu->lun);
-                       /* Let any waiting I/O fail from now on. */
-                       sbp2_unblock(lu->tgt);
-               }
-               goto out;
-       }
-
-       tgt->node_id      = node_id;
-       tgt->address_high = local_node_id << 16;
-       smp_wmb();        /* node IDs must not be older than generation */
-       lu->generation    = generation;
-
-       lu->command_block_agent_address =
-               ((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
-                     << 32) | be32_to_cpu(response.command_block_agent.low);
-       lu->login_id = be32_to_cpu(response.misc) & 0xffff;
-
-       fw_notify("%s: logged in to LUN %04x (%d retries)\n",
-                 tgt->bus_id, lu->lun, lu->retries);
-
-       /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
-       sbp2_set_busy_timeout(lu);
-
-       PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
-       sbp2_agent_reset(lu);
-
-       /* This was a re-login. */
-       if (lu->has_sdev) {
-               sbp2_cancel_orbs(lu);
-               sbp2_conditionally_unblock(lu);
-               goto out;
-       }
-
-       if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
-               ssleep(SBP2_INQUIRY_DELAY);
-
-       shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-       sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu);
-       /*
-        * FIXME:  We are unable to perform reconnects while in sbp2_login().
-        * Therefore __scsi_add_device() will get into trouble if a bus reset
-        * happens in parallel.  It will either fail or leave us with an
-        * unusable sdev.  As a workaround we check for this and retry the
-        * whole login and SCSI probing.
-        */
-
-       /* Reported error during __scsi_add_device() */
-       if (IS_ERR(sdev))
-               goto out_logout_login;
-
-       /* Unreported error during __scsi_add_device() */
-       smp_rmb(); /* get current card generation */
-       if (generation != device->card->generation) {
-               scsi_remove_device(sdev);
-               scsi_device_put(sdev);
-               goto out_logout_login;
-       }
-
-       /* No error during __scsi_add_device() */
-       lu->has_sdev = true;
-       scsi_device_put(sdev);
-       sbp2_allow_block(lu);
-       goto out;
-
- out_logout_login:
-       smp_rmb(); /* generation may have changed */
-       generation = device->generation;
-       smp_rmb(); /* node_id must not be older than generation */
-
-       sbp2_send_management_orb(lu, device->node_id, generation,
-                                SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
-       /*
-        * If a bus reset happened, sbp2_update will have requeued
-        * lu->work already.  Reset the work from reconnect to login.
-        */
-       PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
- out:
-       sbp2_target_put(tgt);
-}
-
-static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
-{
-       struct sbp2_logical_unit *lu;
-
-       lu = kmalloc(sizeof(*lu), GFP_KERNEL);
-       if (!lu)
-               return -ENOMEM;
-
-       lu->address_handler.length           = 0x100;
-       lu->address_handler.address_callback = sbp2_status_write;
-       lu->address_handler.callback_data    = lu;
-
-       if (fw_core_add_address_handler(&lu->address_handler,
-                                       &fw_high_memory_region) < 0) {
-               kfree(lu);
-               return -ENOMEM;
-       }
-
-       lu->tgt      = tgt;
-       lu->lun      = lun_entry & 0xffff;
-       lu->login_id = INVALID_LOGIN_ID;
-       lu->retries  = 0;
-       lu->has_sdev = false;
-       lu->blocked  = false;
-       ++tgt->dont_block;
-       INIT_LIST_HEAD(&lu->orb_list);
-       INIT_DELAYED_WORK(&lu->work, sbp2_login);
-
-       list_add_tail(&lu->link, &tgt->lu_list);
-       return 0;
-}
-
-static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
-{
-       struct fw_csr_iterator ci;
-       int key, value;
-
-       fw_csr_iterator_init(&ci, directory);
-       while (fw_csr_iterator_next(&ci, &key, &value))
-               if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER &&
-                   sbp2_add_logical_unit(tgt, value) < 0)
-                       return -ENOMEM;
-       return 0;
-}
-
-static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
-                             u32 *model, u32 *firmware_revision)
-{
-       struct fw_csr_iterator ci;
-       int key, value;
-       unsigned int timeout;
-
-       fw_csr_iterator_init(&ci, directory);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               switch (key) {
-
-               case CSR_DEPENDENT_INFO | CSR_OFFSET:
-                       tgt->management_agent_address =
-                                       CSR_REGISTER_BASE + 4 * value;
-                       break;
-
-               case CSR_DIRECTORY_ID:
-                       tgt->directory_id = value;
-                       break;
-
-               case CSR_MODEL:
-                       *model = value;
-                       break;
-
-               case SBP2_CSR_FIRMWARE_REVISION:
-                       *firmware_revision = value;
-                       break;
-
-               case SBP2_CSR_UNIT_CHARACTERISTICS:
-                       /* the timeout value is stored in 500ms units */
-                       timeout = ((unsigned int) value >> 8 & 0xff) * 500;
-                       timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
-                       tgt->mgt_orb_timeout =
-                                 min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);
-
-                       if (timeout > tgt->mgt_orb_timeout)
-                               fw_notify("%s: config rom contains %ds "
-                                         "management ORB timeout, limiting "
-                                         "to %ds\n", tgt->bus_id,
-                                         timeout / 1000,
-                                         tgt->mgt_orb_timeout / 1000);
-                       break;
-
-               case SBP2_CSR_LOGICAL_UNIT_NUMBER:
-                       if (sbp2_add_logical_unit(tgt, value) < 0)
-                               return -ENOMEM;
-                       break;
-
-               case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
-                       /* Adjust for the increment in the iterator */
-                       if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
-                               return -ENOMEM;
-                       break;
-               }
-       }
-       return 0;
-}
-
-static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
-                                 u32 firmware_revision)
-{
-       int i;
-       unsigned int w = sbp2_param_workarounds;
-
-       if (w)
-               fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
-                         "if you need the workarounds parameter for %s\n",
-                         tgt->bus_id);
-
-       if (w & SBP2_WORKAROUND_OVERRIDE)
-               goto out;
-
-       for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
-
-               if (sbp2_workarounds_table[i].firmware_revision !=
-                   (firmware_revision & 0xffffff00))
-                       continue;
-
-               if (sbp2_workarounds_table[i].model != model &&
-                   sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD)
-                       continue;
-
-               w |= sbp2_workarounds_table[i].workarounds;
-               break;
-       }
- out:
-       if (w)
-               fw_notify("Workarounds for %s: 0x%x "
-                         "(firmware_revision 0x%06x, model_id 0x%06x)\n",
-                         tgt->bus_id, w, firmware_revision, model);
-       tgt->workarounds = w;
-}
-
-static struct scsi_host_template scsi_driver_template;
-
-static int sbp2_probe(struct device *dev)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       struct fw_device *device = fw_device(unit->device.parent);
-       struct sbp2_target *tgt;
-       struct sbp2_logical_unit *lu;
-       struct Scsi_Host *shost;
-       u32 model, firmware_revision;
-
-       if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
-               BUG_ON(dma_set_max_seg_size(device->card->device,
-                                           SBP2_MAX_SEG_SIZE));
-
-       shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
-       if (shost == NULL)
-               return -ENOMEM;
-
-       tgt = (struct sbp2_target *)shost->hostdata;
-       unit->device.driver_data = tgt;
-       tgt->unit = unit;
-       kref_init(&tgt->kref);
-       INIT_LIST_HEAD(&tgt->lu_list);
-       tgt->bus_id = dev_name(&unit->device);
-       tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
-
-       if (fw_device_enable_phys_dma(device) < 0)
-               goto fail_shost_put;
-
-       if (scsi_add_host(shost, &unit->device) < 0)
-               goto fail_shost_put;
-
-       fw_device_get(device);
-       fw_unit_get(unit);
-
-       /* implicit directory ID */
-       tgt->directory_id = ((unit->directory - device->config_rom) * 4
-                            + CSR_CONFIG_ROM) & 0xffffff;
-
-       firmware_revision = SBP2_ROM_VALUE_MISSING;
-       model             = SBP2_ROM_VALUE_MISSING;
-
-       if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
-                              &firmware_revision) < 0)
-               goto fail_tgt_put;
-
-       sbp2_init_workarounds(tgt, model, firmware_revision);
-
-       /*
-        * At S100 we can do 512 bytes per packet, at S200 1024 bytes,
-        * and so on up to 4096 bytes.  The SBP-2 max_payload field
-        * specifies the max payload size as 2 ^ (max_payload + 2), so
-        * if we set this to max_speed + 7, we get the right value.
-        */
-       tgt->max_payload = min(device->max_speed + 7, 10U);
-       tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1);
-
-       /* Do the login in a workqueue so we can easily reschedule retries. */
-       list_for_each_entry(lu, &tgt->lu_list, link)
-               sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
-       return 0;
-
- fail_tgt_put:
-       sbp2_target_put(tgt);
-       return -ENOMEM;
-
- fail_shost_put:
-       scsi_host_put(shost);
-       return -ENOMEM;
-}
-
-static int sbp2_remove(struct device *dev)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       struct sbp2_target *tgt = unit->device.driver_data;
-
-       sbp2_target_put(tgt);
-       return 0;
-}
-
-static void sbp2_reconnect(struct work_struct *work)
-{
-       struct sbp2_logical_unit *lu =
-               container_of(work, struct sbp2_logical_unit, work.work);
-       struct sbp2_target *tgt = lu->tgt;
-       struct fw_device *device = fw_device(tgt->unit->device.parent);
-       int generation, node_id, local_node_id;
-
-       if (fw_device_is_shutdown(device))
-               goto out;
-
-       generation    = device->generation;
-       smp_rmb();    /* node IDs must not be older than generation */
-       node_id       = device->node_id;
-       local_node_id = device->card->node_id;
-
-       if (sbp2_send_management_orb(lu, node_id, generation,
-                                    SBP2_RECONNECT_REQUEST,
-                                    lu->login_id, NULL) < 0) {
-               /*
-                * If reconnect was impossible even though we are in the
-                * current generation, fall back and try to log in again.
-                *
-                * We could check for "Function rejected" status, but
-                * looking at the bus generation as simpler and more general.
-                */
-               smp_rmb(); /* get current card generation */
-               if (generation == device->card->generation ||
-                   lu->retries++ >= 5) {
-                       fw_error("%s: failed to reconnect\n", tgt->bus_id);
-                       lu->retries = 0;
-                       PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
-               }
-               sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
-               goto out;
-       }
-
-       tgt->node_id      = node_id;
-       tgt->address_high = local_node_id << 16;
-       smp_wmb();        /* node IDs must not be older than generation */
-       lu->generation    = generation;
-
-       fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
-                 tgt->bus_id, lu->lun, lu->retries);
-
-       sbp2_agent_reset(lu);
-       sbp2_cancel_orbs(lu);
-       sbp2_conditionally_unblock(lu);
- out:
-       sbp2_target_put(tgt);
-}
-
-static void sbp2_update(struct fw_unit *unit)
-{
-       struct sbp2_target *tgt = unit->device.driver_data;
-       struct sbp2_logical_unit *lu;
-
-       fw_device_enable_phys_dma(fw_device(unit->device.parent));
-
-       /*
-        * Fw-core serializes sbp2_update() against sbp2_remove().
-        * Iteration over tgt->lu_list is therefore safe here.
-        */
-       list_for_each_entry(lu, &tgt->lu_list, link) {
-               sbp2_conditionally_block(lu);
-               lu->retries = 0;
-               sbp2_queue_work(lu, 0);
-       }
-}
-
-#define SBP2_UNIT_SPEC_ID_ENTRY        0x0000609e
-#define SBP2_SW_VERSION_ENTRY  0x00010483
-
-static const struct ieee1394_device_id sbp2_id_table[] = {
-       {
-               .match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
-                               IEEE1394_MATCH_VERSION,
-               .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
-               .version      = SBP2_SW_VERSION_ENTRY,
-       },
-       { }
-};
-
-static struct fw_driver sbp2_driver = {
-       .driver   = {
-               .owner  = THIS_MODULE,
-               .name   = sbp2_driver_name,
-               .bus    = &fw_bus_type,
-               .probe  = sbp2_probe,
-               .remove = sbp2_remove,
-       },
-       .update   = sbp2_update,
-       .id_table = sbp2_id_table,
-};
-
-static void sbp2_unmap_scatterlist(struct device *card_device,
-                                  struct sbp2_command_orb *orb)
-{
-       if (scsi_sg_count(orb->cmd))
-               dma_unmap_sg(card_device, scsi_sglist(orb->cmd),
-                            scsi_sg_count(orb->cmd),
-                            orb->cmd->sc_data_direction);
-
-       if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
-               dma_unmap_single(card_device, orb->page_table_bus,
-                                sizeof(orb->page_table), DMA_TO_DEVICE);
-}
-
-static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
-{
-       int sam_status;
-
-       sense_data[0] = 0x70;
-       sense_data[1] = 0x0;
-       sense_data[2] = sbp2_status[1];
-       sense_data[3] = sbp2_status[4];
-       sense_data[4] = sbp2_status[5];
-       sense_data[5] = sbp2_status[6];
-       sense_data[6] = sbp2_status[7];
-       sense_data[7] = 10;
-       sense_data[8] = sbp2_status[8];
-       sense_data[9] = sbp2_status[9];
-       sense_data[10] = sbp2_status[10];
-       sense_data[11] = sbp2_status[11];
-       sense_data[12] = sbp2_status[2];
-       sense_data[13] = sbp2_status[3];
-       sense_data[14] = sbp2_status[12];
-       sense_data[15] = sbp2_status[13];
-
-       sam_status = sbp2_status[0] & 0x3f;
-
-       switch (sam_status) {
-       case SAM_STAT_GOOD:
-       case SAM_STAT_CHECK_CONDITION:
-       case SAM_STAT_CONDITION_MET:
-       case SAM_STAT_BUSY:
-       case SAM_STAT_RESERVATION_CONFLICT:
-       case SAM_STAT_COMMAND_TERMINATED:
-               return DID_OK << 16 | sam_status;
-
-       default:
-               return DID_ERROR << 16;
-       }
-}
-
-static void complete_command_orb(struct sbp2_orb *base_orb,
-                                struct sbp2_status *status)
-{
-       struct sbp2_command_orb *orb =
-               container_of(base_orb, struct sbp2_command_orb, base);
-       struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent);
-       int result;
-
-       if (status != NULL) {
-               if (STATUS_GET_DEAD(*status))
-                       sbp2_agent_reset_no_wait(orb->lu);
-
-               switch (STATUS_GET_RESPONSE(*status)) {
-               case SBP2_STATUS_REQUEST_COMPLETE:
-                       result = DID_OK << 16;
-                       break;
-               case SBP2_STATUS_TRANSPORT_FAILURE:
-                       result = DID_BUS_BUSY << 16;
-                       break;
-               case SBP2_STATUS_ILLEGAL_REQUEST:
-               case SBP2_STATUS_VENDOR_DEPENDENT:
-               default:
-                       result = DID_ERROR << 16;
-                       break;
-               }
-
-               if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
-                       result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
-                                                          orb->cmd->sense_buffer);
-       } else {
-               /*
-                * If the orb completes with status == NULL, something
-                * went wrong, typically a bus reset happened mid-orb
-                * or when sending the write (less likely).
-                */
-               result = DID_BUS_BUSY << 16;
-               sbp2_conditionally_block(orb->lu);
-       }
-
-       dma_unmap_single(device->card->device, orb->base.request_bus,
-                        sizeof(orb->request), DMA_TO_DEVICE);
-       sbp2_unmap_scatterlist(device->card->device, orb);
-
-       orb->cmd->result = result;
-       orb->done(orb->cmd);
-}
-
-static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
-               struct fw_device *device, struct sbp2_logical_unit *lu)
-{
-       struct scatterlist *sg = scsi_sglist(orb->cmd);
-       int i, n;
-
-       n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
-                      orb->cmd->sc_data_direction);
-       if (n == 0)
-               goto fail;
-
-       /*
-        * Handle the special case where there is only one element in
-        * the scatter list by converting it to an immediate block
-        * request. This is also a workaround for broken devices such
-        * as the second generation iPod which doesn't support page
-        * tables.
-        */
-       if (n == 1) {
-               orb->request.data_descriptor.high =
-                       cpu_to_be32(lu->tgt->address_high);
-               orb->request.data_descriptor.low  =
-                       cpu_to_be32(sg_dma_address(sg));
-               orb->request.misc |=
-                       cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
-               return 0;
-       }
-
-       for_each_sg(sg, sg, n, i) {
-               orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
-               orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg));
-       }
-
-       orb->page_table_bus =
-               dma_map_single(device->card->device, orb->page_table,
-                              sizeof(orb->page_table), DMA_TO_DEVICE);
-       if (dma_mapping_error(device->card->device, orb->page_table_bus))
-               goto fail_page_table;
-
-       /*
-        * The data_descriptor pointer is the one case where we need
-        * to fill in the node ID part of the address.  All other
-        * pointers assume that the data referenced reside on the
-        * initiator (i.e. us), but data_descriptor can refer to data
-        * on other nodes so we need to put our ID in descriptor.high.
-        */
-       orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
-       orb->request.data_descriptor.low  = cpu_to_be32(orb->page_table_bus);
-       orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
-                                        COMMAND_ORB_DATA_SIZE(n));
-
-       return 0;
-
- fail_page_table:
-       dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
-                    scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction);
- fail:
-       return -ENOMEM;
-}
-
-/* SCSI stack integration */
-
-static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
-{
-       struct sbp2_logical_unit *lu = cmd->device->hostdata;
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       struct sbp2_command_orb *orb;
-       int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
-
-       /*
-        * Bidirectional commands are not yet implemented, and unknown
-        * transfer direction not handled.
-        */
-       if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
-               fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
-               cmd->result = DID_ERROR << 16;
-               done(cmd);
-               return 0;
-       }
-
-       orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
-       if (orb == NULL) {
-               fw_notify("failed to alloc orb\n");
-               return SCSI_MLQUEUE_HOST_BUSY;
-       }
-
-       /* Initialize rcode to something not RCODE_COMPLETE. */
-       orb->base.rcode = -1;
-       kref_init(&orb->base.kref);
-
-       orb->lu   = lu;
-       orb->done = done;
-       orb->cmd  = cmd;
-
-       orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
-       orb->request.misc = cpu_to_be32(
-               COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
-               COMMAND_ORB_SPEED(device->max_speed) |
-               COMMAND_ORB_NOTIFY);
-
-       if (cmd->sc_data_direction == DMA_FROM_DEVICE)
-               orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);
-
-       generation = device->generation;
-       smp_rmb();    /* sbp2_map_scatterlist looks at tgt->address_high */
-
-       if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
-               goto out;
-
-       memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len);
-
-       orb->base.callback = complete_command_orb;
-       orb->base.request_bus =
-               dma_map_single(device->card->device, &orb->request,
-                              sizeof(orb->request), DMA_TO_DEVICE);
-       if (dma_mapping_error(device->card->device, orb->base.request_bus)) {
-               sbp2_unmap_scatterlist(device->card->device, orb);
-               goto out;
-       }
-
-       sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
-                     lu->command_block_agent_address + SBP2_ORB_POINTER);
-       retval = 0;
- out:
-       kref_put(&orb->base.kref, free_orb);
-       return retval;
-}
-
-static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
-{
-       struct sbp2_logical_unit *lu = sdev->hostdata;
-
-       /* (Re-)Adding logical units via the SCSI stack is not supported. */
-       if (!lu)
-               return -ENOSYS;
-
-       sdev->allow_restart = 1;
-
-       /* SBP-2 requires quadlet alignment of the data buffers. */
-       blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
-
-       if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
-               sdev->inquiry_len = 36;
-
-       return 0;
-}
-
-static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
-{
-       struct sbp2_logical_unit *lu = sdev->hostdata;
-
-       sdev->use_10_for_rw = 1;
-
-       if (sbp2_param_exclusive_login)
-               sdev->manage_start_stop = 1;
-
-       if (sdev->type == TYPE_ROM)
-               sdev->use_10_for_ms = 1;
-
-       if (sdev->type == TYPE_DISK &&
-           lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
-               sdev->skip_ms_page_8 = 1;
-
-       if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
-               sdev->fix_capacity = 1;
-
-       if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
-               sdev->start_stop_pwr_cond = 1;
-
-       if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
-               blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
-
-       blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
-
-       return 0;
-}
-
-/*
- * Called by scsi stack when something has really gone wrong.  Usually
- * called when a command has timed-out for some reason.
- */
-static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
-{
-       struct sbp2_logical_unit *lu = cmd->device->hostdata;
-
-       fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
-       sbp2_agent_reset(lu);
-       sbp2_cancel_orbs(lu);
-
-       return SUCCESS;
-}
-
-/*
- * Format of /sys/bus/scsi/devices/.../ieee1394_id:
- * u64 EUI-64 : u24 directory_ID : u16 LUN  (all printed in hexadecimal)
- *
- * This is the concatenation of target port identifier and logical unit
- * identifier as per SAM-2...SAM-4 annex A.
- */
-static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
-{
-       struct scsi_device *sdev = to_scsi_device(dev);
-       struct sbp2_logical_unit *lu;
-
-       if (!sdev)
-               return 0;
-
-       lu = sdev->hostdata;
-
-       return sprintf(buf, "%016llx:%06x:%04x\n",
-                       (unsigned long long)lu->tgt->guid,
-                       lu->tgt->directory_id, lu->lun);
-}
-
-static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
-
-static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
-       &dev_attr_ieee1394_id,
-       NULL
-};
-
-static struct scsi_host_template scsi_driver_template = {
-       .module                 = THIS_MODULE,
-       .name                   = "SBP-2 IEEE-1394",
-       .proc_name              = sbp2_driver_name,
-       .queuecommand           = sbp2_scsi_queuecommand,
-       .slave_alloc            = sbp2_scsi_slave_alloc,
-       .slave_configure        = sbp2_scsi_slave_configure,
-       .eh_abort_handler       = sbp2_scsi_abort,
-       .this_id                = -1,
-       .sg_tablesize           = SG_ALL,
-       .use_clustering         = ENABLE_CLUSTERING,
-       .cmd_per_lun            = 1,
-       .can_queue              = 1,
-       .sdev_attrs             = sbp2_scsi_sysfs_attrs,
-};
-
-MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
-MODULE_DESCRIPTION("SCSI over IEEE1394");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
-
-/* Provide a module alias so root-on-sbp2 initrds don't break. */
-#ifndef CONFIG_IEEE1394_SBP2_MODULE
-MODULE_ALIAS("sbp2");
-#endif
-
-static int __init sbp2_init(void)
-{
-       sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
-       if (!sbp2_wq)
-               return -ENOMEM;
-
-       return driver_register(&sbp2_driver.driver);
-}
-
-static void __exit sbp2_cleanup(void)
-{
-       driver_unregister(&sbp2_driver.driver);
-       destroy_workqueue(sbp2_wq);
-}
-
-module_init(sbp2_init);
-module_exit(sbp2_cleanup);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
deleted file mode 100644 (file)
index fddf2b3..0000000
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
- * Incremental bus scan, based on bus topology
- *
- * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/bug.h>
-#include <linux/errno.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-#include "core.h"
-
-#define SELF_ID_PHY_ID(q)              (((q) >> 24) & 0x3f)
-#define SELF_ID_EXTENDED(q)            (((q) >> 23) & 0x01)
-#define SELF_ID_LINK_ON(q)             (((q) >> 22) & 0x01)
-#define SELF_ID_GAP_COUNT(q)           (((q) >> 16) & 0x3f)
-#define SELF_ID_PHY_SPEED(q)           (((q) >> 14) & 0x03)
-#define SELF_ID_CONTENDER(q)           (((q) >> 11) & 0x01)
-#define SELF_ID_PHY_INITIATOR(q)       (((q) >>  1) & 0x01)
-#define SELF_ID_MORE_PACKETS(q)                (((q) >>  0) & 0x01)
-
-#define SELF_ID_EXT_SEQUENCE(q)                (((q) >> 20) & 0x07)
-
-#define SELFID_PORT_CHILD      0x3
-#define SELFID_PORT_PARENT     0x2
-#define SELFID_PORT_NCONN      0x1
-#define SELFID_PORT_NONE       0x0
-
-static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
-{
-       u32 q;
-       int port_type, shift, seq;
-
-       *total_port_count = 0;
-       *child_port_count = 0;
-
-       shift = 6;
-       q = *sid;
-       seq = 0;
-
-       while (1) {
-               port_type = (q >> shift) & 0x03;
-               switch (port_type) {
-               case SELFID_PORT_CHILD:
-                       (*child_port_count)++;
-               case SELFID_PORT_PARENT:
-               case SELFID_PORT_NCONN:
-                       (*total_port_count)++;
-               case SELFID_PORT_NONE:
-                       break;
-               }
-
-               shift -= 2;
-               if (shift == 0) {
-                       if (!SELF_ID_MORE_PACKETS(q))
-                               return sid + 1;
-
-                       shift = 16;
-                       sid++;
-                       q = *sid;
-
-                       /*
-                        * Check that the extra packets actually are
-                        * extended self ID packets and that the
-                        * sequence numbers in the extended self ID
-                        * packets increase as expected.
-                        */
-
-                       if (!SELF_ID_EXTENDED(q) ||
-                           seq != SELF_ID_EXT_SEQUENCE(q))
-                               return NULL;
-
-                       seq++;
-               }
-       }
-}
-
-static int get_port_type(u32 *sid, int port_index)
-{
-       int index, shift;
-
-       index = (port_index + 5) / 8;
-       shift = 16 - ((port_index + 5) & 7) * 2;
-       return (sid[index] >> shift) & 0x03;
-}
-
-static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
-{
-       struct fw_node *node;
-
-       node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
-                      GFP_ATOMIC);
-       if (node == NULL)
-               return NULL;
-
-       node->color = color;
-       node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
-       node->link_on = SELF_ID_LINK_ON(sid);
-       node->phy_speed = SELF_ID_PHY_SPEED(sid);
-       node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
-       node->port_count = port_count;
-
-       atomic_set(&node->ref_count, 1);
-       INIT_LIST_HEAD(&node->link);
-
-       return node;
-}
-
-/*
- * Compute the maximum hop count for this node and it's children.  The
- * maximum hop count is the maximum number of connections between any
- * two nodes in the subtree rooted at this node.  We need this for
- * setting the gap count.  As we build the tree bottom up in
- * build_tree() below, this is fairly easy to do: for each node we
- * maintain the max hop count and the max depth, ie the number of hops
- * to the furthest leaf.  Computing the max hop count breaks down into
- * two cases: either the path goes through this node, in which case
- * the hop count is the sum of the two biggest child depths plus 2.
- * Or it could be the case that the max hop path is entirely
- * containted in a child tree, in which case the max hop count is just
- * the max hop count of this child.
- */
-static void update_hop_count(struct fw_node *node)
-{
-       int depths[2] = { -1, -1 };
-       int max_child_hops = 0;
-       int i;
-
-       for (i = 0; i < node->port_count; i++) {
-               if (node->ports[i] == NULL)
-                       continue;
-
-               if (node->ports[i]->max_hops > max_child_hops)
-                       max_child_hops = node->ports[i]->max_hops;
-
-               if (node->ports[i]->max_depth > depths[0]) {
-                       depths[1] = depths[0];
-                       depths[0] = node->ports[i]->max_depth;
-               } else if (node->ports[i]->max_depth > depths[1])
-                       depths[1] = node->ports[i]->max_depth;
-       }
-
-       node->max_depth = depths[0] + 1;
-       node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
-}
-
-static inline struct fw_node *fw_node(struct list_head *l)
-{
-       return list_entry(l, struct fw_node, link);
-}
-
-/**
- * build_tree - Build the tree representation of the topology
- * @self_ids: array of self IDs to create the tree from
- * @self_id_count: the length of the self_ids array
- * @local_id: the node ID of the local node
- *
- * This function builds the tree representation of the topology given
- * by the self IDs from the latest bus reset.  During the construction
- * of the tree, the function checks that the self IDs are valid and
- * internally consistent.  On succcess this function returns the
- * fw_node corresponding to the local card otherwise NULL.
- */
-static struct fw_node *build_tree(struct fw_card *card,
-                                 u32 *sid, int self_id_count)
-{
-       struct fw_node *node, *child, *local_node, *irm_node;
-       struct list_head stack, *h;
-       u32 *next_sid, *end, q;
-       int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
-       int gap_count;
-       bool beta_repeaters_present;
-
-       local_node = NULL;
-       node = NULL;
-       INIT_LIST_HEAD(&stack);
-       stack_depth = 0;
-       end = sid + self_id_count;
-       phy_id = 0;
-       irm_node = NULL;
-       gap_count = SELF_ID_GAP_COUNT(*sid);
-       beta_repeaters_present = false;
-
-       while (sid < end) {
-               next_sid = count_ports(sid, &port_count, &child_port_count);
-
-               if (next_sid == NULL) {
-                       fw_error("Inconsistent extended self IDs.\n");
-                       return NULL;
-               }
-
-               q = *sid;
-               if (phy_id != SELF_ID_PHY_ID(q)) {
-                       fw_error("PHY ID mismatch in self ID: %d != %d.\n",
-                                phy_id, SELF_ID_PHY_ID(q));
-                       return NULL;
-               }
-
-               if (child_port_count > stack_depth) {
-                       fw_error("Topology stack underflow\n");
-                       return NULL;
-               }
-
-               /*
-                * Seek back from the top of our stack to find the
-                * start of the child nodes for this node.
-                */
-               for (i = 0, h = &stack; i < child_port_count; i++)
-                       h = h->prev;
-               /*
-                * When the stack is empty, this yields an invalid value,
-                * but that pointer will never be dereferenced.
-                */
-               child = fw_node(h);
-
-               node = fw_node_create(q, port_count, card->color);
-               if (node == NULL) {
-                       fw_error("Out of memory while building topology.\n");
-                       return NULL;
-               }
-
-               if (phy_id == (card->node_id & 0x3f))
-                       local_node = node;
-
-               if (SELF_ID_CONTENDER(q))
-                       irm_node = node;
-
-               parent_count = 0;
-
-               for (i = 0; i < port_count; i++) {
-                       switch (get_port_type(sid, i)) {
-                       case SELFID_PORT_PARENT:
-                               /*
-                                * Who's your daddy?  We dont know the
-                                * parent node at this time, so we
-                                * temporarily abuse node->color for
-                                * remembering the entry in the
-                                * node->ports array where the parent
-                                * node should be.  Later, when we
-                                * handle the parent node, we fix up
-                                * the reference.
-                                */
-                               parent_count++;
-                               node->color = i;
-                               break;
-
-                       case SELFID_PORT_CHILD:
-                               node->ports[i] = child;
-                               /*
-                                * Fix up parent reference for this
-                                * child node.
-                                */
-                               child->ports[child->color] = node;
-                               child->color = card->color;
-                               child = fw_node(child->link.next);
-                               break;
-                       }
-               }
-
-               /*
-                * Check that the node reports exactly one parent
-                * port, except for the root, which of course should
-                * have no parents.
-                */
-               if ((next_sid == end && parent_count != 0) ||
-                   (next_sid < end && parent_count != 1)) {
-                       fw_error("Parent port inconsistency for node %d: "
-                                "parent_count=%d\n", phy_id, parent_count);
-                       return NULL;
-               }
-
-               /* Pop the child nodes off the stack and push the new node. */
-               __list_del(h->prev, &stack);
-               list_add_tail(&node->link, &stack);
-               stack_depth += 1 - child_port_count;
-
-               if (node->phy_speed == SCODE_BETA &&
-                   parent_count + child_port_count > 1)
-                       beta_repeaters_present = true;
-
-               /*
-                * If PHYs report different gap counts, set an invalid count
-                * which will force a gap count reconfiguration and a reset.
-                */
-               if (SELF_ID_GAP_COUNT(q) != gap_count)
-                       gap_count = 0;
-
-               update_hop_count(node);
-
-               sid = next_sid;
-               phy_id++;
-       }
-
-       card->root_node = node;
-       card->irm_node = irm_node;
-       card->gap_count = gap_count;
-       card->beta_repeaters_present = beta_repeaters_present;
-
-       return local_node;
-}
-
-typedef void (*fw_node_callback_t)(struct fw_card * card,
-                                  struct fw_node * node,
-                                  struct fw_node * parent);
-
-static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
-                            fw_node_callback_t callback)
-{
-       struct list_head list;
-       struct fw_node *node, *next, *child, *parent;
-       int i;
-
-       INIT_LIST_HEAD(&list);
-
-       fw_node_get(root);
-       list_add_tail(&root->link, &list);
-       parent = NULL;
-       list_for_each_entry(node, &list, link) {
-               node->color = card->color;
-
-               for (i = 0; i < node->port_count; i++) {
-                       child = node->ports[i];
-                       if (!child)
-                               continue;
-                       if (child->color == card->color)
-                               parent = child;
-                       else {
-                               fw_node_get(child);
-                               list_add_tail(&child->link, &list);
-                       }
-               }
-
-               callback(card, node, parent);
-       }
-
-       list_for_each_entry_safe(node, next, &list, link)
-               fw_node_put(node);
-}
-
-static void report_lost_node(struct fw_card *card,
-                            struct fw_node *node, struct fw_node *parent)
-{
-       fw_node_event(card, node, FW_NODE_DESTROYED);
-       fw_node_put(node);
-
-       /* Topology has changed - reset bus manager retry counter */
-       card->bm_retries = 0;
-}
-
-static void report_found_node(struct fw_card *card,
-                             struct fw_node *node, struct fw_node *parent)
-{
-       int b_path = (node->phy_speed == SCODE_BETA);
-
-       if (parent != NULL) {
-               /* min() macro doesn't work here with gcc 3.4 */
-               node->max_speed = parent->max_speed < node->phy_speed ?
-                                       parent->max_speed : node->phy_speed;
-               node->b_path = parent->b_path && b_path;
-       } else {
-               node->max_speed = node->phy_speed;
-               node->b_path = b_path;
-       }
-
-       fw_node_event(card, node, FW_NODE_CREATED);
-
-       /* Topology has changed - reset bus manager retry counter */
-       card->bm_retries = 0;
-}
-
-void fw_destroy_nodes(struct fw_card *card)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->lock, flags);
-       card->color++;
-       if (card->local_node != NULL)
-               for_each_fw_node(card, card->local_node, report_lost_node);
-       card->local_node = NULL;
-       spin_unlock_irqrestore(&card->lock, flags);
-}
-
-static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
-{
-       struct fw_node *tree;
-       int i;
-
-       tree = node1->ports[port];
-       node0->ports[port] = tree;
-       for (i = 0; i < tree->port_count; i++) {
-               if (tree->ports[i] == node1) {
-                       tree->ports[i] = node0;
-                       break;
-               }
-       }
-}
-
-/**
- * update_tree - compare the old topology tree for card with the new
- * one specified by root.  Queue the nodes and mark them as either
- * found, lost or updated.  Update the nodes in the card topology tree
- * as we go.
- */
-static void update_tree(struct fw_card *card, struct fw_node *root)
-{
-       struct list_head list0, list1;
-       struct fw_node *node0, *node1, *next1;
-       int i, event;
-
-       INIT_LIST_HEAD(&list0);
-       list_add_tail(&card->local_node->link, &list0);
-       INIT_LIST_HEAD(&list1);
-       list_add_tail(&root->link, &list1);
-
-       node0 = fw_node(list0.next);
-       node1 = fw_node(list1.next);
-
-       while (&node0->link != &list0) {
-               WARN_ON(node0->port_count != node1->port_count);
-
-               if (node0->link_on && !node1->link_on)
-                       event = FW_NODE_LINK_OFF;
-               else if (!node0->link_on && node1->link_on)
-                       event = FW_NODE_LINK_ON;
-               else if (node1->initiated_reset && node1->link_on)
-                       event = FW_NODE_INITIATED_RESET;
-               else
-                       event = FW_NODE_UPDATED;
-
-               node0->node_id = node1->node_id;
-               node0->color = card->color;
-               node0->link_on = node1->link_on;
-               node0->initiated_reset = node1->initiated_reset;
-               node0->max_hops = node1->max_hops;
-               node1->color = card->color;
-               fw_node_event(card, node0, event);
-
-               if (card->root_node == node1)
-                       card->root_node = node0;
-               if (card->irm_node == node1)
-                       card->irm_node = node0;
-
-               for (i = 0; i < node0->port_count; i++) {
-                       if (node0->ports[i] && node1->ports[i]) {
-                               /*
-                                * This port didn't change, queue the
-                                * connected node for further
-                                * investigation.
-                                */
-                               if (node0->ports[i]->color == card->color)
-                                       continue;
-                               list_add_tail(&node0->ports[i]->link, &list0);
-                               list_add_tail(&node1->ports[i]->link, &list1);
-                       } else if (node0->ports[i]) {
-                               /*
-                                * The nodes connected here were
-                                * unplugged; unref the lost nodes and
-                                * queue FW_NODE_LOST callbacks for
-                                * them.
-                                */
-
-                               for_each_fw_node(card, node0->ports[i],
-                                                report_lost_node);
-                               node0->ports[i] = NULL;
-                       } else if (node1->ports[i]) {
-                               /*
-                                * One or more node were connected to
-                                * this port. Move the new nodes into
-                                * the tree and queue FW_NODE_CREATED
-                                * callbacks for them.
-                                */
-                               move_tree(node0, node1, i);
-                               for_each_fw_node(card, node0->ports[i],
-                                                report_found_node);
-                       }
-               }
-
-               node0 = fw_node(node0->link.next);
-               next1 = fw_node(node1->link.next);
-               fw_node_put(node1);
-               node1 = next1;
-       }
-}
-
-static void update_topology_map(struct fw_card *card,
-                               u32 *self_ids, int self_id_count)
-{
-       int node_count;
-
-       card->topology_map[1]++;
-       node_count = (card->root_node->node_id & 0x3f) + 1;
-       card->topology_map[2] = (node_count << 16) | self_id_count;
-       card->topology_map[0] = (self_id_count + 2) << 16;
-       memcpy(&card->topology_map[3], self_ids, self_id_count * 4);
-       fw_compute_block_crc(card->topology_map);
-}
-
-void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
-                             int self_id_count, u32 *self_ids)
-{
-       struct fw_node *local_node;
-       unsigned long flags;
-
-       /*
-        * If the selfID buffer is not the immediate successor of the
-        * previously processed one, we cannot reliably compare the
-        * old and new topologies.
-        */
-       if (!is_next_generation(generation, card->generation) &&
-           card->local_node != NULL) {
-               fw_notify("skipped bus generations, destroying all nodes\n");
-               fw_destroy_nodes(card);
-               card->bm_retries = 0;
-       }
-
-       spin_lock_irqsave(&card->lock, flags);
-
-       card->broadcast_channel_allocated = false;
-       card->node_id = node_id;
-       /*
-        * Update node_id before generation to prevent anybody from using
-        * a stale node_id together with a current generation.
-        */
-       smp_wmb();
-       card->generation = generation;
-       card->reset_jiffies = jiffies;
-       fw_schedule_bm_work(card, 0);
-
-       local_node = build_tree(card, self_ids, self_id_count);
-
-       update_topology_map(card, self_ids, self_id_count);
-
-       card->color++;
-
-       if (local_node == NULL) {
-               fw_error("topology build failed\n");
-               /* FIXME: We need to issue a bus reset in this case. */
-       } else if (card->local_node == NULL) {
-               card->local_node = local_node;
-               for_each_fw_node(card, local_node, report_found_node);
-       } else {
-               update_tree(card, local_node);
-       }
-
-       spin_unlock_irqrestore(&card->lock, flags);
-}
-EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
deleted file mode 100644 (file)
index 9a6ce9a..0000000
+++ /dev/null
@@ -1,978 +0,0 @@
-/*
- * Core IEEE1394 transaction logic
- *
- * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/bug.h>
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/idr.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/types.h>
-
-#include <asm/byteorder.h>
-
-#include "core.h"
-
-#define HEADER_PRI(pri)                        ((pri) << 0)
-#define HEADER_TCODE(tcode)            ((tcode) << 4)
-#define HEADER_RETRY(retry)            ((retry) << 8)
-#define HEADER_TLABEL(tlabel)          ((tlabel) << 10)
-#define HEADER_DESTINATION(destination)        ((destination) << 16)
-#define HEADER_SOURCE(source)          ((source) << 16)
-#define HEADER_RCODE(rcode)            ((rcode) << 12)
-#define HEADER_OFFSET_HIGH(offset_high)        ((offset_high) << 0)
-#define HEADER_DATA_LENGTH(length)     ((length) << 16)
-#define HEADER_EXTENDED_TCODE(tcode)   ((tcode) << 0)
-
-#define HEADER_GET_TCODE(q)            (((q) >> 4) & 0x0f)
-#define HEADER_GET_TLABEL(q)           (((q) >> 10) & 0x3f)
-#define HEADER_GET_RCODE(q)            (((q) >> 12) & 0x0f)
-#define HEADER_GET_DESTINATION(q)      (((q) >> 16) & 0xffff)
-#define HEADER_GET_SOURCE(q)           (((q) >> 16) & 0xffff)
-#define HEADER_GET_OFFSET_HIGH(q)      (((q) >> 0) & 0xffff)
-#define HEADER_GET_DATA_LENGTH(q)      (((q) >> 16) & 0xffff)
-#define HEADER_GET_EXTENDED_TCODE(q)   (((q) >> 0) & 0xffff)
-
-#define HEADER_DESTINATION_IS_BROADCAST(q) \
-       (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
-
-#define PHY_PACKET_CONFIG      0x0
-#define PHY_PACKET_LINK_ON     0x1
-#define PHY_PACKET_SELF_ID     0x2
-
-#define PHY_CONFIG_GAP_COUNT(gap_count)        (((gap_count) << 16) | (1 << 22))
-#define PHY_CONFIG_ROOT_ID(node_id)    ((((node_id) & 0x3f) << 24) | (1 << 23))
-#define PHY_IDENTIFIER(id)             ((id) << 30)
-
-static int close_transaction(struct fw_transaction *transaction,
-                            struct fw_card *card, int rcode)
-{
-       struct fw_transaction *t;
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->lock, flags);
-       list_for_each_entry(t, &card->transaction_list, link) {
-               if (t == transaction) {
-                       list_del(&t->link);
-                       card->tlabel_mask &= ~(1 << t->tlabel);
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       if (&t->link != &card->transaction_list) {
-               t->callback(card, rcode, NULL, 0, t->callback_data);
-               return 0;
-       }
-
-       return -ENOENT;
-}
-
-/*
- * Only valid for transactions that are potentially pending (ie have
- * been sent).
- */
-int fw_cancel_transaction(struct fw_card *card,
-                         struct fw_transaction *transaction)
-{
-       /*
-        * Cancel the packet transmission if it's still queued.  That
-        * will call the packet transmission callback which cancels
-        * the transaction.
-        */
-
-       if (card->driver->cancel_packet(card, &transaction->packet) == 0)
-               return 0;
-
-       /*
-        * If the request packet has already been sent, we need to see
-        * if the transaction is still pending and remove it in that case.
-        */
-
-       return close_transaction(transaction, card, RCODE_CANCELLED);
-}
-EXPORT_SYMBOL(fw_cancel_transaction);
-
-static void transmit_complete_callback(struct fw_packet *packet,
-                                      struct fw_card *card, int status)
-{
-       struct fw_transaction *t =
-           container_of(packet, struct fw_transaction, packet);
-
-       switch (status) {
-       case ACK_COMPLETE:
-               close_transaction(t, card, RCODE_COMPLETE);
-               break;
-       case ACK_PENDING:
-               t->timestamp = packet->timestamp;
-               break;
-       case ACK_BUSY_X:
-       case ACK_BUSY_A:
-       case ACK_BUSY_B:
-               close_transaction(t, card, RCODE_BUSY);
-               break;
-       case ACK_DATA_ERROR:
-               close_transaction(t, card, RCODE_DATA_ERROR);
-               break;
-       case ACK_TYPE_ERROR:
-               close_transaction(t, card, RCODE_TYPE_ERROR);
-               break;
-       default:
-               /*
-                * In this case the ack is really a juju specific
-                * rcode, so just forward that to the callback.
-                */
-               close_transaction(t, card, status);
-               break;
-       }
-}
-
-static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
-               int destination_id, int source_id, int generation, int speed,
-               unsigned long long offset, void *payload, size_t length)
-{
-       int ext_tcode;
-
-       if (tcode == TCODE_STREAM_DATA) {
-               packet->header[0] =
-                       HEADER_DATA_LENGTH(length) |
-                       destination_id |
-                       HEADER_TCODE(TCODE_STREAM_DATA);
-               packet->header_length = 4;
-               packet->payload = payload;
-               packet->payload_length = length;
-
-               goto common;
-       }
-
-       if (tcode > 0x10) {
-               ext_tcode = tcode & ~0x10;
-               tcode = TCODE_LOCK_REQUEST;
-       } else
-               ext_tcode = 0;
-
-       packet->header[0] =
-               HEADER_RETRY(RETRY_X) |
-               HEADER_TLABEL(tlabel) |
-               HEADER_TCODE(tcode) |
-               HEADER_DESTINATION(destination_id);
-       packet->header[1] =
-               HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
-       packet->header[2] =
-               offset;
-
-       switch (tcode) {
-       case TCODE_WRITE_QUADLET_REQUEST:
-               packet->header[3] = *(u32 *)payload;
-               packet->header_length = 16;
-               packet->payload_length = 0;
-               break;
-
-       case TCODE_LOCK_REQUEST:
-       case TCODE_WRITE_BLOCK_REQUEST:
-               packet->header[3] =
-                       HEADER_DATA_LENGTH(length) |
-                       HEADER_EXTENDED_TCODE(ext_tcode);
-               packet->header_length = 16;
-               packet->payload = payload;
-               packet->payload_length = length;
-               break;
-
-       case TCODE_READ_QUADLET_REQUEST:
-               packet->header_length = 12;
-               packet->payload_length = 0;
-               break;
-
-       case TCODE_READ_BLOCK_REQUEST:
-               packet->header[3] =
-                       HEADER_DATA_LENGTH(length) |
-                       HEADER_EXTENDED_TCODE(ext_tcode);
-               packet->header_length = 16;
-               packet->payload_length = 0;
-               break;
-       }
- common:
-       packet->speed = speed;
-       packet->generation = generation;
-       packet->ack = 0;
-       packet->payload_bus = 0;
-}
-
-/**
- * This function provides low-level access to the IEEE1394 transaction
- * logic.  Most C programs would use either fw_read(), fw_write() or
- * fw_lock() instead - those function are convenience wrappers for
- * this function.  The fw_send_request() function is primarily
- * provided as a flexible, one-stop entry point for languages bindings
- * and protocol bindings.
- *
- * FIXME: Document this function further, in particular the possible
- * values for rcode in the callback.  In short, we map ACK_COMPLETE to
- * RCODE_COMPLETE, internal errors set errno and set rcode to
- * RCODE_SEND_ERROR (which is out of range for standard ieee1394
- * rcodes).  All other rcodes are forwarded unchanged.  For all
- * errors, payload is NULL, length is 0.
- *
- * Can not expect the callback to be called before the function
- * returns, though this does happen in some cases (ACK_COMPLETE and
- * errors).
- *
- * The payload is only used for write requests and must not be freed
- * until the callback has been called.
- *
- * @param card the card from which to send the request
- * @param tcode the tcode for this transaction.  Do not use
- *   TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
- *   etc. to specify tcode and ext_tcode.
- * @param node_id the destination node ID (bus ID and PHY ID concatenated)
- * @param generation the generation for which node_id is valid
- * @param speed the speed to use for sending the request
- * @param offset the 48 bit offset on the destination node
- * @param payload the data payload for the request subaction
- * @param length the length in bytes of the data to read
- * @param callback function to be called when the transaction is completed
- * @param callback_data pointer to arbitrary data, which will be
- *   passed to the callback
- *
- * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
- * needs to synthesize @destination_id with fw_stream_packet_destination_id().
- */
-void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
-                    int destination_id, int generation, int speed,
-                    unsigned long long offset, void *payload, size_t length,
-                    fw_transaction_callback_t callback, void *callback_data)
-{
-       unsigned long flags;
-       int tlabel;
-
-       /*
-        * Bump the flush timer up 100ms first of all so we
-        * don't race with a flush timer callback.
-        */
-
-       mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
-
-       /*
-        * Allocate tlabel from the bitmap and put the transaction on
-        * the list while holding the card spinlock.
-        */
-
-       spin_lock_irqsave(&card->lock, flags);
-
-       tlabel = card->current_tlabel;
-       if (card->tlabel_mask & (1 << tlabel)) {
-               spin_unlock_irqrestore(&card->lock, flags);
-               callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
-               return;
-       }
-
-       card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
-       card->tlabel_mask |= (1 << tlabel);
-
-       t->node_id = destination_id;
-       t->tlabel = tlabel;
-       t->callback = callback;
-       t->callback_data = callback_data;
-
-       fw_fill_request(&t->packet, tcode, t->tlabel,
-                       destination_id, card->node_id, generation,
-                       speed, offset, payload, length);
-       t->packet.callback = transmit_complete_callback;
-
-       list_add_tail(&t->link, &card->transaction_list);
-
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       card->driver->send_request(card, &t->packet);
-}
-EXPORT_SYMBOL(fw_send_request);
-
-struct transaction_callback_data {
-       struct completion done;
-       void *payload;
-       int rcode;
-};
-
-static void transaction_callback(struct fw_card *card, int rcode,
-                                void *payload, size_t length, void *data)
-{
-       struct transaction_callback_data *d = data;
-
-       if (rcode == RCODE_COMPLETE)
-               memcpy(d->payload, payload, length);
-       d->rcode = rcode;
-       complete(&d->done);
-}
-
-/**
- * fw_run_transaction - send request and sleep until transaction is completed
- *
- * Returns the RCODE.
- */
-int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
-                      int generation, int speed, unsigned long long offset,
-                      void *payload, size_t length)
-{
-       struct transaction_callback_data d;
-       struct fw_transaction t;
-
-       init_completion(&d.done);
-       d.payload = payload;
-       fw_send_request(card, &t, tcode, destination_id, generation, speed,
-                       offset, payload, length, transaction_callback, &d);
-       wait_for_completion(&d.done);
-
-       return d.rcode;
-}
-EXPORT_SYMBOL(fw_run_transaction);
-
-static DEFINE_MUTEX(phy_config_mutex);
-static DECLARE_COMPLETION(phy_config_done);
-
-static void transmit_phy_packet_callback(struct fw_packet *packet,
-                                        struct fw_card *card, int status)
-{
-       complete(&phy_config_done);
-}
-
-static struct fw_packet phy_config_packet = {
-       .header_length  = 8,
-       .payload_length = 0,
-       .speed          = SCODE_100,
-       .callback       = transmit_phy_packet_callback,
-};
-
-void fw_send_phy_config(struct fw_card *card,
-                       int node_id, int generation, int gap_count)
-{
-       long timeout = DIV_ROUND_UP(HZ, 10);
-       u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
-                  PHY_CONFIG_ROOT_ID(node_id) |
-                  PHY_CONFIG_GAP_COUNT(gap_count);
-
-       mutex_lock(&phy_config_mutex);
-
-       phy_config_packet.header[0] = data;
-       phy_config_packet.header[1] = ~data;
-       phy_config_packet.generation = generation;
-       INIT_COMPLETION(phy_config_done);
-
-       card->driver->send_request(card, &phy_config_packet);
-       wait_for_completion_timeout(&phy_config_done, timeout);
-
-       mutex_unlock(&phy_config_mutex);
-}
-
-void fw_flush_transactions(struct fw_card *card)
-{
-       struct fw_transaction *t, *next;
-       struct list_head list;
-       unsigned long flags;
-
-       INIT_LIST_HEAD(&list);
-       spin_lock_irqsave(&card->lock, flags);
-       list_splice_init(&card->transaction_list, &list);
-       card->tlabel_mask = 0;
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       list_for_each_entry_safe(t, next, &list, link) {
-               card->driver->cancel_packet(card, &t->packet);
-
-               /*
-                * At this point cancel_packet will never call the
-                * transaction callback, since we just took all the
-                * transactions out of the list.  So do it here.
-                */
-               t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
-       }
-}
-
-static struct fw_address_handler *lookup_overlapping_address_handler(
-       struct list_head *list, unsigned long long offset, size_t length)
-{
-       struct fw_address_handler *handler;
-
-       list_for_each_entry(handler, list, link) {
-               if (handler->offset < offset + length &&
-                   offset < handler->offset + handler->length)
-                       return handler;
-       }
-
-       return NULL;
-}
-
-static struct fw_address_handler *lookup_enclosing_address_handler(
-       struct list_head *list, unsigned long long offset, size_t length)
-{
-       struct fw_address_handler *handler;
-
-       list_for_each_entry(handler, list, link) {
-               if (handler->offset <= offset &&
-                   offset + length <= handler->offset + handler->length)
-                       return handler;
-       }
-
-       return NULL;
-}
-
-static DEFINE_SPINLOCK(address_handler_lock);
-static LIST_HEAD(address_handler_list);
-
-const struct fw_address_region fw_high_memory_region =
-       { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL,  };
-EXPORT_SYMBOL(fw_high_memory_region);
-
-#if 0
-const struct fw_address_region fw_low_memory_region =
-       { .start = 0x000000000000ULL, .end = 0x000100000000ULL,  };
-const struct fw_address_region fw_private_region =
-       { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL,  };
-const struct fw_address_region fw_csr_region =
-       { .start = CSR_REGISTER_BASE,
-         .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END,  };
-const struct fw_address_region fw_unit_space_region =
-       { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
-#endif  /*  0  */
-
-/**
- * fw_core_add_address_handler - register for incoming requests
- * @handler: callback
- * @region: region in the IEEE 1212 node space address range
- *
- * region->start, ->end, and handler->length have to be quadlet-aligned.
- *
- * When a request is received that falls within the specified address range,
- * the specified callback is invoked.  The parameters passed to the callback
- * give the details of the particular request.
- *
- * Return value:  0 on success, non-zero otherwise.
- * The start offset of the handler's address region is determined by
- * fw_core_add_address_handler() and is returned in handler->offset.
- */
-int fw_core_add_address_handler(struct fw_address_handler *handler,
-                               const struct fw_address_region *region)
-{
-       struct fw_address_handler *other;
-       unsigned long flags;
-       int ret = -EBUSY;
-
-       if (region->start & 0xffff000000000003ULL ||
-           region->end   & 0xffff000000000003ULL ||
-           region->start >= region->end ||
-           handler->length & 3 ||
-           handler->length == 0)
-               return -EINVAL;
-
-       spin_lock_irqsave(&address_handler_lock, flags);
-
-       handler->offset = region->start;
-       while (handler->offset + handler->length <= region->end) {
-               other =
-                   lookup_overlapping_address_handler(&address_handler_list,
-                                                      handler->offset,
-                                                      handler->length);
-               if (other != NULL) {
-                       handler->offset += other->length;
-               } else {
-                       list_add_tail(&handler->link, &address_handler_list);
-                       ret = 0;
-                       break;
-               }
-       }
-
-       spin_unlock_irqrestore(&address_handler_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(fw_core_add_address_handler);
-
-/**
- * fw_core_remove_address_handler - unregister an address handler
- */
-void fw_core_remove_address_handler(struct fw_address_handler *handler)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&address_handler_lock, flags);
-       list_del(&handler->link);
-       spin_unlock_irqrestore(&address_handler_lock, flags);
-}
-EXPORT_SYMBOL(fw_core_remove_address_handler);
-
-struct fw_request {
-       struct fw_packet response;
-       u32 request_header[4];
-       int ack;
-       u32 length;
-       u32 data[0];
-};
-
-static void free_response_callback(struct fw_packet *packet,
-                                  struct fw_card *card, int status)
-{
-       struct fw_request *request;
-
-       request = container_of(packet, struct fw_request, response);
-       kfree(request);
-}
-
-void fw_fill_response(struct fw_packet *response, u32 *request_header,
-                     int rcode, void *payload, size_t length)
-{
-       int tcode, tlabel, extended_tcode, source, destination;
-
-       tcode          = HEADER_GET_TCODE(request_header[0]);
-       tlabel         = HEADER_GET_TLABEL(request_header[0]);
-       source         = HEADER_GET_DESTINATION(request_header[0]);
-       destination    = HEADER_GET_SOURCE(request_header[1]);
-       extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
-
-       response->header[0] =
-               HEADER_RETRY(RETRY_1) |
-               HEADER_TLABEL(tlabel) |
-               HEADER_DESTINATION(destination);
-       response->header[1] =
-               HEADER_SOURCE(source) |
-               HEADER_RCODE(rcode);
-       response->header[2] = 0;
-
-       switch (tcode) {
-       case TCODE_WRITE_QUADLET_REQUEST:
-       case TCODE_WRITE_BLOCK_REQUEST:
-               response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
-               response->header_length = 12;
-               response->payload_length = 0;
-               break;
-
-       case TCODE_READ_QUADLET_REQUEST:
-               response->header[0] |=
-                       HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
-               if (payload != NULL)
-                       response->header[3] = *(u32 *)payload;
-               else
-                       response->header[3] = 0;
-               response->header_length = 16;
-               response->payload_length = 0;
-               break;
-
-       case TCODE_READ_BLOCK_REQUEST:
-       case TCODE_LOCK_REQUEST:
-               response->header[0] |= HEADER_TCODE(tcode + 2);
-               response->header[3] =
-                       HEADER_DATA_LENGTH(length) |
-                       HEADER_EXTENDED_TCODE(extended_tcode);
-               response->header_length = 16;
-               response->payload = payload;
-               response->payload_length = length;
-               break;
-
-       default:
-               BUG();
-               return;
-       }
-
-       response->payload_bus = 0;
-}
-EXPORT_SYMBOL(fw_fill_response);
-
-static struct fw_request *allocate_request(struct fw_packet *p)
-{
-       struct fw_request *request;
-       u32 *data, length;
-       int request_tcode, t;
-
-       request_tcode = HEADER_GET_TCODE(p->header[0]);
-       switch (request_tcode) {
-       case TCODE_WRITE_QUADLET_REQUEST:
-               data = &p->header[3];
-               length = 4;
-               break;
-
-       case TCODE_WRITE_BLOCK_REQUEST:
-       case TCODE_LOCK_REQUEST:
-               data = p->payload;
-               length = HEADER_GET_DATA_LENGTH(p->header[3]);
-               break;
-
-       case TCODE_READ_QUADLET_REQUEST:
-               data = NULL;
-               length = 4;
-               break;
-
-       case TCODE_READ_BLOCK_REQUEST:
-               data = NULL;
-               length = HEADER_GET_DATA_LENGTH(p->header[3]);
-               break;
-
-       default:
-               fw_error("ERROR - corrupt request received - %08x %08x %08x\n",
-                        p->header[0], p->header[1], p->header[2]);
-               return NULL;
-       }
-
-       request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
-       if (request == NULL)
-               return NULL;
-
-       t = (p->timestamp & 0x1fff) + 4000;
-       if (t >= 8000)
-               t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
-       else
-               t = (p->timestamp & ~0x1fff) + t;
-
-       request->response.speed = p->speed;
-       request->response.timestamp = t;
-       request->response.generation = p->generation;
-       request->response.ack = 0;
-       request->response.callback = free_response_callback;
-       request->ack = p->ack;
-       request->length = length;
-       if (data)
-               memcpy(request->data, data, length);
-
-       memcpy(request->request_header, p->header, sizeof(p->header));
-
-       return request;
-}
-
-void fw_send_response(struct fw_card *card,
-                     struct fw_request *request, int rcode)
-{
-       /* unified transaction or broadcast transaction: don't respond */
-       if (request->ack != ACK_PENDING ||
-           HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
-               kfree(request);
-               return;
-       }
-
-       if (rcode == RCODE_COMPLETE)
-               fw_fill_response(&request->response, request->request_header,
-                                rcode, request->data, request->length);
-       else
-               fw_fill_response(&request->response, request->request_header,
-                                rcode, NULL, 0);
-
-       card->driver->send_response(card, &request->response);
-}
-EXPORT_SYMBOL(fw_send_response);
-
-void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
-{
-       struct fw_address_handler *handler;
-       struct fw_request *request;
-       unsigned long long offset;
-       unsigned long flags;
-       int tcode, destination, source;
-
-       if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
-               return;
-
-       request = allocate_request(p);
-       if (request == NULL) {
-               /* FIXME: send statically allocated busy packet. */
-               return;
-       }
-
-       offset      =
-               ((unsigned long long)
-                HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
-       tcode       = HEADER_GET_TCODE(p->header[0]);
-       destination = HEADER_GET_DESTINATION(p->header[0]);
-       source      = HEADER_GET_SOURCE(p->header[1]);
-
-       spin_lock_irqsave(&address_handler_lock, flags);
-       handler = lookup_enclosing_address_handler(&address_handler_list,
-                                                  offset, request->length);
-       spin_unlock_irqrestore(&address_handler_lock, flags);
-
-       /*
-        * FIXME: lookup the fw_node corresponding to the sender of
-        * this request and pass that to the address handler instead
-        * of the node ID.  We may also want to move the address
-        * allocations to fw_node so we only do this callback if the
-        * upper layers registered it for this node.
-        */
-
-       if (handler == NULL)
-               fw_send_response(card, request, RCODE_ADDRESS_ERROR);
-       else
-               handler->address_callback(card, request,
-                                         tcode, destination, source,
-                                         p->generation, p->speed, offset,
-                                         request->data, request->length,
-                                         handler->callback_data);
-}
-EXPORT_SYMBOL(fw_core_handle_request);
-
-void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
-{
-       struct fw_transaction *t;
-       unsigned long flags;
-       u32 *data;
-       size_t data_length;
-       int tcode, tlabel, destination, source, rcode;
-
-       tcode       = HEADER_GET_TCODE(p->header[0]);
-       tlabel      = HEADER_GET_TLABEL(p->header[0]);
-       destination = HEADER_GET_DESTINATION(p->header[0]);
-       source      = HEADER_GET_SOURCE(p->header[1]);
-       rcode       = HEADER_GET_RCODE(p->header[1]);
-
-       spin_lock_irqsave(&card->lock, flags);
-       list_for_each_entry(t, &card->transaction_list, link) {
-               if (t->node_id == source && t->tlabel == tlabel) {
-                       list_del(&t->link);
-                       card->tlabel_mask &= ~(1 << t->tlabel);
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       if (&t->link == &card->transaction_list) {
-               fw_notify("Unsolicited response (source %x, tlabel %x)\n",
-                         source, tlabel);
-               return;
-       }
-
-       /*
-        * FIXME: sanity check packet, is length correct, does tcodes
-        * and addresses match.
-        */
-
-       switch (tcode) {
-       case TCODE_READ_QUADLET_RESPONSE:
-               data = (u32 *) &p->header[3];
-               data_length = 4;
-               break;
-
-       case TCODE_WRITE_RESPONSE:
-               data = NULL;
-               data_length = 0;
-               break;
-
-       case TCODE_READ_BLOCK_RESPONSE:
-       case TCODE_LOCK_RESPONSE:
-               data = p->payload;
-               data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
-               break;
-
-       default:
-               /* Should never happen, this is just to shut up gcc. */
-               data = NULL;
-               data_length = 0;
-               break;
-       }
-
-       /*
-        * The response handler may be executed while the request handler
-        * is still pending.  Cancel the request handler.
-        */
-       card->driver->cancel_packet(card, &t->packet);
-
-       t->callback(card, rcode, data, data_length, t->callback_data);
-}
-EXPORT_SYMBOL(fw_core_handle_response);
-
-static const struct fw_address_region topology_map_region =
-       { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
-         .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
-
-static void handle_topology_map(struct fw_card *card, struct fw_request *request,
-               int tcode, int destination, int source, int generation,
-               int speed, unsigned long long offset,
-               void *payload, size_t length, void *callback_data)
-{
-       int i, start, end;
-       __be32 *map;
-
-       if (!TCODE_IS_READ_REQUEST(tcode)) {
-               fw_send_response(card, request, RCODE_TYPE_ERROR);
-               return;
-       }
-
-       if ((offset & 3) > 0 || (length & 3) > 0) {
-               fw_send_response(card, request, RCODE_ADDRESS_ERROR);
-               return;
-       }
-
-       start = (offset - topology_map_region.start) / 4;
-       end = start + length / 4;
-       map = payload;
-
-       for (i = 0; i < length / 4; i++)
-               map[i] = cpu_to_be32(card->topology_map[start + i]);
-
-       fw_send_response(card, request, RCODE_COMPLETE);
-}
-
-static struct fw_address_handler topology_map = {
-       .length                 = 0x200,
-       .address_callback       = handle_topology_map,
-};
-
-static const struct fw_address_region registers_region =
-       { .start = CSR_REGISTER_BASE,
-         .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
-
-static void handle_registers(struct fw_card *card, struct fw_request *request,
-               int tcode, int destination, int source, int generation,
-               int speed, unsigned long long offset,
-               void *payload, size_t length, void *callback_data)
-{
-       int reg = offset & ~CSR_REGISTER_BASE;
-       unsigned long long bus_time;
-       __be32 *data = payload;
-       int rcode = RCODE_COMPLETE;
-
-       switch (reg) {
-       case CSR_CYCLE_TIME:
-       case CSR_BUS_TIME:
-               if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
-                       rcode = RCODE_TYPE_ERROR;
-                       break;
-               }
-
-               bus_time = card->driver->get_bus_time(card);
-               if (reg == CSR_CYCLE_TIME)
-                       *data = cpu_to_be32(bus_time);
-               else
-                       *data = cpu_to_be32(bus_time >> 25);
-               break;
-
-       case CSR_BROADCAST_CHANNEL:
-               if (tcode == TCODE_READ_QUADLET_REQUEST)
-                       *data = cpu_to_be32(card->broadcast_channel);
-               else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
-                       card->broadcast_channel =
-                           (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
-                           BROADCAST_CHANNEL_INITIAL;
-               else
-                       rcode = RCODE_TYPE_ERROR;
-               break;
-
-       case CSR_BUS_MANAGER_ID:
-       case CSR_BANDWIDTH_AVAILABLE:
-       case CSR_CHANNELS_AVAILABLE_HI:
-       case CSR_CHANNELS_AVAILABLE_LO:
-               /*
-                * FIXME: these are handled by the OHCI hardware and
-                * the stack never sees these request. If we add
-                * support for a new type of controller that doesn't
-                * handle this in hardware we need to deal with these
-                * transactions.
-                */
-               BUG();
-               break;
-
-       case CSR_BUSY_TIMEOUT:
-               /* FIXME: Implement this. */
-
-       default:
-               rcode = RCODE_ADDRESS_ERROR;
-               break;
-       }
-
-       fw_send_response(card, request, rcode);
-}
-
-static struct fw_address_handler registers = {
-       .length                 = 0x400,
-       .address_callback       = handle_registers,
-};
-
-MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
-MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
-MODULE_LICENSE("GPL");
-
-static const u32 vendor_textual_descriptor[] = {
-       /* textual descriptor leaf () */
-       0x00060000,
-       0x00000000,
-       0x00000000,
-       0x4c696e75,             /* L i n u */
-       0x78204669,             /* x   F i */
-       0x72657769,             /* r e w i */
-       0x72650000,             /* r e     */
-};
-
-static const u32 model_textual_descriptor[] = {
-       /* model descriptor leaf () */
-       0x00030000,
-       0x00000000,
-       0x00000000,
-       0x4a756a75,             /* J u j u */
-};
-
-static struct fw_descriptor vendor_id_descriptor = {
-       .length = ARRAY_SIZE(vendor_textual_descriptor),
-       .immediate = 0x03d00d1e,
-       .key = 0x81000000,
-       .data = vendor_textual_descriptor,
-};
-
-static struct fw_descriptor model_id_descriptor = {
-       .length = ARRAY_SIZE(model_textual_descriptor),
-       .immediate = 0x17000001,
-       .key = 0x81000000,
-       .data = model_textual_descriptor,
-};
-
-static int __init fw_core_init(void)
-{
-       int ret;
-
-       ret = bus_register(&fw_bus_type);
-       if (ret < 0)
-               return ret;
-
-       fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
-       if (fw_cdev_major < 0) {
-               bus_unregister(&fw_bus_type);
-               return fw_cdev_major;
-       }
-
-       fw_core_add_address_handler(&topology_map, &topology_map_region);
-       fw_core_add_address_handler(&registers, &registers_region);
-       fw_core_add_descriptor(&vendor_id_descriptor);
-       fw_core_add_descriptor(&model_id_descriptor);
-
-       return 0;
-}
-
-static void __exit fw_core_cleanup(void)
-{
-       unregister_chrdev(fw_cdev_major, "firewire");
-       bus_unregister(&fw_bus_type);
-       idr_destroy(&fw_device_idr);
-}
-
-module_init(fw_core_init);
-module_exit(fw_core_cleanup);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
new file mode 100644 (file)
index 0000000..ecddd11
--- /dev/null
@@ -0,0 +1,2636 @@
+/*
+ * Driver for OHCI 1394 controllers
+ *
+ * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_PPC_PMAC
+#include <asm/pmac_feature.h>
+#endif
+
+#include "core.h"
+#include "ohci.h"
+
+#define DESCRIPTOR_OUTPUT_MORE         0
+#define DESCRIPTOR_OUTPUT_LAST         (1 << 12)
+#define DESCRIPTOR_INPUT_MORE          (2 << 12)
+#define DESCRIPTOR_INPUT_LAST          (3 << 12)
+#define DESCRIPTOR_STATUS              (1 << 11)
+#define DESCRIPTOR_KEY_IMMEDIATE       (2 << 8)
+#define DESCRIPTOR_PING                        (1 << 7)
+#define DESCRIPTOR_YY                  (1 << 6)
+#define DESCRIPTOR_NO_IRQ              (0 << 4)
+#define DESCRIPTOR_IRQ_ERROR           (1 << 4)
+#define DESCRIPTOR_IRQ_ALWAYS          (3 << 4)
+#define DESCRIPTOR_BRANCH_ALWAYS       (3 << 2)
+#define DESCRIPTOR_WAIT                        (3 << 0)
+
+struct descriptor {
+       __le16 req_count;
+       __le16 control;
+       __le32 data_address;
+       __le32 branch_address;
+       __le16 res_count;
+       __le16 transfer_status;
+} __attribute__((aligned(16)));
+
+struct db_descriptor {
+       __le16 first_size;
+       __le16 control;
+       __le16 second_req_count;
+       __le16 first_req_count;
+       __le32 branch_address;
+       __le16 second_res_count;
+       __le16 first_res_count;
+       __le32 reserved0;
+       __le32 first_buffer;
+       __le32 second_buffer;
+       __le32 reserved1;
+} __attribute__((aligned(16)));
+
+#define CONTROL_SET(regs)      (regs)
+#define CONTROL_CLEAR(regs)    ((regs) + 4)
+#define COMMAND_PTR(regs)      ((regs) + 12)
+#define CONTEXT_MATCH(regs)    ((regs) + 16)
+
+struct ar_buffer {
+       struct descriptor descriptor;
+       struct ar_buffer *next;
+       __le32 data[0];
+};
+
+struct ar_context {
+       struct fw_ohci *ohci;
+       struct ar_buffer *current_buffer;
+       struct ar_buffer *last_buffer;
+       void *pointer;
+       u32 regs;
+       struct tasklet_struct tasklet;
+};
+
+struct context;
+
+typedef int (*descriptor_callback_t)(struct context *ctx,
+                                    struct descriptor *d,
+                                    struct descriptor *last);
+
+/*
+ * A buffer that contains a block of DMA-able coherent memory used for
+ * storing a portion of a DMA descriptor program.
+ */
+struct descriptor_buffer {
+       struct list_head list;
+       dma_addr_t buffer_bus;
+       size_t buffer_size;
+       size_t used;
+       struct descriptor buffer[0];
+};
+
+struct context {
+       struct fw_ohci *ohci;
+       u32 regs;
+       int total_allocation;
+
+       /*
+        * List of page-sized buffers for storing DMA descriptors.
+        * Head of list contains buffers in use and tail of list contains
+        * free buffers.
+        */
+       struct list_head buffer_list;
+
+       /*
+        * Pointer to a buffer inside buffer_list that contains the tail
+        * end of the current DMA program.
+        */
+       struct descriptor_buffer *buffer_tail;
+
+       /*
+        * The descriptor containing the branch address of the first
+        * descriptor that has not yet been filled by the device.
+        */
+       struct descriptor *last;
+
+       /*
+        * The last descriptor in the DMA program.  It contains the branch
+        * address that must be updated upon appending a new descriptor.
+        */
+       struct descriptor *prev;
+
+       descriptor_callback_t callback;
+
+       struct tasklet_struct tasklet;
+};
+
+#define IT_HEADER_SY(v)          ((v) <<  0)
+#define IT_HEADER_TCODE(v)       ((v) <<  4)
+#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
+#define IT_HEADER_TAG(v)         ((v) << 14)
+#define IT_HEADER_SPEED(v)       ((v) << 16)
+#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
+
+struct iso_context {
+       struct fw_iso_context base;
+       struct context context;
+       int excess_bytes;
+       void *header;
+       size_t header_length;
+};
+
+#define CONFIG_ROM_SIZE 1024
+
+struct fw_ohci {
+       struct fw_card card;
+
+       __iomem char *registers;
+       dma_addr_t self_id_bus;
+       __le32 *self_id_cpu;
+       struct tasklet_struct bus_reset_tasklet;
+       int node_id;
+       int generation;
+       int request_generation; /* for timestamping incoming requests */
+       atomic_t bus_seconds;
+
+       bool use_dualbuffer;
+       bool old_uninorth;
+       bool bus_reset_packet_quirk;
+
+       /*
+        * Spinlock for accessing fw_ohci data.  Never call out of
+        * this driver with this lock held.
+        */
+       spinlock_t lock;
+       u32 self_id_buffer[512];
+
+       /* Config rom buffers */
+       __be32 *config_rom;
+       dma_addr_t config_rom_bus;
+       __be32 *next_config_rom;
+       dma_addr_t next_config_rom_bus;
+       u32 next_header;
+
+       struct ar_context ar_request_ctx;
+       struct ar_context ar_response_ctx;
+       struct context at_request_ctx;
+       struct context at_response_ctx;
+
+       u32 it_context_mask;
+       struct iso_context *it_context_list;
+       u64 ir_context_channels;
+       u32 ir_context_mask;
+       struct iso_context *ir_context_list;
+};
+
+static inline struct fw_ohci *fw_ohci(struct fw_card *card)
+{
+       return container_of(card, struct fw_ohci, card);
+}
+
+#define IT_CONTEXT_CYCLE_MATCH_ENABLE  0x80000000
+#define IR_CONTEXT_BUFFER_FILL         0x80000000
+#define IR_CONTEXT_ISOCH_HEADER                0x40000000
+#define IR_CONTEXT_CYCLE_MATCH_ENABLE  0x20000000
+#define IR_CONTEXT_MULTI_CHANNEL_MODE  0x10000000
+#define IR_CONTEXT_DUAL_BUFFER_MODE    0x08000000
+
+#define CONTEXT_RUN    0x8000
+#define CONTEXT_WAKE   0x1000
+#define CONTEXT_DEAD   0x0800
+#define CONTEXT_ACTIVE 0x0400
+
+#define OHCI1394_MAX_AT_REQ_RETRIES    0xf
+#define OHCI1394_MAX_AT_RESP_RETRIES   0x2
+#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
+
+#define OHCI1394_REGISTER_SIZE         0x800
+#define OHCI_LOOP_COUNT                        500
+#define OHCI1394_PCI_HCI_Control       0x40
+#define SELF_ID_BUF_SIZE               0x800
+#define OHCI_TCODE_PHY_PACKET          0x0e
+#define OHCI_VERSION_1_1               0x010010
+
+static char ohci_driver_name[] = KBUILD_MODNAME;
+
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+
+#define OHCI_PARAM_DEBUG_AT_AR         1
+#define OHCI_PARAM_DEBUG_SELFIDS       2
+#define OHCI_PARAM_DEBUG_IRQS          4
+#define OHCI_PARAM_DEBUG_BUSRESETS     8 /* only effective before chip init */
+
+static int param_debug;
+module_param_named(debug, param_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
+       ", AT/AR events = "     __stringify(OHCI_PARAM_DEBUG_AT_AR)
+       ", self-IDs = "         __stringify(OHCI_PARAM_DEBUG_SELFIDS)
+       ", IRQs = "             __stringify(OHCI_PARAM_DEBUG_IRQS)
+       ", busReset events = "  __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
+       ", or a combination, or all = -1)");
+
+static void log_irqs(u32 evt)
+{
+       if (likely(!(param_debug &
+                       (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
+               return;
+
+       if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
+           !(evt & OHCI1394_busReset))
+               return;
+
+       fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+           evt & OHCI1394_selfIDComplete       ? " selfID"             : "",
+           evt & OHCI1394_RQPkt                ? " AR_req"             : "",
+           evt & OHCI1394_RSPkt                ? " AR_resp"            : "",
+           evt & OHCI1394_reqTxComplete        ? " AT_req"             : "",
+           evt & OHCI1394_respTxComplete       ? " AT_resp"            : "",
+           evt & OHCI1394_isochRx              ? " IR"                 : "",
+           evt & OHCI1394_isochTx              ? " IT"                 : "",
+           evt & OHCI1394_postedWriteErr       ? " postedWriteErr"     : "",
+           evt & OHCI1394_cycleTooLong         ? " cycleTooLong"       : "",
+           evt & OHCI1394_cycle64Seconds       ? " cycle64Seconds"     : "",
+           evt & OHCI1394_regAccessFail        ? " regAccessFail"      : "",
+           evt & OHCI1394_busReset             ? " busReset"           : "",
+           evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
+                   OHCI1394_RSPkt | OHCI1394_reqTxComplete |
+                   OHCI1394_respTxComplete | OHCI1394_isochRx |
+                   OHCI1394_isochTx | OHCI1394_postedWriteErr |
+                   OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
+                   OHCI1394_regAccessFail | OHCI1394_busReset)
+                                               ? " ?"                  : "");
+}
+
+static const char *speed[] = {
+       [0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
+};
+static const char *power[] = {
+       [0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
+       [4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
+};
+static const char port[] = { '.', '-', 'p', 'c', };
+
+static char _p(u32 *s, int shift)
+{
+       return port[*s >> shift & 3];
+}
+
+static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
+{
+       if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
+               return;
+
+       fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
+                 self_id_count, generation, node_id);
+
+       for (; self_id_count--; ++s)
+               if ((*s & 1 << 23) == 0)
+                       fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
+                           "%s gc=%d %s %s%s%s\n",
+                           *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
+                           speed[*s >> 14 & 3], *s >> 16 & 63,
+                           power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
+                           *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
+               else
+                       fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
+                           *s, *s >> 24 & 63,
+                           _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
+                           _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
+}
+
+static const char *evts[] = {
+       [0x00] = "evt_no_status",       [0x01] = "-reserved-",
+       [0x02] = "evt_long_packet",     [0x03] = "evt_missing_ack",
+       [0x04] = "evt_underrun",        [0x05] = "evt_overrun",
+       [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
+       [0x08] = "evt_data_write",      [0x09] = "evt_bus_reset",
+       [0x0a] = "evt_timeout",         [0x0b] = "evt_tcode_err",
+       [0x0c] = "-reserved-",          [0x0d] = "-reserved-",
+       [0x0e] = "evt_unknown",         [0x0f] = "evt_flushed",
+       [0x10] = "-reserved-",          [0x11] = "ack_complete",
+       [0x12] = "ack_pending ",        [0x13] = "-reserved-",
+       [0x14] = "ack_busy_X",          [0x15] = "ack_busy_A",
+       [0x16] = "ack_busy_B",          [0x17] = "-reserved-",
+       [0x18] = "-reserved-",          [0x19] = "-reserved-",
+       [0x1a] = "-reserved-",          [0x1b] = "ack_tardy",
+       [0x1c] = "-reserved-",          [0x1d] = "ack_data_error",
+       [0x1e] = "ack_type_error",      [0x1f] = "-reserved-",
+       [0x20] = "pending/cancelled",
+};
+static const char *tcodes[] = {
+       [0x0] = "QW req",               [0x1] = "BW req",
+       [0x2] = "W resp",               [0x3] = "-reserved-",
+       [0x4] = "QR req",               [0x5] = "BR req",
+       [0x6] = "QR resp",              [0x7] = "BR resp",
+       [0x8] = "cycle start",          [0x9] = "Lk req",
+       [0xa] = "async stream packet",  [0xb] = "Lk resp",
+       [0xc] = "-reserved-",           [0xd] = "-reserved-",
+       [0xe] = "link internal",        [0xf] = "-reserved-",
+};
+static const char *phys[] = {
+       [0x0] = "phy config packet",    [0x1] = "link-on packet",
+       [0x2] = "self-id packet",       [0x3] = "-reserved-",
+};
+
+static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
+{
+       int tcode = header[0] >> 4 & 0xf;
+       char specific[12];
+
+       if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
+               return;
+
+       if (unlikely(evt >= ARRAY_SIZE(evts)))
+                       evt = 0x1f;
+
+       if (evt == OHCI1394_evt_bus_reset) {
+               fw_notify("A%c evt_bus_reset, generation %d\n",
+                   dir, (header[2] >> 16) & 0xff);
+               return;
+       }
+
+       if (header[0] == ~header[1]) {
+               fw_notify("A%c %s, %s, %08x\n",
+                   dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
+               return;
+       }
+
+       switch (tcode) {
+       case 0x0: case 0x6: case 0x8:
+               snprintf(specific, sizeof(specific), " = %08x",
+                        be32_to_cpu((__force __be32)header[3]));
+               break;
+       case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
+               snprintf(specific, sizeof(specific), " %x,%x",
+                        header[3] >> 16, header[3] & 0xffff);
+               break;
+       default:
+               specific[0] = '\0';
+       }
+
+       switch (tcode) {
+       case 0xe: case 0xa:
+               fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
+               break;
+       case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
+               fw_notify("A%c spd %x tl %02x, "
+                   "%04x -> %04x, %s, "
+                   "%s, %04x%08x%s\n",
+                   dir, speed, header[0] >> 10 & 0x3f,
+                   header[1] >> 16, header[0] >> 16, evts[evt],
+                   tcodes[tcode], header[1] & 0xffff, header[2], specific);
+               break;
+       default:
+               fw_notify("A%c spd %x tl %02x, "
+                   "%04x -> %04x, %s, "
+                   "%s%s\n",
+                   dir, speed, header[0] >> 10 & 0x3f,
+                   header[1] >> 16, header[0] >> 16, evts[evt],
+                   tcodes[tcode], specific);
+       }
+}
+
+#else
+
+#define log_irqs(evt)
+#define log_selfids(node_id, generation, self_id_count, sid)
+#define log_ar_at_event(dir, speed, header, evt)
+
+#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
+
+static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
+{
+       writel(data, ohci->registers + offset);
+}
+
+static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
+{
+       return readl(ohci->registers + offset);
+}
+
+static inline void flush_writes(const struct fw_ohci *ohci)
+{
+       /* Do a dummy read to flush writes. */
+       reg_read(ohci, OHCI1394_Version);
+}
+
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+                              int clear_bits, int set_bits)
+{
+       struct fw_ohci *ohci = fw_ohci(card);
+       u32 val, old;
+
+       reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
+       flush_writes(ohci);
+       msleep(2);
+       val = reg_read(ohci, OHCI1394_PhyControl);
+       if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
+               fw_error("failed to set phy reg bits.\n");
+               return -EBUSY;
+       }
+
+       old = OHCI1394_PhyControl_ReadData(val);
+       old = (old & ~clear_bits) | set_bits;
+       reg_write(ohci, OHCI1394_PhyControl,
+                 OHCI1394_PhyControl_Write(addr, old));
+
+       return 0;
+}
+
+static int ar_context_add_page(struct ar_context *ctx)
+{
+       struct device *dev = ctx->ohci->card.device;
+       struct ar_buffer *ab;
+       dma_addr_t uninitialized_var(ab_bus);
+       size_t offset;
+
+       ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
+       if (ab == NULL)
+               return -ENOMEM;
+
+       ab->next = NULL;
+       memset(&ab->descriptor, 0, sizeof(ab->descriptor));
+       ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+                                                   DESCRIPTOR_STATUS |
+                                                   DESCRIPTOR_BRANCH_ALWAYS);
+       offset = offsetof(struct ar_buffer, data);
+       ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
+       ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
+       ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
+       ab->descriptor.branch_address = 0;
+
+       ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
+       ctx->last_buffer->next = ab;
+       ctx->last_buffer = ab;
+
+       reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+       flush_writes(ctx->ohci);
+
+       return 0;
+}
+
+static void ar_context_release(struct ar_context *ctx)
+{
+       struct ar_buffer *ab, *ab_next;
+       size_t offset;
+       dma_addr_t ab_bus;
+
+       for (ab = ctx->current_buffer; ab; ab = ab_next) {
+               ab_next = ab->next;
+               offset = offsetof(struct ar_buffer, data);
+               ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+               dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
+                                 ab, ab_bus);
+       }
+}
+
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+#define cond_le32_to_cpu(v) \
+       (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
+#else
+#define cond_le32_to_cpu(v) le32_to_cpu(v)
+#endif
+
+static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
+{
+       struct fw_ohci *ohci = ctx->ohci;
+       struct fw_packet p;
+       u32 status, length, tcode;
+       int evt;
+
+       p.header[0] = cond_le32_to_cpu(buffer[0]);
+       p.header[1] = cond_le32_to_cpu(buffer[1]);
+       p.header[2] = cond_le32_to_cpu(buffer[2]);
+
+       tcode = (p.header[0] >> 4) & 0x0f;
+       switch (tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+       case TCODE_READ_QUADLET_RESPONSE:
+               p.header[3] = (__force __u32) buffer[3];
+               p.header_length = 16;
+               p.payload_length = 0;
+               break;
+
+       case TCODE_READ_BLOCK_REQUEST :
+               p.header[3] = cond_le32_to_cpu(buffer[3]);
+               p.header_length = 16;
+               p.payload_length = 0;
+               break;
+
+       case TCODE_WRITE_BLOCK_REQUEST:
+       case TCODE_READ_BLOCK_RESPONSE:
+       case TCODE_LOCK_REQUEST:
+       case TCODE_LOCK_RESPONSE:
+               p.header[3] = cond_le32_to_cpu(buffer[3]);
+               p.header_length = 16;
+               p.payload_length = p.header[3] >> 16;
+               break;
+
+       case TCODE_WRITE_RESPONSE:
+       case TCODE_READ_QUADLET_REQUEST:
+       case OHCI_TCODE_PHY_PACKET:
+               p.header_length = 12;
+               p.payload_length = 0;
+               break;
+
+       default:
+               /* FIXME: Stop context, discard everything, and restart? */
+               p.header_length = 0;
+               p.payload_length = 0;
+       }
+
+       p.payload = (void *) buffer + p.header_length;
+
+       /* FIXME: What to do about evt_* errors? */
+       length = (p.header_length + p.payload_length + 3) / 4;
+       status = cond_le32_to_cpu(buffer[length]);
+       evt    = (status >> 16) & 0x1f;
+
+       p.ack        = evt - 16;
+       p.speed      = (status >> 21) & 0x7;
+       p.timestamp  = status & 0xffff;
+       p.generation = ohci->request_generation;
+
+       log_ar_at_event('R', p.speed, p.header, evt);
+
+       /*
+        * The OHCI bus reset handler synthesizes a phy packet with
+        * the new generation number when a bus reset happens (see
+        * section 8.4.2.3).  This helps us determine when a request
+        * was received and make sure we send the response in the same
+        * generation.  We only need this for requests; for responses
+        * we use the unique tlabel for finding the matching
+        * request.
+        *
+        * Alas some chips sometimes emit bus reset packets with a
+        * wrong generation.  We set the correct generation for these
+        * at a slightly incorrect time (in bus_reset_tasklet).
+        */
+       if (evt == OHCI1394_evt_bus_reset) {
+               if (!ohci->bus_reset_packet_quirk)
+                       ohci->request_generation = (p.header[2] >> 16) & 0xff;
+       } else if (ctx == &ohci->ar_request_ctx) {
+               fw_core_handle_request(&ohci->card, &p);
+       } else {
+               fw_core_handle_response(&ohci->card, &p);
+       }
+
+       return buffer + length + 1;
+}
+
+/*
+ * Bottom half for the asynchronous receive (AR) DMA contexts: parse the
+ * packets that the controller has deposited in the current buffer and
+ * hand each one to handle_ar_packet().
+ */
+static void ar_context_tasklet(unsigned long data)
+{
+       struct ar_context *ctx = (struct ar_context *)data;
+       struct fw_ohci *ohci = ctx->ohci;
+       struct ar_buffer *ab;
+       struct descriptor *d;
+       void *buffer, *end;
+
+       ab = ctx->current_buffer;
+       d = &ab->descriptor;
+
+       if (d->res_count == 0) {
+               size_t size, rest, offset;
+               dma_addr_t start_bus;
+               void *start;
+
+               /*
+                * This descriptor is finished and we may have a
+                * packet split across this and the next buffer. We
+                * reuse the page for reassembling the split packet.
+                */
+
+               offset = offsetof(struct ar_buffer, data);
+               start = buffer = ab;
+               /* data_address points at ab->data; back up to the page start */
+               start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+
+               ab = ab->next;
+               d = &ab->descriptor;
+               /* size: unparsed bytes left in the old page */
+               size = buffer + PAGE_SIZE - ctx->pointer;
+               /* rest: bytes the controller already wrote into the new page */
+               rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
+               memmove(buffer, ctx->pointer, size);
+               memcpy(buffer + size, ab->data, rest);
+               ctx->current_buffer = ab;
+               ctx->pointer = (void *) ab->data + rest;
+               end = buffer + size + rest;
+
+               while (buffer < end)
+                       buffer = handle_ar_packet(ctx, buffer);
+
+               /* old page fully consumed: free it and queue a fresh one */
+               dma_free_coherent(ohci->card.device, PAGE_SIZE,
+                                 start, start_bus);
+               ar_context_add_page(ctx);
+       } else {
+               /* Descriptor still filling: parse only the newly arrived
+                * bytes, up to the current fill level of the page. */
+               buffer = ctx->pointer;
+               ctx->pointer = end =
+                       (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
+
+               while (buffer < end)
+                       buffer = handle_ar_packet(ctx, buffer);
+       }
+}
+
+/*
+ * Set up an AR context and hang two initial receive pages off of it.
+ * Always returns 0 (ar_context_add_page() failures are not propagated
+ * here).
+ */
+static int ar_context_init(struct ar_context *ctx,
+                          struct fw_ohci *ohci, u32 regs)
+{
+       struct ar_buffer ab;
+
+       ctx->regs        = regs;
+       ctx->ohci        = ohci;
+       /*
+        * 'ab' is a throwaway stack dummy: it only acts as the list head
+        * that the first ar_context_add_page() call links onto; it is not
+        * referenced after this function returns.
+        */
+       ctx->last_buffer = &ab;
+       tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
+
+       ar_context_add_page(ctx);
+       ar_context_add_page(ctx);
+       ctx->current_buffer = ab.next;
+       ctx->pointer = ctx->current_buffer->data;
+
+       return 0;
+}
+
+/* Point the controller at the context's current buffer and start DMA. */
+static void ar_context_run(struct ar_context *ctx)
+{
+       struct ar_buffer *ab = ctx->current_buffer;
+       dma_addr_t ab_bus;
+       size_t offset;
+
+       /* data_address points at ab->data; subtract the offset to get the
+        * bus address of the ar_buffer (and thus of its descriptor) */
+       offset = offsetof(struct ar_buffer, data);
+       ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+
+       /* low bits of CommandPtr carry the Z value (here 1 descriptor) */
+       reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
+       reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
+       flush_writes(ctx->ohci);
+}
+
+/*
+ * Return the descriptor within a z-block of descriptors that carries
+ * the branch address: for a two-descriptor block whose first descriptor
+ * is an immediate or branches always, that is the first one; otherwise
+ * it is the last descriptor of the block.
+ */
+static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
+{
+       int control = le16_to_cpu(d->control);
+       int branch  = (control & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
+       int key     = (control & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
+
+       /* figure out which descriptor the branch address goes in */
+       if (z == 2 && (branch == 3 || key == 2))
+               return d;
+
+       return d + z - 1;
+}
+
+/*
+ * Bottom half shared by the AT and isochronous DMA contexts: follow the
+ * chain of branch addresses from the last handled descriptor, invoking
+ * the context's callback for each descriptor block and retiring
+ * descriptor buffers once the chain has moved past them.
+ */
+static void context_tasklet(unsigned long data)
+{
+       struct context *ctx = (struct context *) data;
+       struct descriptor *d, *last;
+       u32 address;
+       int z;
+       struct descriptor_buffer *desc;
+
+       desc = list_entry(ctx->buffer_list.next,
+                       struct descriptor_buffer, list);
+       last = ctx->last;
+       while (last->branch_address != 0) {
+               struct descriptor_buffer *old_desc = desc;
+               /* the low four bits of the branch address encode z, the
+                * number of descriptors in the next block */
+               address = le32_to_cpu(last->branch_address);
+               z = address & 0xf;
+               address &= ~0xf;
+
+               /* If the branch address points to a buffer outside of the
+                * current buffer, advance to the next buffer. */
+               if (address < desc->buffer_bus ||
+                               address >= desc->buffer_bus + desc->used)
+                       desc = list_entry(desc->list.next,
+                                       struct descriptor_buffer, list);
+               d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
+               last = find_branch_descriptor(d, z);
+
+               /* callback returns 0 if this descriptor is not done yet */
+               if (!ctx->callback(ctx, d, last))
+                       break;
+
+               if (old_desc != desc) {
+                       /* If we've advanced to the next buffer, move the
+                        * previous buffer to the free list. */
+                       unsigned long flags;
+                       old_desc->used = 0;
+                       spin_lock_irqsave(&ctx->ohci->lock, flags);
+                       list_move_tail(&old_desc->list, &ctx->buffer_list);
+                       spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+               }
+               ctx->last = last;
+       }
+}
+
+/*
+ * Allocate a new buffer and add it to the list of free buffers for this
+ * context.  Must be called with ohci->lock held.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure or when the 16MB
+ * cap on total descriptor memory for this context has been reached.
+ */
+static int context_add_buffer(struct context *ctx)
+{
+       struct descriptor_buffer *desc;
+       dma_addr_t uninitialized_var(bus_addr);
+       int offset;
+
+       /*
+        * 16MB of descriptors should be far more than enough for any DMA
+        * program.  This will catch run-away userspace or DoS attacks.
+        */
+       if (ctx->total_allocation >= 16*1024*1024)
+               return -ENOMEM;
+
+       desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
+                       &bus_addr, GFP_ATOMIC);
+       if (!desc)
+               return -ENOMEM;
+
+       /* the descriptor array starts right after the bookkeeping header */
+       offset = (void *)&desc->buffer - (void *)desc;
+       desc->buffer_size = PAGE_SIZE - offset;
+       desc->buffer_bus = bus_addr + offset;
+       desc->used = 0;
+
+       list_add_tail(&desc->list, &ctx->buffer_list);
+       ctx->total_allocation += PAGE_SIZE;
+
+       return 0;
+}
+
+/*
+ * Initialize a DMA context: allocate the first descriptor buffer and
+ * prime it with a dummy "already completed" descriptor so there is
+ * always a tail to append DMA programs to.
+ *
+ * Returns 0 on success or -ENOMEM if the first buffer cannot be
+ * allocated.
+ */
+static int context_init(struct context *ctx, struct fw_ohci *ohci,
+                       u32 regs, descriptor_callback_t callback)
+{
+       ctx->ohci = ohci;
+       ctx->regs = regs;
+       ctx->total_allocation = 0;
+
+       INIT_LIST_HEAD(&ctx->buffer_list);
+       if (context_add_buffer(ctx) < 0)
+               return -ENOMEM;
+
+       ctx->buffer_tail = list_entry(ctx->buffer_list.next,
+                       struct descriptor_buffer, list);
+
+       tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
+       ctx->callback = callback;
+
+       /*
+        * We put a dummy descriptor in the buffer that has a NULL
+        * branch address and looks like it's been sent.  That way we
+        * have a descriptor to append DMA programs to.
+        */
+       memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
+       ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
+       ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
+       ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
+       ctx->last = ctx->buffer_tail->buffer;
+       ctx->prev = ctx->buffer_tail->buffer;
+
+       return 0;
+}
+
+/* Free every descriptor buffer owned by the context. */
+static void context_release(struct context *ctx)
+{
+       struct fw_card *card = &ctx->ohci->card;
+       struct descriptor_buffer *desc, *tmp;
+
+       /* buffer_bus points at desc->buffer; subtract the header offset
+        * to recover the bus address of the allocation itself */
+       list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
+               dma_free_coherent(card->device, PAGE_SIZE, desc,
+                       desc->buffer_bus -
+                       ((void *)&desc->buffer - (void *)desc));
+}
+
+/*
+ * Reserve room for a block of z contiguous descriptors at the tail of
+ * the context's DMA program; the block is zeroed and its bus address is
+ * returned through *d_bus.  Returns NULL if z descriptors can never fit
+ * in one buffer or if no further buffer can be allocated.
+ *
+ * Must be called with ohci->lock held.
+ */
+static struct descriptor *context_get_descriptors(struct context *ctx,
+                                                 int z, dma_addr_t *d_bus)
+{
+       struct descriptor *d = NULL;
+       struct descriptor_buffer *desc = ctx->buffer_tail;
+
+       /* a block must fit within a single buffer */
+       if (z * sizeof(*d) > desc->buffer_size)
+               return NULL;
+
+       if (z * sizeof(*d) > desc->buffer_size - desc->used) {
+               /* No room for the descriptor in this buffer, so advance to the
+                * next one. */
+
+               if (desc->list.next == &ctx->buffer_list) {
+                       /* If there is no free buffer next in the list,
+                        * allocate one. */
+                       if (context_add_buffer(ctx) < 0)
+                               return NULL;
+               }
+               desc = list_entry(desc->list.next,
+                               struct descriptor_buffer, list);
+               ctx->buffer_tail = desc;
+       }
+
+       d = desc->buffer + desc->used / sizeof(*d);
+       memset(d, 0, z * sizeof(*d));
+       *d_bus = desc->buffer_bus + desc->used;
+
+       return d;
+}
+
+/*
+ * Start (or restart) a context at the branch address stored in its last
+ * handled descriptor; 'extra' carries additional control bits to set.
+ */
+static void context_run(struct context *ctx, u32 extra)
+{
+       struct fw_ohci *ohci = ctx->ohci;
+
+       reg_write(ohci, COMMAND_PTR(ctx->regs),
+                 le32_to_cpu(ctx->last->branch_address));
+       reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
+       reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
+       flush_writes(ohci);
+}
+
+/*
+ * Link a block of z descriptors at d (plus 'extra' trailing descriptors
+ * reserved in the same allocation) into the context's DMA program by
+ * patching the previous branch descriptor, then wake the controller so
+ * it re-reads the chain.
+ */
+static void context_append(struct context *ctx,
+                          struct descriptor *d, int z, int extra)
+{
+       dma_addr_t d_bus;
+       struct descriptor_buffer *desc = ctx->buffer_tail;
+
+       d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
+
+       desc->used += (z + extra) * sizeof(*d);
+       /* branch address low bits carry z, the block's descriptor count */
+       ctx->prev->branch_address = cpu_to_le32(d_bus | z);
+       ctx->prev = find_branch_descriptor(d, z);
+
+       reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+       flush_writes(ctx->ohci);
+}
+
+/*
+ * Clear the context's run bit and poll for up to ~10ms until the
+ * controller reports it inactive; log an error if it never stops.
+ */
+static void context_stop(struct context *ctx)
+{
+       u32 reg;
+       int i;
+
+       reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+       flush_writes(ctx->ohci);
+
+       for (i = 0; i < 10; i++) {
+               reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
+               if ((reg & CONTEXT_ACTIVE) == 0)
+                       return;
+
+               mdelay(1);
+       }
+       fw_error("Error: DMA context still active (0x%08x)\n", reg);
+}
+
+/*
+ * Private data stored inside an AT descriptor block (in d[3]):
+ * backpointer to the queued packet, NULL once the packet is cancelled.
+ */
+struct driver_data {
+       struct fw_packet *packet;
+};
+
+/*
+ * This function appends a packet to the DMA queue for transmission.
+ * Must always be called with the ohci->lock held to ensure proper
+ * generation handling and locking around packet queue manipulation.
+ *
+ * Returns 0 on success; returns -1 with packet->ack set to an RCODE_*
+ * error when the packet could not be queued (no descriptor space, bad
+ * header length, DMA mapping failure, or stale bus generation).
+ */
+static int at_context_queue_packet(struct context *ctx,
+                                  struct fw_packet *packet)
+{
+       struct fw_ohci *ohci = ctx->ohci;
+       dma_addr_t d_bus, uninitialized_var(payload_bus);
+       struct driver_data *driver_data;
+       struct descriptor *d, *last;
+       __le32 *header;
+       int z, tcode;
+       u32 reg;
+
+       d = context_get_descriptors(ctx, 4, &d_bus);
+       if (d == NULL) {
+               packet->ack = RCODE_SEND_ERROR;
+               return -1;
+       }
+
+       d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
+       /* the timestamp rides in the immediate descriptor's res_count */
+       d[0].res_count = cpu_to_le16(packet->timestamp);
+
+       /*
+        * The DMA format for asynchronous link packets is different
+        * from the IEEE1394 layout, so shift the fields around
+        * accordingly.  If header_length is 8, it's a PHY packet, to
+        * which we need to prepend an extra quadlet.
+        */
+
+       /* the header quadlets are stored immediately in d[1] and d[2] */
+       header = (__le32 *) &d[1];
+       switch (packet->header_length) {
+       case 16:
+       case 12:
+               header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
+                                       (packet->speed << 16));
+               header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
+                                       (packet->header[0] & 0xffff0000));
+               header[2] = cpu_to_le32(packet->header[2]);
+
+               tcode = (packet->header[0] >> 4) & 0x0f;
+               if (TCODE_IS_BLOCK_PACKET(tcode))
+                       header[3] = cpu_to_le32(packet->header[3]);
+               else
+                       /* quadlet payload: passed through unswapped */
+                       header[3] = (__force __le32) packet->header[3];
+
+               d[0].req_count = cpu_to_le16(packet->header_length);
+               break;
+
+       case 8:
+               /* PHY packet: synthesize the extra leading quadlet */
+               header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
+                                       (packet->speed << 16));
+               header[1] = cpu_to_le32(packet->header[0]);
+               header[2] = cpu_to_le32(packet->header[1]);
+               d[0].req_count = cpu_to_le16(12);
+               break;
+
+       case 4:
+               header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
+                                       (packet->speed << 16));
+               header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
+               d[0].req_count = cpu_to_le16(8);
+               break;
+
+       default:
+               /* BUG(); */
+               packet->ack = RCODE_SEND_ERROR;
+               return -1;
+       }
+
+       /* stash the backpointer in the spare fourth descriptor */
+       driver_data = (struct driver_data *) &d[3];
+       driver_data->packet = packet;
+       packet->driver_data = driver_data;
+
+       if (packet->payload_length > 0) {
+               payload_bus =
+                       dma_map_single(ohci->card.device, packet->payload,
+                                      packet->payload_length, DMA_TO_DEVICE);
+               if (dma_mapping_error(ohci->card.device, payload_bus)) {
+                       packet->ack = RCODE_SEND_ERROR;
+                       return -1;
+               }
+               packet->payload_bus = payload_bus;
+
+               d[2].req_count    = cpu_to_le16(packet->payload_length);
+               d[2].data_address = cpu_to_le32(payload_bus);
+               last = &d[2];
+               z = 3;
+       } else {
+               last = &d[0];
+               z = 2;
+       }
+
+       last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+                                    DESCRIPTOR_IRQ_ALWAYS |
+                                    DESCRIPTOR_BRANCH_ALWAYS);
+
+       /*
+        * If the controller and packet generations don't match, we need to
+        * bail out and try again.  If IntEvent.busReset is set, the AT context
+        * is halted, so appending to the context and trying to run it is
+        * futile.  Most controllers do the right thing and just flush the AT
+        * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
+        * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
+        * up stalling out.  So we just bail out in software and try again
+        * later, and everyone is happy.
+        * FIXME: Document how the locking works.
+        */
+       if (ohci->generation != packet->generation ||
+           reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
+               if (packet->payload_length > 0)
+                       dma_unmap_single(ohci->card.device, payload_bus,
+                                        packet->payload_length, DMA_TO_DEVICE);
+               packet->ack = RCODE_GENERATION;
+               return -1;
+       }
+
+       context_append(ctx, d, z, 4 - z);
+
+       /* If the context isn't already running, start it up. */
+       reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
+       if ((reg & CONTEXT_RUN) == 0)
+               context_run(ctx, 0);
+
+       return 0;
+}
+
+/*
+ * Context callback for the AT contexts: translate the OHCI event code
+ * of a completed transmit descriptor block into an ack/rcode, unmap the
+ * payload, and complete the packet via its callback.
+ *
+ * Returns 0 to stop the tasklet's iteration (descriptor not finished
+ * yet), 1 to continue with the next descriptor block.
+ */
+static int handle_at_packet(struct context *context,
+                           struct descriptor *d,
+                           struct descriptor *last)
+{
+       struct driver_data *driver_data;
+       struct fw_packet *packet;
+       struct fw_ohci *ohci = context->ohci;
+       int evt;
+
+       if (last->transfer_status == 0)
+               /* This descriptor isn't done yet, stop iteration. */
+               return 0;
+
+       driver_data = (struct driver_data *) &d[3];
+       packet = driver_data->packet;
+       if (packet == NULL)
+               /* This packet was cancelled, just continue. */
+               return 1;
+
+       if (packet->payload_bus)
+               dma_unmap_single(ohci->card.device, packet->payload_bus,
+                                packet->payload_length, DMA_TO_DEVICE);
+
+       /* low five bits of the transfer status hold the event code */
+       evt = le16_to_cpu(last->transfer_status) & 0x1f;
+       packet->timestamp = le16_to_cpu(last->res_count);
+
+       log_ar_at_event('T', packet->speed, packet->header, evt);
+
+       switch (evt) {
+       case OHCI1394_evt_timeout:
+               /* Async response transmit timed out. */
+               packet->ack = RCODE_CANCELLED;
+               break;
+
+       case OHCI1394_evt_flushed:
+               /*
+                * The packet was flushed should give same error as
+                * when we try to use a stale generation count.
+                */
+               packet->ack = RCODE_GENERATION;
+               break;
+
+       case OHCI1394_evt_missing_ack:
+               /*
+                * Using a valid (current) generation count, but the
+                * node is not on the bus or not sending acks.
+                */
+               packet->ack = RCODE_NO_ACK;
+               break;
+
+       /* hardware ack codes are offset by 0x10 from the 1394 ack codes */
+       case ACK_COMPLETE + 0x10:
+       case ACK_PENDING + 0x10:
+       case ACK_BUSY_X + 0x10:
+       case ACK_BUSY_A + 0x10:
+       case ACK_BUSY_B + 0x10:
+       case ACK_DATA_ERROR + 0x10:
+       case ACK_TYPE_ERROR + 0x10:
+               packet->ack = evt - 0x10;
+               break;
+
+       default:
+               packet->ack = RCODE_SEND_ERROR;
+               break;
+       }
+
+       packet->callback(packet, &ohci->card, packet->ack);
+
+       return 1;
+}
+
+/* Field extraction helpers for IEEE 1394 async packet header quadlets. */
+#define HEADER_GET_DESTINATION(q)      (((q) >> 16) & 0xffff)
+#define HEADER_GET_TCODE(q)            (((q) >> 4) & 0x0f)
+#define HEADER_GET_OFFSET_HIGH(q)      (((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q)      (((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q)   (((q) >> 0) & 0xffff)
+
+/*
+ * Serve a config ROM read addressed to our own node locally, without
+ * going out on the bus; csr is the request's offset within the CSR
+ * register space.  Synthesizes a response packet and feeds it back into
+ * the core.
+ */
+static void handle_local_rom(struct fw_ohci *ohci,
+                            struct fw_packet *packet, u32 csr)
+{
+       struct fw_packet response;
+       int tcode, length, i;
+
+       tcode = HEADER_GET_TCODE(packet->header[0]);
+       if (TCODE_IS_BLOCK_PACKET(tcode))
+               length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+       else
+               length = 4;
+
+       /* byte offset into the ROM image; reject reads past its end */
+       i = csr - CSR_CONFIG_ROM;
+       if (i + length > CONFIG_ROM_SIZE) {
+               fw_fill_response(&response, packet->header,
+                                RCODE_ADDRESS_ERROR, NULL, 0);
+       } else if (!TCODE_IS_READ_REQUEST(tcode)) {
+               fw_fill_response(&response, packet->header,
+                                RCODE_TYPE_ERROR, NULL, 0);
+       } else {
+               fw_fill_response(&response, packet->header, RCODE_COMPLETE,
+                                (void *) ohci->config_rom + i, length);
+       }
+
+       fw_core_handle_response(&ohci->card, &response);
+}
+
+/*
+ * Serve a lock request addressed to our own node by using the
+ * controller's serialized CSR compare-swap unit; csr selects which of
+ * the bus management CSR registers is targeted.  Only compare-swap
+ * locks and quadlet reads are supported; anything else gets
+ * RCODE_TYPE_ERROR.
+ *
+ * Fix: 'lock_old' was previously read uninitialized by
+ * fw_fill_response() whenever the hardware had not completed the swap
+ * by the time OHCI1394_CSRControl was polled (the "swap not done yet"
+ * branch), leaking stack garbage into the response payload.  It is now
+ * zero-initialized so the fallback value is at least well defined.
+ */
+static void handle_local_lock(struct fw_ohci *ohci,
+                             struct fw_packet *packet, u32 csr)
+{
+       struct fw_packet response;
+       int tcode, length, ext_tcode, sel;
+       __be32 *payload;
+       __be32 lock_old = cpu_to_be32(0);       /* defined fallback value */
+       u32 lock_arg, lock_data;
+
+       tcode = HEADER_GET_TCODE(packet->header[0]);
+       length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+       payload = packet->payload;
+       ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
+
+       if (tcode == TCODE_LOCK_REQUEST &&
+           ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
+               lock_arg = be32_to_cpu(payload[0]);
+               lock_data = be32_to_cpu(payload[1]);
+       } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
+               /* a plain read is a compare-swap of 0 against 0 */
+               lock_arg = 0;
+               lock_data = 0;
+       } else {
+               fw_fill_response(&response, packet->header,
+                                RCODE_TYPE_ERROR, NULL, 0);
+               goto out;
+       }
+
+       /* program the compare-swap unit and kick off the operation */
+       sel = (csr - CSR_BUS_MANAGER_ID) / 4;
+       reg_write(ohci, OHCI1394_CSRData, lock_data);
+       reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
+       reg_write(ohci, OHCI1394_CSRControl, sel);
+
+       if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
+               lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
+       else
+               fw_notify("swap not done yet\n");
+
+       fw_fill_response(&response, packet->header,
+                        RCODE_COMPLETE, &lock_old, sizeof(lock_old));
+ out:
+       fw_core_handle_response(&ohci->card, &response);
+}
+
+/*
+ * Dispatch a request whose destination is our own node: config ROM
+ * reads and bus-management CSR locks are served locally, everything
+ * else is handed to the core.  Also synthesizes the ack callbacks that
+ * the AT hardware would normally have delivered.
+ */
+static void handle_local_request(struct context *ctx, struct fw_packet *packet)
+{
+       u64 offset;
+       u32 csr;
+
+       /* fake the ack_pending that the AT request context would report */
+       if (ctx == &ctx->ohci->at_request_ctx) {
+               packet->ack = ACK_PENDING;
+               packet->callback(packet, &ctx->ohci->card, packet->ack);
+       }
+
+       /* reassemble the 48-bit destination offset from the header */
+       offset =
+               ((unsigned long long)
+                HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
+               packet->header[2];
+       csr = offset - CSR_REGISTER_BASE;
+
+       /* Handle config rom reads. */
+       if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
+               handle_local_rom(ctx->ohci, packet, csr);
+       else switch (csr) {
+       case CSR_BUS_MANAGER_ID:
+       case CSR_BANDWIDTH_AVAILABLE:
+       case CSR_CHANNELS_AVAILABLE_HI:
+       case CSR_CHANNELS_AVAILABLE_LO:
+               handle_local_lock(ctx->ohci, packet, csr);
+               break;
+       default:
+               if (ctx == &ctx->ohci->at_request_ctx)
+                       fw_core_handle_request(&ctx->ohci->card, packet);
+               else
+                       fw_core_handle_response(&ctx->ohci->card, packet);
+               break;
+       }
+
+       /* fake the ack_complete for a locally handled response */
+       if (ctx == &ctx->ohci->at_response_ctx) {
+               packet->ack = ACK_COMPLETE;
+               packet->callback(packet, &ctx->ohci->card, packet->ack);
+       }
+}
+
+/*
+ * Transmit a packet on an AT context.  Packets addressed to our own
+ * node (in the current generation) are looped back locally instead of
+ * being queued to the hardware; queueing failures are reported through
+ * the packet's callback with the ack set by at_context_queue_packet().
+ */
+static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&ctx->ohci->lock, flags);
+
+       if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
+           ctx->ohci->generation == packet->generation) {
+               spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+               handle_local_request(ctx, packet);
+               return;
+       }
+
+       ret = at_context_queue_packet(ctx, packet);
+       spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+
+       if (ret < 0)
+               packet->callback(packet, &ctx->ohci->card, packet->ack);
+
+}
+
+/*
+ * Bottom half for the selfIDComplete interrupt: validate our node ID,
+ * read the self ID quadlets out of the DMA buffer (checking them for
+ * consistency against a concurrent bus reset), restart the AT contexts
+ * for the new generation, swap in any pending config ROM update, and
+ * finally hand the topology information to the core.
+ */
+static void bus_reset_tasklet(unsigned long data)
+{
+       struct fw_ohci *ohci = (struct fw_ohci *)data;
+       int self_id_count, i, j, reg;
+       int generation, new_generation;
+       unsigned long flags;
+       void *free_rom = NULL;
+       dma_addr_t free_rom_bus = 0;
+
+       reg = reg_read(ohci, OHCI1394_NodeID);
+       if (!(reg & OHCI1394_NodeID_idValid)) {
+               fw_notify("node ID not valid, new bus reset in progress\n");
+               return;
+       }
+       if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
+               fw_notify("malconfigured bus\n");
+               return;
+       }
+       ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
+                              OHCI1394_NodeID_nodeNumber);
+
+       reg = reg_read(ohci, OHCI1394_SelfIDCount);
+       if (reg & OHCI1394_SelfIDCount_selfIDError) {
+               fw_notify("inconsistent self IDs\n");
+               return;
+       }
+       /*
+        * The count in the SelfIDCount register is the number of
+        * bytes in the self ID receive buffer.  Since we also receive
+        * the inverted quadlets and a header quadlet, we shift one
+        * bit extra to get the actual number of self IDs.
+        */
+       self_id_count = (reg >> 3) & 0x3ff;
+       if (self_id_count == 0) {
+               fw_notify("inconsistent self IDs\n");
+               return;
+       }
+       /* quadlet 0 is the buffer header carrying the generation */
+       generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
+       rmb();
+
+       /* each self ID is followed by its bit-inverted copy */
+       for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
+               if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
+                       fw_notify("inconsistent self IDs\n");
+                       return;
+               }
+               ohci->self_id_buffer[j] =
+                               cond_le32_to_cpu(ohci->self_id_cpu[i]);
+       }
+       rmb();
+
+       /*
+        * Check the consistency of the self IDs we just read.  The
+        * problem we face is that a new bus reset can start while we
+        * read out the self IDs from the DMA buffer. If this happens,
+        * the DMA buffer will be overwritten with new self IDs and we
+        * will read out inconsistent data.  The OHCI specification
+        * (section 11.2) recommends a technique similar to
+        * linux/seqlock.h, where we remember the generation of the
+        * self IDs in the buffer before reading them out and compare
+        * it to the current generation after reading them out.  If
+        * the two generations match we know we have a consistent set
+        * of self IDs.
+        */
+
+       new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
+       if (new_generation != generation) {
+               fw_notify("recursive bus reset detected, "
+                         "discarding self ids\n");
+               return;
+       }
+
+       /* FIXME: Document how the locking works. */
+       spin_lock_irqsave(&ohci->lock, flags);
+
+       ohci->generation = generation;
+       /* AT contexts halted on busReset; stop them before re-enabling */
+       context_stop(&ohci->at_request_ctx);
+       context_stop(&ohci->at_response_ctx);
+       reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+
+       /* chips with this quirk emit bus reset packets with a wrong
+        * generation, so take the generation from the register instead */
+       if (ohci->bus_reset_packet_quirk)
+               ohci->request_generation = generation;
+
+       /*
+        * This next bit is unrelated to the AT context stuff but we
+        * have to do it under the spinlock also.  If a new config rom
+        * was set up before this reset, the old one is now no longer
+        * in use and we can free it. Update the config rom pointers
+        * to point to the current config rom and clear the
+        * next_config_rom pointer so a new update can take place.
+        */
+
+       if (ohci->next_config_rom != NULL) {
+               if (ohci->next_config_rom != ohci->config_rom) {
+                       free_rom      = ohci->config_rom;
+                       free_rom_bus  = ohci->config_rom_bus;
+               }
+               ohci->config_rom      = ohci->next_config_rom;
+               ohci->config_rom_bus  = ohci->next_config_rom_bus;
+               ohci->next_config_rom = NULL;
+
+               /*
+                * Restore config_rom image and manually update
+                * config_rom registers.  Writing the header quadlet
+                * will indicate that the config rom is ready, so we
+                * do that last.
+                */
+               reg_write(ohci, OHCI1394_BusOptions,
+                         be32_to_cpu(ohci->config_rom[2]));
+               ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
+               reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
+       }
+
+#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
+       reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
+       reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+#endif
+
+       spin_unlock_irqrestore(&ohci->lock, flags);
+
+       /* free the superseded ROM outside the spinlock */
+       if (free_rom)
+               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                 free_rom, free_rom_bus);
+
+       log_selfids(ohci->node_id, generation,
+                   self_id_count, ohci->self_id_buffer);
+
+       fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
+                                self_id_count, ohci->self_id_buffer);
+}
+
+/*
+ * Top-half interrupt handler: acknowledge the pending events and defer
+ * nearly all work to the per-context tasklets; only errors and the
+ * cycle-time rollover are handled inline.
+ */
+static irqreturn_t irq_handler(int irq, void *data)
+{
+       struct fw_ohci *ohci = data;
+       u32 event, iso_event, cycle_time;
+       int i;
+
+       event = reg_read(ohci, OHCI1394_IntEventClear);
+
+       /* ~0 means the device is gone (e.g. hot-unplugged) */
+       if (!event || !~event)
+               return IRQ_NONE;
+
+       /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
+       reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
+       log_irqs(event);
+
+       if (event & OHCI1394_selfIDComplete)
+               tasklet_schedule(&ohci->bus_reset_tasklet);
+
+       if (event & OHCI1394_RQPkt)
+               tasklet_schedule(&ohci->ar_request_ctx.tasklet);
+
+       if (event & OHCI1394_RSPkt)
+               tasklet_schedule(&ohci->ar_response_ctx.tasklet);
+
+       if (event & OHCI1394_reqTxComplete)
+               tasklet_schedule(&ohci->at_request_ctx.tasklet);
+
+       if (event & OHCI1394_respTxComplete)
+               tasklet_schedule(&ohci->at_response_ctx.tasklet);
+
+       /* fan out per-channel isochronous receive events */
+       iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
+       reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
+
+       while (iso_event) {
+               i = ffs(iso_event) - 1;
+               tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
+               iso_event &= ~(1 << i);
+       }
+
+       /* fan out per-channel isochronous transmit events */
+       iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
+       reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
+
+       while (iso_event) {
+               i = ffs(iso_event) - 1;
+               tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
+               iso_event &= ~(1 << i);
+       }
+
+       if (unlikely(event & OHCI1394_regAccessFail))
+               fw_error("Register access failure - "
+                        "please notify linux1394-devel@lists.sf.net\n");
+
+       if (unlikely(event & OHCI1394_postedWriteErr))
+               fw_error("PCI posted write error\n");
+
+       if (unlikely(event & OHCI1394_cycleTooLong)) {
+               if (printk_ratelimit())
+                       fw_notify("isochronous cycle too long\n");
+               /* cycleTooLong clears cycleMaster; turn it back on */
+               reg_write(ohci, OHCI1394_LinkControlSet,
+                         OHCI1394_LinkControl_cycleMaster);
+       }
+
+       /* count 128-second halves of the 32-bit cycle timer */
+       if (event & OHCI1394_cycle64Seconds) {
+               cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+               if ((cycle_time & 0x80000000) == 0)
+                       atomic_inc(&ohci->bus_seconds);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Request a soft reset of the controller and poll until the hardware
+ * clears the softReset bit.  Returns 0 on success or -EBUSY if the bit
+ * never clears within OHCI_LOOP_COUNT milliseconds.
+ */
+static int software_reset(struct fw_ohci *ohci)
+{
+       int i;
+
+       reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
+
+       for (i = 0; i < OHCI_LOOP_COUNT; i++) {
+               if (!(reg_read(ohci, OHCI1394_HCControlSet) &
+                     OHCI1394_HCControl_softReset))
+                       return 0;
+
+               msleep(1);
+       }
+
+       return -EBUSY;
+}
+
+/*
+ * ohci_enable - bring the controller out of reset and onto the bus
+ *
+ * Soft-resets the controller, raises LPS (with retries, since some
+ * chips need them), programs link control and AT retry limits, installs
+ * the initial config ROM - or reuses the previous one when @config_rom
+ * is NULL, i.e. on resume - requests the shared interrupt, enables the
+ * link and finally initiates a bus reset.  Returns 0 or a negative
+ * errno.
+ */
+static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
+{
+       struct fw_ohci *ohci = fw_ohci(card);
+       struct pci_dev *dev = to_pci_dev(card->device);
+       u32 lps;
+       int i;
+
+       if (software_reset(ohci)) {
+               fw_error("Failed to reset ohci card.\n");
+               return -EBUSY;
+       }
+
+       /*
+        * Now enable LPS, which we need in order to start accessing
+        * most of the registers.  In fact, on some cards (ALI M5251),
+        * accessing registers in the SClk domain without LPS enabled
+        * will lock up the machine.  Wait 50msec to make sure we have
+        * full link enabled.  However, with some cards (well, at least
+        * a JMicron PCIe card), we have to try again sometimes.
+        */
+       reg_write(ohci, OHCI1394_HCControlSet,
+                 OHCI1394_HCControl_LPS |
+                 OHCI1394_HCControl_postedWriteEnable);
+       flush_writes(ohci);
+
+       for (lps = 0, i = 0; !lps && i < 3; i++) {
+               msleep(50);
+               lps = reg_read(ohci, OHCI1394_HCControlSet) &
+                     OHCI1394_HCControl_LPS;
+       }
+
+       if (!lps) {
+               fw_error("Failed to set Link Power Status\n");
+               return -EIO;
+       }
+
+       reg_write(ohci, OHCI1394_HCControlClear,
+                 OHCI1394_HCControl_noByteSwapData);
+
+       reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
+       reg_write(ohci, OHCI1394_LinkControlClear,
+                 OHCI1394_LinkControl_rcvPhyPkt);
+       reg_write(ohci, OHCI1394_LinkControlSet,
+                 OHCI1394_LinkControl_rcvSelfID |
+                 OHCI1394_LinkControl_cycleTimerEnable |
+                 OHCI1394_LinkControl_cycleMaster);
+
+       reg_write(ohci, OHCI1394_ATRetries,
+                 OHCI1394_MAX_AT_REQ_RETRIES |
+                 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
+                 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
+
+       /* Start the asynchronous receive DMA contexts. */
+       ar_context_run(&ohci->ar_request_ctx);
+       ar_context_run(&ohci->ar_response_ctx);
+
+       /* Unmask exactly the events the interrupt handler deals with. */
+       reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
+       reg_write(ohci, OHCI1394_IntEventClear, ~0);
+       reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+       reg_write(ohci, OHCI1394_IntMaskSet,
+                 OHCI1394_selfIDComplete |
+                 OHCI1394_RQPkt | OHCI1394_RSPkt |
+                 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
+                 OHCI1394_isochRx | OHCI1394_isochTx |
+                 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
+                 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
+                 OHCI1394_masterIntEnable);
+       if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
+               reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+
+       /* Activate link_on bit and contender bit in our self ID packets.*/
+       if (ohci_update_phy_reg(card, 4, 0,
+                               PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
+               return -EIO;
+
+       /*
+        * When the link is not yet enabled, the atomic config rom
+        * update mechanism described below in ohci_set_config_rom()
+        * is not active.  We have to update ConfigRomHeader and
+        * BusOptions manually, and the write to ConfigROMmap takes
+        * effect immediately.  We tie this to the enabling of the
+        * link, so we have a valid config rom before enabling - the
+        * OHCI requires that ConfigROMhdr and BusOptions have valid
+        * values before enabling.
+        *
+        * However, when the ConfigROMmap is written, some controllers
+        * always read back quadlets 0 and 2 from the config rom to
+        * the ConfigRomHeader and BusOptions registers on bus reset.
+        * They shouldn't do that in this initial case where the link
+        * isn't enabled.  This means we have to use the same
+        * workaround here, setting the bus header to 0 and then write
+        * the right values in the bus reset tasklet.
+        */
+
+       if (config_rom) {
+               ohci->next_config_rom =
+                       dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                          &ohci->next_config_rom_bus,
+                                          GFP_KERNEL);
+               if (ohci->next_config_rom == NULL)
+                       return -ENOMEM;
+
+               memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
+               fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
+       } else {
+               /*
+                * In the suspend case, config_rom is NULL, which
+                * means that we just reuse the old config rom.
+                */
+               ohci->next_config_rom = ohci->config_rom;
+               ohci->next_config_rom_bus = ohci->config_rom_bus;
+       }
+
+       /* Header quadlet is zeroed until the bus reset tasklet fills it in. */
+       ohci->next_header = be32_to_cpu(ohci->next_config_rom[0]);
+       ohci->next_config_rom[0] = 0;
+       reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
+       reg_write(ohci, OHCI1394_BusOptions,
+                 be32_to_cpu(ohci->next_config_rom[2]));
+       reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
+
+       reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
+
+       if (request_irq(dev->irq, irq_handler,
+                       IRQF_SHARED, ohci_driver_name, ohci)) {
+               fw_error("Failed to allocate shared interrupt %d.\n",
+                        dev->irq);
+               /*
+                * NOTE(review): this frees ohci->config_rom, not the
+                * next_config_rom buffer allocated above; on a first-time
+                * enable (config_rom != NULL) that allocation appears to
+                * leak here - confirm against the teardown paths.
+                */
+               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                 ohci->config_rom, ohci->config_rom_bus);
+               return -EIO;
+       }
+
+       reg_write(ohci, OHCI1394_HCControlSet,
+                 OHCI1394_HCControl_linkEnable |
+                 OHCI1394_HCControl_BIBimageValid);
+       flush_writes(ohci);
+
+       /*
+        * We are ready to go, initiate bus reset to finish the
+        * initialization.
+        */
+
+       fw_core_initiate_bus_reset(&ohci->card, 1);
+
+       return 0;
+}
+
+/*
+ * Atomically install a new config ROM image via the controller's
+ * ConfigROMmapNext shadow mechanism (OHCI spec section 5.5.6).  The new
+ * image takes effect at the next bus reset, which is initiated here on
+ * success.  Returns 0, -EBUSY if a previous update is still pending
+ * (next_config_rom not yet consumed by the bus reset tasklet), or
+ * -ENOMEM.
+ */
+static int ohci_set_config_rom(struct fw_card *card,
+                              u32 *config_rom, size_t length)
+{
+       struct fw_ohci *ohci;
+       unsigned long flags;
+       int ret = -EBUSY;
+       __be32 *next_config_rom;
+       dma_addr_t uninitialized_var(next_config_rom_bus);
+
+       ohci = fw_ohci(card);
+
+       /*
+        * When the OHCI controller is enabled, the config rom update
+        * mechanism is a bit tricky, but easy enough to use.  See
+        * section 5.5.6 in the OHCI specification.
+        *
+        * The OHCI controller caches the new config rom address in a
+        * shadow register (ConfigROMmapNext) and needs a bus reset
+        * for the changes to take place.  When the bus reset is
+        * detected, the controller loads the new values for the
+        * ConfigRomHeader and BusOptions registers from the specified
+        * config rom and loads ConfigROMmap from the ConfigROMmapNext
+        * shadow register. All automatically and atomically.
+        *
+        * Now, there's a twist to this story.  The automatic load of
+        * ConfigRomHeader and BusOptions doesn't honor the
+        * noByteSwapData bit, so with a be32 config rom, the
+        * controller will load be32 values in to these registers
+        * during the atomic update, even on litte endian
+        * architectures.  The workaround we use is to put a 0 in the
+        * header quadlet; 0 is endian agnostic and means that the
+        * config rom isn't ready yet.  In the bus reset tasklet we
+        * then set up the real values for the two registers.
+        *
+        * We use ohci->lock to avoid racing with the code that sets
+        * ohci->next_config_rom to NULL (see bus_reset_tasklet).
+        */
+
+       /* Allocate outside the lock; dma_alloc_coherent may sleep. */
+       next_config_rom =
+               dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                  &next_config_rom_bus, GFP_KERNEL);
+       if (next_config_rom == NULL)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&ohci->lock, flags);
+
+       if (ohci->next_config_rom == NULL) {
+               ohci->next_config_rom = next_config_rom;
+               ohci->next_config_rom_bus = next_config_rom_bus;
+
+               memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
+               fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
+                                 length * 4);
+
+               ohci->next_header = config_rom[0];
+               ohci->next_config_rom[0] = 0;
+
+               reg_write(ohci, OHCI1394_ConfigROMmap,
+                         ohci->next_config_rom_bus);
+               ret = 0;
+       }
+
+       spin_unlock_irqrestore(&ohci->lock, flags);
+
+       /*
+        * Now initiate a bus reset to have the changes take
+        * effect. We clean up the old config rom memory and DMA
+        * mappings in the bus reset tasklet, since the OHCI
+        * controller could need to access it before the bus reset
+        * takes effect.
+        */
+       if (ret == 0)
+               fw_core_initiate_bus_reset(&ohci->card, 1);
+       else
+               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                 next_config_rom, next_config_rom_bus);
+
+       return ret;
+}
+
+/* Hand an asynchronous request packet to the AT request DMA context. */
+static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
+{
+       at_context_transmit(&fw_ohci(card)->at_request_ctx, packet);
+}
+
+/* Hand an asynchronous response packet to the AT response DMA context. */
+static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
+{
+       at_context_transmit(&fw_ohci(card)->at_response_ctx, packet);
+}
+
+/*
+ * Try to cancel a pending AT request packet.  With the AT tasklet
+ * disabled, a packet whose ->ack is still 0 cannot be completed behind
+ * our back, so it is safe to unmap its payload and complete it here
+ * with RCODE_CANCELLED.  Returns 0 on cancellation, -ENOENT if the
+ * packet already got an ack.
+ */
+static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+{
+       struct fw_ohci *ohci = fw_ohci(card);
+       struct context *ctx = &ohci->at_request_ctx;
+       struct driver_data *driver_data = packet->driver_data;
+       int ret = -ENOENT;
+
+       tasklet_disable(&ctx->tasklet);
+
+       /* Non-zero ack: completion already happened or is in flight. */
+       if (packet->ack != 0)
+               goto out;
+
+       if (packet->payload_bus)
+               dma_unmap_single(ohci->card.device, packet->payload_bus,
+                                packet->payload_length, DMA_TO_DEVICE);
+
+       log_ar_at_event('T', packet->speed, packet->header, 0x20);
+       driver_data->packet = NULL;
+       packet->ack = RCODE_CANCELLED;
+       packet->callback(packet, &ohci->card, packet->ack);
+       ret = 0;
+ out:
+       tasklet_enable(&ctx->tasklet);
+
+       return ret;
+}
+
+/*
+ * Permit physical DMA from @node_id by setting its bit in the physical
+ * request filter.  Returns -ESTALE if @generation no longer matches the
+ * current bus generation.  When CONFIG_FIREWIRE_OHCI_REMOTE_DMA is set,
+ * physical DMA is unconditionally allowed and this is a no-op.
+ */
+static int ohci_enable_phys_dma(struct fw_card *card,
+                               int node_id, int generation)
+{
+#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
+       return 0;
+#else
+       struct fw_ohci *ohci = fw_ohci(card);
+       unsigned long flags;
+       int n, ret = 0;
+
+       /*
+        * FIXME:  Make sure this bitmask is cleared when we clear the busReset
+        * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
+        */
+
+       spin_lock_irqsave(&ohci->lock, flags);
+
+       if (ohci->generation != generation) {
+               ret = -ESTALE;
+               goto out;
+       }
+
+       /*
+        * Note, if the node ID contains a non-local bus ID, physical DMA is
+        * enabled for _all_ nodes on remote buses.
+        */
+
+       n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
+       if (n < 32)
+               reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
+       else
+               reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
+
+       flush_writes(ohci);
+ out:
+       spin_unlock_irqrestore(&ohci->lock, flags);
+
+       return ret;
+#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
+}
+
+/*
+ * Return the 64-bit bus time: the software-maintained seconds counter
+ * (incremented by the interrupt handler on cycle64Seconds events) in
+ * the upper 32 bits, the hardware isochronous cycle timer in the lower.
+ */
+static u64 ohci_get_bus_time(struct fw_card *card)
+{
+       struct fw_ohci *ohci = fw_ohci(card);
+       u32 cycles;
+
+       /* Sample the hardware timer first, then widen it. */
+       cycles = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+
+       return ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycles;
+}
+
+/*
+ * Append one received packet's iso headers (header_size bytes starting
+ * at @p) to the context's header page, presenting every quadlet big
+ * endian.  Headers are silently dropped once the page would overflow.
+ */
+static void copy_iso_headers(struct iso_context *ctx, void *p)
+{
+       int i = ctx->header_length;
+
+       /* Drop the headers rather than overrun the single header page. */
+       if (i + ctx->base.header_size > PAGE_SIZE)
+               return;
+
+       /*
+        * The iso header is byteswapped to little endian by
+        * the controller, but the remaining header quadlets
+        * are big endian.  We want to present all the headers
+        * as big endian, so we have to swap the first quadlet.
+        */
+       if (ctx->base.header_size > 0)
+               *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+       if (ctx->base.header_size > 4)
+               *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
+       if (ctx->base.header_size > 8)
+               memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
+       ctx->header_length += ctx->base.header_size;
+}
+
+/*
+ * Descriptor-done callback for dual-buffer IR contexts.  Copies the
+ * received headers out of the first buffer, tracks payload bytes that
+ * spilled into the next descriptor (excess_bytes), and invokes the user
+ * callback on IRQ_ALWAYS descriptors.  Returns 0 to stop iteration when
+ * the descriptor is not yet complete, 1 to continue.
+ */
+static int handle_ir_dualbuffer_packet(struct context *context,
+                                      struct descriptor *d,
+                                      struct descriptor *last)
+{
+       struct iso_context *ctx =
+               container_of(context, struct iso_context, context);
+       struct db_descriptor *db = (struct db_descriptor *) d;
+       __le32 *ir_header;
+       size_t header_length;
+       void *p, *end;
+
+       if (db->first_res_count != 0 && db->second_res_count != 0) {
+               if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
+                       /* This descriptor isn't done yet, stop iteration. */
+                       return 0;
+               }
+               /* Earlier overflow fully consumed this second buffer. */
+               ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
+       }
+
+       /* Number of header bytes the controller actually wrote. */
+       header_length = le16_to_cpu(db->first_req_count) -
+               le16_to_cpu(db->first_res_count);
+
+       p = db + 1;
+       end = p + header_length;
+       while (p < end) {
+               copy_iso_headers(ctx, p);
+               ctx->excess_bytes +=
+                       (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
+               p += max(ctx->base.header_size, (size_t)8);
+       }
+
+       ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
+               le16_to_cpu(db->second_res_count);
+
+       if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
+               ir_header = (__le32 *) (db + 1);
+               ctx->base.callback(&ctx->base,
+                                  le32_to_cpu(ir_header[0]) & 0xffff,
+                                  ctx->header_length, ctx->header,
+                                  ctx->base.callback_data);
+               ctx->header_length = 0;
+       }
+
+       return 1;
+}
+
+/*
+ * Descriptor-done callback for packet-per-buffer IR contexts.  Walks
+ * the descriptor chain for one packet; if any descriptor is still
+ * pending, returns 0 to stop iteration.  Otherwise copies the headers
+ * stored after the last descriptor and fires the user callback on
+ * IRQ_ALWAYS descriptors.
+ */
+static int handle_ir_packet_per_buffer(struct context *context,
+                                      struct descriptor *d,
+                                      struct descriptor *last)
+{
+       struct iso_context *ctx =
+               container_of(context, struct iso_context, context);
+       struct descriptor *pd;
+       __le32 *ir_header;
+       void *p;
+
+       for (pd = d; pd <= last; pd++) {
+               if (pd->transfer_status)
+                       break;
+       }
+       if (pd > last)
+               /* Descriptor(s) not done yet, stop iteration */
+               return 0;
+
+       /* The headers live immediately after the last descriptor. */
+       p = last + 1;
+       copy_iso_headers(ctx, p);
+
+       if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
+               ir_header = (__le32 *) p;
+               ctx->base.callback(&ctx->base,
+                                  le32_to_cpu(ir_header[0]) & 0xffff,
+                                  ctx->header_length, ctx->header,
+                                  ctx->base.callback_data);
+               ctx->header_length = 0;
+       }
+
+       return 1;
+}
+
+/*
+ * Descriptor-done callback for IT contexts: reports transmit completion
+ * to the user callback on IRQ_ALWAYS descriptors, passing the
+ * descriptor's residual count as the cycle argument.  Returns 0 while
+ * the descriptor is still pending, 1 otherwise.
+ */
+static int handle_it_packet(struct context *context,
+                           struct descriptor *d,
+                           struct descriptor *last)
+{
+       struct iso_context *ctx =
+               container_of(context, struct iso_context, context);
+
+       if (last->transfer_status == 0)
+               /* This descriptor isn't done yet, stop iteration. */
+               return 0;
+
+       if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
+               ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
+                                  0, NULL, ctx->base.callback_data);
+
+       return 1;
+}
+
+/*
+ * Claim a free hardware iso context of the given @type.  For receive
+ * contexts the @channel must still be unclaimed; the context index is
+ * taken from the per-direction mask under ohci->lock.  Allocates a page
+ * for received headers and initializes the descriptor context.  Returns
+ * the embedded fw_iso_context or an ERR_PTR (-EBUSY/-ENOMEM).
+ */
+static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
+                               int type, int channel, size_t header_size)
+{
+       struct fw_ohci *ohci = fw_ohci(card);
+       struct iso_context *ctx, *list;
+       descriptor_callback_t callback;
+       u64 *channels, dont_care = ~0ULL;
+       u32 *mask, regs;
+       unsigned long flags;
+       int index, ret = -ENOMEM;
+
+       if (type == FW_ISO_CONTEXT_TRANSMIT) {
+               /* Transmit contexts have no channel exclusivity to track. */
+               channels = &dont_care;
+               mask = &ohci->it_context_mask;
+               list = ohci->it_context_list;
+               callback = handle_it_packet;
+       } else {
+               channels = &ohci->ir_context_channels;
+               mask = &ohci->ir_context_mask;
+               list = ohci->ir_context_list;
+               if (ohci->use_dualbuffer)
+                       callback = handle_ir_dualbuffer_packet;
+               else
+                       callback = handle_ir_packet_per_buffer;
+       }
+
+       /* Atomically claim both the channel and a free context index. */
+       spin_lock_irqsave(&ohci->lock, flags);
+       index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+       if (index >= 0) {
+               *channels &= ~(1ULL << channel);
+               *mask &= ~(1 << index);
+       }
+       spin_unlock_irqrestore(&ohci->lock, flags);
+
+       if (index < 0)
+               return ERR_PTR(-EBUSY);
+
+       if (type == FW_ISO_CONTEXT_TRANSMIT)
+               regs = OHCI1394_IsoXmitContextBase(index);
+       else
+               regs = OHCI1394_IsoRcvContextBase(index);
+
+       ctx = &list[index];
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->header_length = 0;
+       ctx->header = (void *) __get_free_page(GFP_KERNEL);
+       if (ctx->header == NULL)
+               goto out;
+
+       ret = context_init(&ctx->context, ohci, regs, callback);
+       if (ret < 0)
+               goto out_with_header;
+
+       return &ctx->base;
+
+ out_with_header:
+       free_page((unsigned long)ctx->header);
+ out:
+       spin_lock_irqsave(&ohci->lock, flags);
+       /*
+        * NOTE(review): only the context mask bit is given back here; the
+        * channel bit cleared from *channels above is not restored, so a
+        * failed receive-context allocation appears to leak the channel -
+        * confirm against the callers.
+        */
+       *mask |= 1 << index;
+       spin_unlock_irqrestore(&ohci->lock, flags);
+
+       return ERR_PTR(ret);
+}
+
+/*
+ * Start an allocated iso context.  For transmit contexts an optional
+ * start @cycle is armed via the cycle-match field; for receive contexts
+ * the channel/sync/tag match register is programmed (and a cycle match
+ * if requested) before the context is run.  Always returns 0.
+ */
+static int ohci_start_iso(struct fw_iso_context *base,
+                         s32 cycle, u32 sync, u32 tags)
+{
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       struct fw_ohci *ohci = ctx->context.ohci;
+       u32 control, match;
+       int index;
+
+       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+               index = ctx - ohci->it_context_list;
+               match = 0;
+               if (cycle >= 0)
+                       match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
+                               (cycle & 0x7fff) << 16;
+
+               /* Clear any stale event, then unmask this context's IRQ. */
+               reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
+               reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
+               context_run(&ctx->context, match);
+       } else {
+               index = ctx - ohci->ir_context_list;
+               control = IR_CONTEXT_ISOCH_HEADER;
+               if (ohci->use_dualbuffer)
+                       control |= IR_CONTEXT_DUAL_BUFFER_MODE;
+               match = (tags << 28) | (sync << 8) | ctx->base.channel;
+               if (cycle >= 0) {
+                       match |= (cycle & 0x07fff) << 12;
+                       control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
+               }
+
+               reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
+               reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
+               reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
+               context_run(&ctx->context, control);
+       }
+
+       return 0;
+}
+
+/*
+ * Stop a running iso context: mask its interrupt, flush the posted
+ * write, then stop the DMA context itself.  Always returns 0.
+ */
+static int ohci_stop_iso(struct fw_iso_context *base)
+{
+       struct fw_ohci *ohci = fw_ohci(base->card);
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       int index;
+
+       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+               index = ctx - ohci->it_context_list;
+               reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
+       } else {
+               index = ctx - ohci->ir_context_list;
+               reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
+       }
+       flush_writes(ohci);
+       context_stop(&ctx->context);
+
+       return 0;
+}
+
+/*
+ * Tear down an iso context: stop it, release its descriptor ring and
+ * header page, then return the context index (and, for receive
+ * contexts, the channel) to the free masks under ohci->lock.
+ */
+static void ohci_free_iso_context(struct fw_iso_context *base)
+{
+       struct fw_ohci *ohci = fw_ohci(base->card);
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       unsigned long flags;
+       int index;
+
+       ohci_stop_iso(base);
+       context_release(&ctx->context);
+       free_page((unsigned long)ctx->header);
+
+       spin_lock_irqsave(&ohci->lock, flags);
+
+       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+               index = ctx - ohci->it_context_list;
+               ohci->it_context_mask |= 1 << index;
+       } else {
+               index = ctx - ohci->ir_context_list;
+               ohci->ir_context_mask |= 1 << index;
+               ohci->ir_context_channels |= 1ULL << base->channel;
+       }
+
+       spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
+/*
+ * Build and append the descriptor chain for one IT packet: an optional
+ * immediate descriptor carrying the iso packet header (skipped when
+ * p->skip), an optional descriptor for user-supplied header data, and
+ * one descriptor per payload page.  Returns 0 or -ENOMEM if the ring
+ * has no room.  Caller holds ohci->lock (see ohci_queue_iso).
+ */
+static int ohci_queue_iso_transmit(struct fw_iso_context *base,
+                                  struct fw_iso_packet *packet,
+                                  struct fw_iso_buffer *buffer,
+                                  unsigned long payload)
+{
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       struct descriptor *d, *last, *pd;
+       struct fw_iso_packet *p;
+       __le32 *header;
+       dma_addr_t d_bus, page_bus;
+       u32 z, header_z, payload_z, irq;
+       u32 payload_index, payload_end_index, next_page_index;
+       int page, end_page, i, length, offset;
+
+       /*
+        * FIXME: Cycle lost behavior should be configurable: lose
+        * packet, retransmit or terminate..
+        */
+
+       p = packet;
+       payload_index = payload;
+
+       /* z counts descriptors: skip cycle vs. immediate header pair. */
+       if (p->skip)
+               z = 1;
+       else
+               z = 2;
+       if (p->header_length > 0)
+               z++;
+
+       /* Determine the first page the payload isn't contained in. */
+       end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
+       if (p->payload_length > 0)
+               payload_z = end_page - (payload_index >> PAGE_SHIFT);
+       else
+               payload_z = 0;
+
+       z += payload_z;
+
+       /* Get header size in number of descriptors. */
+       header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
+
+       d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
+       if (d == NULL)
+               return -ENOMEM;
+
+       if (!p->skip) {
+               /* d[0]+d[1]: immediate descriptor with the iso header. */
+               d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
+               d[0].req_count = cpu_to_le16(8);
+
+               header = (__le32 *) &d[1];
+               header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
+                                       IT_HEADER_TAG(p->tag) |
+                                       IT_HEADER_TCODE(TCODE_STREAM_DATA) |
+                                       IT_HEADER_CHANNEL(ctx->base.channel) |
+                                       IT_HEADER_SPEED(ctx->base.speed));
+               header[1] =
+                       cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
+                                                         p->payload_length));
+       }
+
+       if (p->header_length > 0) {
+               /*
+                * NOTE(review): d[2] is used for the header descriptor
+                * regardless of p->skip, although z starts at 1 in the
+                * skip case - verify the fixed index is correct there.
+                */
+               d[2].req_count    = cpu_to_le16(p->header_length);
+               d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
+               memcpy(&d[z], p->header, p->header_length);
+       }
+
+       /* One descriptor per page the payload touches. */
+       pd = d + z - payload_z;
+       payload_end_index = payload_index + p->payload_length;
+       for (i = 0; i < payload_z; i++) {
+               page               = payload_index >> PAGE_SHIFT;
+               offset             = payload_index & ~PAGE_MASK;
+               next_page_index    = (page + 1) << PAGE_SHIFT;
+               length             =
+                       min(next_page_index, payload_end_index) - payload_index;
+               pd[i].req_count    = cpu_to_le16(length);
+
+               page_bus = page_private(buffer->pages[page]);
+               pd[i].data_address = cpu_to_le32(page_bus + offset);
+
+               payload_index += length;
+       }
+
+       if (p->interrupt)
+               irq = DESCRIPTOR_IRQ_ALWAYS;
+       else
+               irq = DESCRIPTOR_NO_IRQ;
+
+       last = z == 2 ? d : d + z - 1;
+       last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+                                    DESCRIPTOR_STATUS |
+                                    DESCRIPTOR_BRANCH_ALWAYS |
+                                    irq);
+
+       context_append(&ctx->context, d, z, header_z);
+
+       return 0;
+}
+
+/*
+ * Queue one packet on a dual-buffer IR context: for each payload page a
+ * db_descriptor is appended whose first buffer receives the headers and
+ * whose second buffer receives the payload slice.  Returns 0 or
+ * -ENOMEM.  Caller holds ohci->lock (see ohci_queue_iso).
+ */
+static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+                                            struct fw_iso_packet *packet,
+                                            struct fw_iso_buffer *buffer,
+                                            unsigned long payload)
+{
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       struct db_descriptor *db = NULL;
+       struct descriptor *d;
+       struct fw_iso_packet *p;
+       dma_addr_t d_bus, page_bus;
+       u32 z, header_z, length, rest;
+       int page, offset, packet_count, header_size;
+
+       /*
+        * FIXME: Cycle lost behavior should be configurable: lose
+        * packet, retransmit or terminate..
+        */
+
+       p = packet;
+       z = 2;
+
+       /*
+        * The OHCI controller puts the isochronous header and trailer in the
+        * buffer, so we need at least 8 bytes.
+        */
+       packet_count = p->header_length / ctx->base.header_size;
+       header_size = packet_count * max(ctx->base.header_size, (size_t)8);
+
+       /* Get header size in number of descriptors. */
+       header_z = DIV_ROUND_UP(header_size, sizeof(*d));
+       page     = payload >> PAGE_SHIFT;
+       offset   = payload & ~PAGE_MASK;
+       rest     = p->payload_length;
+
+       /* FIXME: make packet-per-buffer/dual-buffer a context option */
+       while (rest > 0) {
+               d = context_get_descriptors(&ctx->context,
+                                           z + header_z, &d_bus);
+               if (d == NULL)
+                       return -ENOMEM;
+
+               db = (struct db_descriptor *) d;
+               db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+                                         DESCRIPTOR_BRANCH_ALWAYS);
+               db->first_size =
+                   cpu_to_le16(max(ctx->base.header_size, (size_t)8));
+               if (p->skip && rest == p->payload_length) {
+                       /* First descriptor of a skip packet waits for sync. */
+                       db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+                       db->first_req_count = db->first_size;
+               } else {
+                       db->first_req_count = cpu_to_le16(header_size);
+               }
+               db->first_res_count = db->first_req_count;
+               db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
+
+               if (p->skip && rest == p->payload_length)
+                       length = 4;
+               else if (offset + rest < PAGE_SIZE)
+                       length = rest;
+               else
+                       length = PAGE_SIZE - offset;
+
+               db->second_req_count = cpu_to_le16(length);
+               db->second_res_count = db->second_req_count;
+               page_bus = page_private(buffer->pages[page]);
+               db->second_buffer = cpu_to_le32(page_bus + offset);
+
+               /* Interrupt only on the descriptor completing the packet. */
+               if (p->interrupt && length == rest)
+                       db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
+
+               context_append(&ctx->context, d, z, header_z);
+               offset = (offset + length) & ~PAGE_MASK;
+               rest -= length;
+               if (offset == 0)
+                       page++;
+       }
+
+       return 0;
+}
+
+/*
+ * Queue packets on a packet-per-buffer IR context: for each of the
+ * packet_count sub-packets, one INPUT_MORE descriptor receives the
+ * headers and further descriptors cover its payload slice page by page,
+ * the final one marked INPUT_LAST.  Returns 0 or -ENOMEM.  Caller holds
+ * ohci->lock (see ohci_queue_iso).
+ */
+static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
+                                       struct fw_iso_packet *packet,
+                                       struct fw_iso_buffer *buffer,
+                                       unsigned long payload)
+{
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       struct descriptor *d = NULL, *pd = NULL;
+       struct fw_iso_packet *p = packet;
+       dma_addr_t d_bus, page_bus;
+       u32 z, header_z, rest;
+       int i, j, length;
+       int page, offset, packet_count, header_size, payload_per_buffer;
+
+       /*
+        * The OHCI controller puts the isochronous header and trailer in the
+        * buffer, so we need at least 8 bytes.
+        */
+       packet_count = p->header_length / ctx->base.header_size;
+       header_size  = max(ctx->base.header_size, (size_t)8);
+
+       /* Get header size in number of descriptors. */
+       header_z = DIV_ROUND_UP(header_size, sizeof(*d));
+       page     = payload >> PAGE_SHIFT;
+       offset   = payload & ~PAGE_MASK;
+       payload_per_buffer = p->payload_length / packet_count;
+
+       for (i = 0; i < packet_count; i++) {
+               /* d points to the header descriptor */
+               z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
+               d = context_get_descriptors(&ctx->context,
+                               z + header_z, &d_bus);
+               if (d == NULL)
+                       return -ENOMEM;
+
+               d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
+                                             DESCRIPTOR_INPUT_MORE);
+               if (p->skip && i == 0)
+                       d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+               d->req_count    = cpu_to_le16(header_size);
+               d->res_count    = d->req_count;
+               d->transfer_status = 0;
+               d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
+
+               /* Payload descriptors, one per page touched. */
+               rest = payload_per_buffer;
+               for (j = 1; j < z; j++) {
+                       pd = d + j;
+                       pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
+                                                 DESCRIPTOR_INPUT_MORE);
+
+                       if (offset + rest < PAGE_SIZE)
+                               length = rest;
+                       else
+                               length = PAGE_SIZE - offset;
+                       pd->req_count = cpu_to_le16(length);
+                       pd->res_count = pd->req_count;
+                       pd->transfer_status = 0;
+
+                       page_bus = page_private(buffer->pages[page]);
+                       pd->data_address = cpu_to_le32(page_bus + offset);
+
+                       offset = (offset + length) & ~PAGE_MASK;
+                       rest -= length;
+                       if (offset == 0)
+                               page++;
+               }
+               /* Last descriptor of this sub-packet closes the chain. */
+               pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
+                                         DESCRIPTOR_INPUT_LAST |
+                                         DESCRIPTOR_BRANCH_ALWAYS);
+               if (p->interrupt && i == packet_count - 1)
+                       pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
+
+               context_append(&ctx->context, d, z, header_z);
+       }
+
+       return 0;
+}
+
+/*
+ * Queue one iso packet, dispatching to the transmit path or to one of
+ * the two receive modes.  The whole operation runs under ohci->lock so
+ * that descriptor-ring appends are serialized.
+ */
+static int ohci_queue_iso(struct fw_iso_context *base,
+                         struct fw_iso_packet *packet,
+                         struct fw_iso_buffer *buffer,
+                         unsigned long payload)
+{
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       struct fw_ohci *ohci = ctx->context.ohci;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&ohci->lock, flags);
+       if (base->type == FW_ISO_CONTEXT_TRANSMIT) {
+               ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
+       } else if (ohci->use_dualbuffer) {
+               ret = ohci_queue_iso_receive_dualbuffer(base, packet,
+                                                       buffer, payload);
+       } else {
+               ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
+                                                       buffer, payload);
+       }
+       spin_unlock_irqrestore(&ohci->lock, flags);
+
+       return ret;
+}
+
+/* Operations through which the firewire core drives this OHCI controller. */
+static const struct fw_card_driver ohci_driver = {
+       .enable                 = ohci_enable,
+       .update_phy_reg         = ohci_update_phy_reg,
+       .set_config_rom         = ohci_set_config_rom,
+       .send_request           = ohci_send_request,
+       .send_response          = ohci_send_response,
+       .cancel_packet          = ohci_cancel_packet,
+       .enable_phys_dma        = ohci_enable_phys_dma,
+       .get_bus_time           = ohci_get_bus_time,
+
+       .allocate_iso_context   = ohci_allocate_iso_context,
+       .free_iso_context       = ohci_free_iso_context,
+       .queue_iso              = ohci_queue_iso,
+       .start_iso              = ohci_start_iso,
+       .stop_iso               = ohci_stop_iso,
+};
+
+#ifdef CONFIG_PPC_PMAC
+/* Power up 1394 cable power and the PHY/link on PowerMac hardware. */
+static void ohci_pmac_on(struct pci_dev *dev)
+{
+       if (machine_is(powermac)) {
+               struct device_node *ofn = pci_device_to_OF_node(dev);
+
+               if (ofn) {
+                       pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
+                       pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
+               }
+       }
+}
+
+/* Counterpart of ohci_pmac_on(): power the interface back down. */
+static void ohci_pmac_off(struct pci_dev *dev)
+{
+       if (machine_is(powermac)) {
+               struct device_node *ofn = pci_device_to_OF_node(dev);
+
+               if (ofn) {
+                       /* reverse order of the power-on sequence above */
+                       pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
+                       pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
+               }
+       }
+}
+#else
+#define ohci_pmac_on(dev)
+#define ohci_pmac_off(dev)
+#endif /* CONFIG_PPC_PMAC */
+
+static int __devinit pci_probe(struct pci_dev *dev,
+                              const struct pci_device_id *ent)
+{
+       struct fw_ohci *ohci;
+       u32 bus_options, max_receive, link_speed, version;
+       u64 guid;
+       int err;
+       size_t size;
+
+       ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
+       if (ohci == NULL) {
+               err = -ENOMEM;
+               goto fail;
+       }
+
+       fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
+
+       ohci_pmac_on(dev);
+
+       err = pci_enable_device(dev);
+       if (err) {
+               fw_error("Failed to enable OHCI hardware\n");
+               goto fail_free;
+       }
+
+       pci_set_master(dev);
+       pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
+       pci_set_drvdata(dev, ohci);
+
+       spin_lock_init(&ohci->lock);
+
+       tasklet_init(&ohci->bus_reset_tasklet,
+                    bus_reset_tasklet, (unsigned long)ohci);
+
+       err = pci_request_region(dev, 0, ohci_driver_name);
+       if (err) {
+               fw_error("MMIO resource unavailable\n");
+               goto fail_disable;
+       }
+
+       ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
+       if (ohci->registers == NULL) {
+               fw_error("Failed to remap registers\n");
+               err = -ENXIO;
+               goto fail_iomem;
+       }
+
+       version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+       ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
+
+/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
+#if !defined(CONFIG_X86_32)
+       /* dual-buffer mode is broken with descriptor addresses above 2G */
+       if (dev->vendor == PCI_VENDOR_ID_TI &&
+           dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
+               ohci->use_dualbuffer = false;
+#endif
+
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+       ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
+                            dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
+#endif
+       ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
+
+       ar_context_init(&ohci->ar_request_ctx, ohci,
+                       OHCI1394_AsReqRcvContextControlSet);
+
+       ar_context_init(&ohci->ar_response_ctx, ohci,
+                       OHCI1394_AsRspRcvContextControlSet);
+
+       context_init(&ohci->at_request_ctx, ohci,
+                    OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+
+       context_init(&ohci->at_response_ctx, ohci,
+                    OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+
+       reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
+       ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+       reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
+       size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
+       ohci->it_context_list = kzalloc(size, GFP_KERNEL);
+
+       reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+       ohci->ir_context_channels = ~0ULL;
+       ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+       reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+       size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
+       ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
+
+       if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
+               err = -ENOMEM;
+               goto fail_contexts;
+       }
+
+       /* self-id dma buffer allocation */
+       ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
+                                              SELF_ID_BUF_SIZE,
+                                              &ohci->self_id_bus,
+                                              GFP_KERNEL);
+       if (ohci->self_id_cpu == NULL) {
+               err = -ENOMEM;
+               goto fail_contexts;
+       }
+
+       bus_options = reg_read(ohci, OHCI1394_BusOptions);
+       max_receive = (bus_options >> 12) & 0xf;
+       link_speed = bus_options & 0x7;
+       guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
+               reg_read(ohci, OHCI1394_GUIDLo);
+
+       err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
+       if (err)
+               goto fail_self_id;
+
+       fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
+                 dev_name(&dev->dev), version >> 16, version & 0xff);
+
+       return 0;
+
+ fail_self_id:
+       dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
+                         ohci->self_id_cpu, ohci->self_id_bus);
+ fail_contexts:
+       kfree(ohci->ir_context_list);
+       kfree(ohci->it_context_list);
+       context_release(&ohci->at_response_ctx);
+       context_release(&ohci->at_request_ctx);
+       ar_context_release(&ohci->ar_response_ctx);
+       ar_context_release(&ohci->ar_request_ctx);
+       pci_iounmap(dev, ohci->registers);
+ fail_iomem:
+       pci_release_region(dev, 0);
+ fail_disable:
+       pci_disable_device(dev);
+ fail_free:
+       kfree(&ohci->card);
+       ohci_pmac_off(dev);
+ fail:
+       if (err == -ENOMEM)
+               fw_error("Out of memory\n");
+
+       return err;
+}
+
+/*
+ * Tear down one controller: detach it from the firewire core, quiesce
+ * the hardware, and release everything acquired in pci_probe(), in
+ * reverse order of acquisition.
+ */
+static void pci_remove(struct pci_dev *dev)
+{
+       struct fw_ohci *ohci;
+
+       ohci = pci_get_drvdata(dev);
+       reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+       flush_writes(ohci);
+       fw_core_remove_card(&ohci->card);
+
+       /*
+        * FIXME: Fail all pending packets here, now that the upper
+        * layers can't queue any more.
+        */
+
+       software_reset(ohci);
+       free_irq(dev->irq, ohci);
+
+       if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
+               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                 ohci->next_config_rom, ohci->next_config_rom_bus);
+       if (ohci->config_rom)
+               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                 ohci->config_rom, ohci->config_rom_bus);
+       dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
+                         ohci->self_id_cpu, ohci->self_id_bus);
+       ar_context_release(&ohci->ar_request_ctx);
+       ar_context_release(&ohci->ar_response_ctx);
+       context_release(&ohci->at_request_ctx);
+       context_release(&ohci->at_response_ctx);
+       kfree(ohci->it_context_list);
+       kfree(ohci->ir_context_list);
+       pci_iounmap(dev, ohci->registers);
+       pci_release_region(dev, 0);
+       pci_disable_device(dev);
+       /*
+        * Free the fw_ohci by its own pointer; kfree(&ohci->card) was
+        * only correct if "card" is the first member of struct fw_ohci.
+        */
+       kfree(ohci);
+       ohci_pmac_off(dev);
+
+       fw_notify("Removed fw-ohci device.\n");
+}
+
+#ifdef CONFIG_PM
+/*
+ * Quiesce the controller and put the PCI device into the target power
+ * state.  A pci_set_power_state() failure is only logged; suspend
+ * still proceeds and reports success (best effort by design).
+ */
+static int pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+       struct fw_ohci *ohci = pci_get_drvdata(dev);
+       int err;
+
+       software_reset(ohci);
+       free_irq(dev->irq, ohci);
+       err = pci_save_state(dev);
+       if (err) {
+               fw_error("pci_save_state failed\n");
+               return err;
+       }
+       err = pci_set_power_state(dev, pci_choose_state(dev, state));
+       if (err)
+               fw_error("pci_set_power_state failed with %d\n", err);
+       ohci_pmac_off(dev);
+
+       return 0;
+}
+
+/*
+ * Power the device back up, restore its PCI config space, and bring
+ * the link back through ohci_enable().  NOTE(review): the NULL/0
+ * config-ROM arguments presumably make ohci_enable() reuse the
+ * previously programmed ROM -- confirm against ohci_enable().
+ */
+static int pci_resume(struct pci_dev *dev)
+{
+       struct fw_ohci *ohci = pci_get_drvdata(dev);
+       int err;
+
+       ohci_pmac_on(dev);
+       pci_set_power_state(dev, PCI_D0);
+       pci_restore_state(dev);
+       err = pci_enable_device(dev);
+       if (err) {
+               fw_error("pci_enable_device failed\n");
+               return err;
+       }
+
+       return ohci_enable(&ohci->card, NULL, 0);
+}
+#endif
+
+/* Match any PCI device with the OHCI-compliant FireWire class code. */
+static struct pci_device_id pci_table[] = {
+       { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
+       { }
+};
+
+MODULE_DEVICE_TABLE(pci, pci_table);
+
+static struct pci_driver fw_ohci_pci_driver = {
+       .name           = ohci_driver_name,
+       .id_table       = pci_table,
+       .probe          = pci_probe,
+       .remove         = pci_remove,
+#ifdef CONFIG_PM
+       .resume         = pci_resume,
+       .suspend        = pci_suspend,
+#endif
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
+MODULE_LICENSE("GPL");
+
+/* Provide a module alias so root-on-sbp2 initrds don't break. */
+#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
+MODULE_ALIAS("ohci1394");
+#endif
+
+/* Module entry point: hand the driver to the PCI core. */
+static int __init fw_ohci_init(void)
+{
+       return pci_register_driver(&fw_ohci_pci_driver);
+}
+
+/* Module exit: unbind from all devices and unregister the driver. */
+static void __exit fw_ohci_cleanup(void)
+{
+       pci_unregister_driver(&fw_ohci_pci_driver);
+}
+
+module_init(fw_ohci_init);
+module_exit(fw_ohci_cleanup);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
new file mode 100644 (file)
index 0000000..d41cb6e
--- /dev/null
@@ -0,0 +1,1651 @@
+/*
+ * SBP2 driver (SCSI over IEEE1394)
+ *
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * The basic structure of this driver is based on the old storage driver,
+ * drivers/ieee1394/sbp2.c, originally written by
+ *     James Goodwin <jamesg@filanet.com>
+ * with later contributions and ongoing maintenance from
+ *     Ben Collins <bcollins@debian.org>,
+ *     Stefan Richter <stefanr@s5r6.in-berlin.de>
+ * and many others.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+#include <linux/workqueue.h>
+
+#include <asm/byteorder.h>
+#include <asm/system.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+/*
+ * So far only bridges from Oxford Semiconductor are known to support
+ * concurrent logins. Depending on firmware, four or two concurrent logins
+ * are possible on OXFW911 and newer Oxsemi bridges.
+ *
+ * Concurrent logins are useful together with cluster filesystems.
+ */
+static int sbp2_param_exclusive_login = 1;
+module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
+MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
+                "(default = Y, use N for concurrent initiators)");
+
+/*
+ * Flags for firmware oddities
+ *
+ * - 128kB max transfer
+ *   Limit transfer size. Necessary for some old bridges.
+ *
+ * - 36 byte inquiry
+ *   When scsi_mod probes the device, let the inquiry command look like that
+ *   from MS Windows.
+ *
+ * - skip mode page 8
+ *   Suppress sending of mode_sense for mode page 8 if the device pretends to
+ *   support the SCSI Primary Block commands instead of Reduced Block Commands.
+ *
+ * - fix capacity
+ *   Tell sd_mod to correct the last sector number reported by read_capacity.
+ *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
+ *   Don't use this with devices which don't have this bug.
+ *
+ * - delay inquiry
+ *   Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
+ *
+ * - power condition
+ *   Set the power condition field in the START STOP UNIT commands sent by
+ *   sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
+ *   Some disks need this to spin down or to resume properly.
+ *
+ * - override internal blacklist
+ *   Instead of adding to the built-in blacklist, use only the workarounds
+ *   specified in the module load parameter.
+ *   Useful if a blacklist entry interfered with a non-broken device.
+ */
+#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
+#define SBP2_WORKAROUND_INQUIRY_36     0x2
+#define SBP2_WORKAROUND_MODE_SENSE_8   0x4
+#define SBP2_WORKAROUND_FIX_CAPACITY   0x8
+#define SBP2_WORKAROUND_DELAY_INQUIRY  0x10
+#define SBP2_INQUIRY_DELAY             12
+#define SBP2_WORKAROUND_POWER_CONDITION        0x20
+#define SBP2_WORKAROUND_OVERRIDE       0x100
+
+static int sbp2_param_workarounds;
+module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
+MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
+       ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
+       ", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
+       ", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
+       ", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
+       ", delay inquiry = "      __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
+       ", set power condition in start stop unit = "
+                                 __stringify(SBP2_WORKAROUND_POWER_CONDITION)
+       ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
+       ", or a combination)");
+
+/* I don't know why the SCSI stack doesn't define something like this... */
+typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
+
+static const char sbp2_driver_name[] = "sbp2";
+
+/*
+ * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
+ * and one struct scsi_device per sbp2_logical_unit.
+ */
+struct sbp2_logical_unit {
+       /* owning target; one target may expose several logical units */
+       struct sbp2_target *tgt;
+       /* list node; presumably chained on sbp2_target.lu_list -- confirm */
+       struct list_head link;
+       /* address range that receives the target's status FIFO writes */
+       struct fw_address_handler address_handler;
+       /* ORBs sent to the target but not yet completed */
+       struct list_head orb_list;
+
+       u64 command_block_agent_address;
+       u16 lun;
+       /* INVALID_LOGIN_ID until a login succeeded, presumably */
+       int login_id;
+
+       /*
+        * The generation is updated once we've logged in or reconnected
+        * to the logical unit.  Thus, I/O to the device will automatically
+        * fail and get retried if it happens in a window where the device
+        * is not ready, e.g. after a bus reset but before we reconnect.
+        */
+       int generation;
+       int retries;
+       struct delayed_work work;
+       bool has_sdev;
+       bool blocked;
+};
+
+/*
+ * We create one struct sbp2_target per IEEE 1212 Unit Directory
+ * and one struct Scsi_Host per sbp2_target.
+ */
+struct sbp2_target {
+       /* reference count for this target */
+       struct kref kref;
+       struct fw_unit *unit;
+       const char *bus_id;
+       /* the target's logical units (struct sbp2_logical_unit.link) */
+       struct list_head lu_list;
+
+       u64 management_agent_address;
+       u64 guid;
+       int directory_id;
+       int node_id;
+       int address_high;
+       /* SBP2_WORKAROUND_* bits in effect for this device */
+       unsigned int workarounds;
+       /* login ORB timeout in ms, clamped from the config ROM value */
+       unsigned int mgt_orb_timeout;
+       unsigned int max_payload;
+
+       int dont_block; /* counter for each logical unit */
+       int blocked;    /* ditto */
+};
+
+/* Impossible login_id, to detect logout attempt before successful login */
+#define INVALID_LOGIN_ID 0x10000
+
+/*
+ * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
+ * provided in the config rom. Most devices do provide a value, which
+ * we'll use for login management orbs, but with some sane limits.
+ */
+#define SBP2_MIN_LOGIN_ORB_TIMEOUT     5000U   /* Timeout in ms */
+#define SBP2_MAX_LOGIN_ORB_TIMEOUT     40000U  /* Timeout in ms */
+#define SBP2_ORB_TIMEOUT               2000U   /* Timeout in ms */
+#define SBP2_ORB_NULL                  0x80000000
+#define SBP2_RETRY_LIMIT               0xf             /* 15 retries */
+#define SBP2_CYCLE_LIMIT               (0xc8 << 12)    /* 200 125us cycles */
+
+/*
+ * The default maximum s/g segment size of a FireWire controller is
+ * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
+ * be quadlet-aligned, we set the length limit to 0xffff & ~3.
+ */
+#define SBP2_MAX_SEG_SIZE              0xfffc
+
+/* Unit directory keys */
+#define SBP2_CSR_UNIT_CHARACTERISTICS  0x3a
+#define SBP2_CSR_FIRMWARE_REVISION     0x3c
+#define SBP2_CSR_LOGICAL_UNIT_NUMBER   0x14
+#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY        0xd4
+
+/* Management orb opcodes */
+#define SBP2_LOGIN_REQUEST             0x0
+#define SBP2_QUERY_LOGINS_REQUEST      0x1
+#define SBP2_RECONNECT_REQUEST         0x3
+#define SBP2_SET_PASSWORD_REQUEST      0x4
+#define SBP2_LOGOUT_REQUEST            0x7
+#define SBP2_ABORT_TASK_REQUEST                0xb
+#define SBP2_ABORT_TASK_SET            0xc
+#define SBP2_LOGICAL_UNIT_RESET                0xe
+#define SBP2_TARGET_RESET_REQUEST      0xf
+
+/* Offsets for command block agent registers */
+#define SBP2_AGENT_STATE               0x00
+#define SBP2_AGENT_RESET               0x04
+#define SBP2_ORB_POINTER               0x08
+#define SBP2_DOORBELL                  0x10
+#define SBP2_UNSOLICITED_STATUS_ENABLE 0x14
+
+/* Status write response codes */
+#define SBP2_STATUS_REQUEST_COMPLETE   0x0
+#define SBP2_STATUS_TRANSPORT_FAILURE  0x1
+#define SBP2_STATUS_ILLEGAL_REQUEST    0x2
+#define SBP2_STATUS_VENDOR_DEPENDENT   0x3
+
+#define STATUS_GET_ORB_HIGH(v)         ((v).status & 0xffff)
+#define STATUS_GET_SBP_STATUS(v)       (((v).status >> 16) & 0xff)
+#define STATUS_GET_LEN(v)              (((v).status >> 24) & 0x07)
+#define STATUS_GET_DEAD(v)             (((v).status >> 27) & 0x01)
+#define STATUS_GET_RESPONSE(v)         (((v).status >> 28) & 0x03)
+#define STATUS_GET_SOURCE(v)           (((v).status >> 30) & 0x03)
+#define STATUS_GET_ORB_LOW(v)          ((v).orb_low)
+#define STATUS_GET_DATA(v)             ((v).data)
+
+struct sbp2_status {
+       u32 status;
+       u32 orb_low;
+       u8 data[24];
+};
+
+struct sbp2_pointer {
+       __be32 high;
+       __be32 low;
+};
+
+struct sbp2_orb {
+       struct fw_transaction t;
+       struct kref kref;
+       dma_addr_t request_bus;
+       int rcode;
+       struct sbp2_pointer pointer;
+       void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
+       struct list_head link;
+};
+
+#define MANAGEMENT_ORB_LUN(v)                  ((v))
+#define MANAGEMENT_ORB_FUNCTION(v)             ((v) << 16)
+#define MANAGEMENT_ORB_RECONNECT(v)            ((v) << 20)
+#define MANAGEMENT_ORB_EXCLUSIVE(v)            ((v) ? 1 << 28 : 0)
+#define MANAGEMENT_ORB_REQUEST_FORMAT(v)       ((v) << 29)
+#define MANAGEMENT_ORB_NOTIFY                  ((1) << 31)
+
+#define MANAGEMENT_ORB_RESPONSE_LENGTH(v)      ((v))
+#define MANAGEMENT_ORB_PASSWORD_LENGTH(v)      ((v) << 16)
+
+struct sbp2_management_orb {
+       struct sbp2_orb base;
+       struct {
+               struct sbp2_pointer password;
+               struct sbp2_pointer response;
+               __be32 misc;
+               __be32 length;
+               struct sbp2_pointer status_fifo;
+       } request;
+       __be32 response[4];
+       dma_addr_t response_bus;
+       struct completion done;
+       struct sbp2_status status;
+};
+
+struct sbp2_login_response {
+       __be32 misc;
+       struct sbp2_pointer command_block_agent;
+       __be32 reconnect_hold;
+};
+#define COMMAND_ORB_DATA_SIZE(v)       ((v))
+#define COMMAND_ORB_PAGE_SIZE(v)       ((v) << 16)
+#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
+#define COMMAND_ORB_MAX_PAYLOAD(v)     ((v) << 20)
+#define COMMAND_ORB_SPEED(v)           ((v) << 24)
+#define COMMAND_ORB_DIRECTION          ((1) << 27)
+#define COMMAND_ORB_REQUEST_FORMAT(v)  ((v) << 29)
+#define COMMAND_ORB_NOTIFY             ((1) << 31)
+
+struct sbp2_command_orb {
+       struct sbp2_orb base;
+       struct {
+               struct sbp2_pointer next;
+               struct sbp2_pointer data_descriptor;
+               __be32 misc;
+               u8 command_block[12];
+       } request;
+       struct scsi_cmnd *cmd;
+       scsi_done_fn_t done;
+       struct sbp2_logical_unit *lu;
+
+       struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
+       dma_addr_t page_table_bus;
+};
+
+#define SBP2_ROM_VALUE_WILDCARD ~0         /* match all */
+#define SBP2_ROM_VALUE_MISSING  0xff000000 /* not present in the unit dir. */
+
+/*
+ * List of devices with known bugs.
+ *
+ * The firmware_revision field, masked with 0xffff00, is the best
+ * indicator for the type of bridge chip of a device.  It yields a few
+ * false positives but this did not break correctly behaving devices
+ * so far.
+ */
+static const struct {
+       u32 firmware_revision;
+       u32 model;
+       unsigned int workarounds;
+} sbp2_workarounds_table[] = {
+       /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
+               .firmware_revision      = 0x002800,
+               .model                  = 0x001010,
+               .workarounds            = SBP2_WORKAROUND_INQUIRY_36 |
+                                         SBP2_WORKAROUND_MODE_SENSE_8 |
+                                         SBP2_WORKAROUND_POWER_CONDITION,
+       },
+       /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
+               .firmware_revision      = 0x002800,
+               .model                  = 0x000000,
+               .workarounds            = SBP2_WORKAROUND_DELAY_INQUIRY |
+                                         SBP2_WORKAROUND_POWER_CONDITION,
+       },
+       /* Initio bridges, actually only needed for some older ones */ {
+               .firmware_revision      = 0x000200,
+               .model                  = SBP2_ROM_VALUE_WILDCARD,
+               .workarounds            = SBP2_WORKAROUND_INQUIRY_36,
+       },
+       /* PL-3507 bridge with Prolific firmware */ {
+               .firmware_revision      = 0x012800,
+               .model                  = SBP2_ROM_VALUE_WILDCARD,
+               .workarounds            = SBP2_WORKAROUND_POWER_CONDITION,
+       },
+       /* Symbios bridge */ {
+               .firmware_revision      = 0xa0b800,
+               .model                  = SBP2_ROM_VALUE_WILDCARD,
+               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
+       },
+       /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
+               .firmware_revision      = 0x002600,
+               .model                  = SBP2_ROM_VALUE_WILDCARD,
+               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
+       },
+       /*
+        * iPod 2nd generation: needs 128k max transfer size workaround
+        * iPod 3rd generation: needs fix capacity workaround
+        */
+       {
+               .firmware_revision      = 0x0a2700,
+               .model                  = 0x000000,
+               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS |
+                                         SBP2_WORKAROUND_FIX_CAPACITY,
+       },
+       /* iPod 4th generation */ {
+               .firmware_revision      = 0x0a2700,
+               .model                  = 0x000021,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+       },
+       /* iPod mini */ {
+               .firmware_revision      = 0x0a2700,
+               .model                  = 0x000022,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+       },
+       /* iPod mini */ {
+               .firmware_revision      = 0x0a2700,
+               .model                  = 0x000023,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+       },
+       /* iPod Photo */ {
+               .firmware_revision      = 0x0a2700,
+               .model                  = 0x00007e,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+       }
+};
+
+/* kref release function: last reference to an ORB is gone, free it. */
+static void free_orb(struct kref *kref)
+{
+       struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
+
+       kfree(orb);
+}
+
+/*
+ * Address handler for the status FIFO: the target writes an SBP-2
+ * status block here when an ORB completes.  Match it against the
+ * logical unit's pending ORB list and complete the ORB.
+ */
+static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
+                             int tcode, int destination, int source,
+                             int generation, int speed,
+                             unsigned long long offset,
+                             void *payload, size_t length, void *callback_data)
+{
+       struct sbp2_logical_unit *lu = callback_data;
+       struct sbp2_orb *orb;
+       struct sbp2_status status;
+       size_t header_size;
+       unsigned long flags;
+
+       if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
+           length == 0 || length > sizeof(status)) {
+               fw_send_response(card, request, RCODE_TYPE_ERROR);
+               return;
+       }
+
+       header_size = min(length, 2 * sizeof(u32));
+       fw_memcpy_from_be32(&status, payload, header_size);
+       if (length > header_size)
+               memcpy(status.data, payload + 8, length - header_size);
+       if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
+               fw_notify("non-orb related status write, not handled\n");
+               fw_send_response(card, request, RCODE_COMPLETE);
+               return;
+       }
+
+       /* Lookup the orb corresponding to this status write. */
+       spin_lock_irqsave(&card->lock, flags);
+       list_for_each_entry(orb, &lu->orb_list, link) {
+               if (STATUS_GET_ORB_HIGH(status) == 0 &&
+                   STATUS_GET_ORB_LOW(status) == orb->request_bus) {
+                       orb->rcode = RCODE_COMPLETE;
+                       list_del(&orb->link);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       if (&orb->link != &lu->orb_list) {
+               orb->callback(orb, &status);
+               /* drop the orb-list reference taken in sbp2_send_orb() */
+               kref_put(&orb->kref, free_orb);
+       } else {
+               /*
+                * No match: "orb" is now the bogus container_of() of the
+                * list head and must not be dereferenced.  The previous
+                * code called kref_put() on it unconditionally here,
+                * which corrupts memory near the list head.
+                */
+               fw_error("status write for unknown orb\n");
+       }
+
+       fw_send_response(card, request, RCODE_COMPLETE);
+}
+
+/*
+ * Completion callback of the write transaction that handed the ORB
+ * pointer to the target.  May race with the target's status write;
+ * see the comment below for how the race is resolved.
+ */
+static void complete_transaction(struct fw_card *card, int rcode,
+                                void *payload, size_t length, void *data)
+{
+       struct sbp2_orb *orb = data;
+       unsigned long flags;
+
+       /*
+        * This is a little tricky.  We can get the status write for
+        * the orb before we get this callback.  The status write
+        * handler above will assume the orb pointer transaction was
+        * successful and set the rcode to RCODE_COMPLETE for the orb.
+        * So this callback only sets the rcode if it hasn't already
+        * been set and only does the cleanup if the transaction
+        * failed and we didn't already get a status write.
+        */
+       spin_lock_irqsave(&card->lock, flags);
+
+       if (orb->rcode == -1)
+               orb->rcode = rcode;
+       if (orb->rcode != RCODE_COMPLETE) {
+               list_del(&orb->link);
+               spin_unlock_irqrestore(&card->lock, flags);
+               orb->callback(orb, NULL);
+       } else {
+               spin_unlock_irqrestore(&card->lock, flags);
+       }
+
+       /* drop the transaction-callback reference from sbp2_send_orb() */
+       kref_put(&orb->kref, free_orb);
+}
+
+/*
+ * Link the ORB into the logical unit's pending list and write its bus
+ * address to the target at @offset.  Takes two references on the orb:
+ * one owned by the orb list (dropped by the status write handler) and
+ * one owned by complete_transaction().
+ */
+static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
+                         int node_id, int generation, u64 offset)
+{
+       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+       unsigned long flags;
+
+       orb->pointer.high = 0;
+       orb->pointer.low = cpu_to_be32(orb->request_bus);
+
+       spin_lock_irqsave(&device->card->lock, flags);
+       list_add_tail(&orb->link, &lu->orb_list);
+       spin_unlock_irqrestore(&device->card->lock, flags);
+
+       /* Take a ref for the orb list and for the transaction callback. */
+       kref_get(&orb->kref);
+       kref_get(&orb->kref);
+
+       fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
+                       node_id, generation, device->max_speed, offset,
+                       &orb->pointer, sizeof(orb->pointer),
+                       complete_transaction, orb);
+}
+
+/*
+ * Cancel every pending ORB of @lu.  Transactions that can no longer be
+ * cancelled are completed directly with RCODE_CANCELLED.  Returns 0 if
+ * at least one ORB was pending, -ENOENT if the list was empty.
+ */
+static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
+{
+       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+       struct sbp2_orb *orb, *next;
+       struct list_head list;
+       unsigned long flags;
+       int retval = -ENOENT;
+
+       INIT_LIST_HEAD(&list);
+       spin_lock_irqsave(&device->card->lock, flags);
+       list_splice_init(&lu->orb_list, &list);
+       spin_unlock_irqrestore(&device->card->lock, flags);
+
+       list_for_each_entry_safe(orb, next, &list, link) {
+               retval = 0;
+               if (fw_cancel_transaction(device->card, &orb->t) == 0)
+                       continue;
+
+               orb->rcode = RCODE_CANCELLED;
+               orb->callback(orb, NULL);
+       }
+
+       return retval;
+}
+
+/*
+ * ORB completion callback for management ORBs: stash the status block
+ * (NULL status means cancelled/failed transaction) and wake the waiter
+ * in sbp2_send_management_orb().
+ */
+static void complete_management_orb(struct sbp2_orb *base_orb,
+                                   struct sbp2_status *status)
+{
+       struct sbp2_management_orb *orb =
+               container_of(base_orb, struct sbp2_management_orb, base);
+
+       if (status)
+               memcpy(&orb->status, status, sizeof(*status));
+       complete(&orb->done);
+}
+
+/*
+ * Send a management ORB (login, logout, reconnect, ...) to the target's
+ * management agent and wait for its status.  @function is one of the
+ * SBP2_*_REQUEST codes; @lun_or_login_id carries the LUN for login and
+ * the login ID otherwise.  If @response is non-NULL, the target's response
+ * buffer is copied into it (even on failure paths after the ORB was sent).
+ * Returns 0 on success, -ENOMEM or -EIO on failure.
+ */
+static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
+                                   int generation, int function,
+                                   int lun_or_login_id, void *response)
+{
+       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+       struct sbp2_management_orb *orb;
+       unsigned int timeout;
+       int retval = -ENOMEM;
+
+       /* A logout to an unplugged device cannot reach it; treat as done. */
+       if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
+               return 0;
+
+       orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
+       if (orb == NULL)
+               return -ENOMEM;
+
+       kref_init(&orb->base.kref);
+       orb->response_bus =
+               dma_map_single(device->card->device, &orb->response,
+                              sizeof(orb->response), DMA_FROM_DEVICE);
+       if (dma_mapping_error(device->card->device, orb->response_bus))
+               goto fail_mapping_response;
+
+       orb->request.response.high = 0;
+       orb->request.response.low  = cpu_to_be32(orb->response_bus);
+
+       orb->request.misc = cpu_to_be32(
+               MANAGEMENT_ORB_NOTIFY |
+               MANAGEMENT_ORB_FUNCTION(function) |
+               MANAGEMENT_ORB_LUN(lun_or_login_id));
+       orb->request.length = cpu_to_be32(
+               MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));
+
+       /* Tell the target where to write the status block: our handler. */
+       orb->request.status_fifo.high =
+               cpu_to_be32(lu->address_handler.offset >> 32);
+       orb->request.status_fifo.low  =
+               cpu_to_be32(lu->address_handler.offset);
+
+       if (function == SBP2_LOGIN_REQUEST) {
+               /* Ask for 2^2 == 4 seconds reconnect grace period */
+               orb->request.misc |= cpu_to_be32(
+                       MANAGEMENT_ORB_RECONNECT(2) |
+                       MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
+               timeout = lu->tgt->mgt_orb_timeout;
+       } else {
+               timeout = SBP2_ORB_TIMEOUT;
+       }
+
+       init_completion(&orb->done);
+       orb->base.callback = complete_management_orb;
+
+       orb->base.request_bus =
+               dma_map_single(device->card->device, &orb->request,
+                              sizeof(orb->request), DMA_TO_DEVICE);
+       if (dma_mapping_error(device->card->device, orb->base.request_bus))
+               goto fail_mapping_request;
+
+       sbp2_send_orb(&orb->base, lu, node_id, generation,
+                     lu->tgt->management_agent_address);
+
+       wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout));
+
+       retval = -EIO;
+       /* 0 from sbp2_cancel_orbs() means the ORB was still pending. */
+       if (sbp2_cancel_orbs(lu) == 0) {
+               fw_error("%s: orb reply timed out, rcode=0x%02x\n",
+                        lu->tgt->bus_id, orb->base.rcode);
+               goto out;
+       }
+
+       if (orb->base.rcode != RCODE_COMPLETE) {
+               fw_error("%s: management write failed, rcode 0x%02x\n",
+                        lu->tgt->bus_id, orb->base.rcode);
+               goto out;
+       }
+
+       if (STATUS_GET_RESPONSE(orb->status) != 0 ||
+           STATUS_GET_SBP_STATUS(orb->status) != 0) {
+               fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
+                        STATUS_GET_RESPONSE(orb->status),
+                        STATUS_GET_SBP_STATUS(orb->status));
+               goto out;
+       }
+
+       retval = 0;
+ out:
+       dma_unmap_single(device->card->device, orb->base.request_bus,
+                        sizeof(orb->request), DMA_TO_DEVICE);
+ fail_mapping_request:
+       dma_unmap_single(device->card->device, orb->response_bus,
+                        sizeof(orb->response), DMA_FROM_DEVICE);
+ fail_mapping_response:
+       if (response)
+               memcpy(response, orb->response, sizeof(orb->response));
+       kref_put(&orb->base.kref, free_orb);
+
+       return retval;
+}
+
+/*
+ * Synchronously reset the logical unit's command block agent by writing a
+ * quadlet of zero to its AGENT_RESET register.
+ */
+static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
+{
+       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+       __be32 d = 0;
+
+       fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
+                          lu->tgt->node_id, lu->generation, device->max_speed,
+                          lu->command_block_agent_address + SBP2_AGENT_RESET,
+                          &d, sizeof(d));
+}
+
+/*
+ * Completion callback for the fire-and-forget agent reset: @data is the
+ * kmalloc'ed fw_transaction from sbp2_agent_reset_no_wait(); free it.
+ */
+static void complete_agent_reset_write_no_wait(struct fw_card *card,
+               int rcode, void *payload, size_t length, void *data)
+{
+       kfree(data);
+}
+
+/*
+ * Asynchronous variant of sbp2_agent_reset(): usable from atomic context,
+ * does not wait for the write to complete and silently gives up if the
+ * transaction cannot be allocated.
+ */
+static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
+{
+       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+       struct fw_transaction *t;
+       /* static so the payload outlives this function until the callback */
+       static __be32 d;
+
+       t = kmalloc(sizeof(*t), GFP_ATOMIC);
+       if (t == NULL)
+               return;
+
+       fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
+                       lu->tgt->node_id, lu->generation, device->max_speed,
+                       lu->command_block_agent_address + SBP2_AGENT_RESET,
+                       &d, sizeof(d), complete_agent_reset_write_no_wait, t);
+}
+
+/*
+ * Drop one reference on tgt->dont_block, allowing the target to be
+ * blocked once all logical units have finished their setup.
+ */
+static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
+{
+       /*
+        * We may access dont_block without taking card->lock here:
+        * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
+        * are currently serialized against each other.
+        * And a wrong result in sbp2_conditionally_block()'s access of
+        * dont_block is rather harmless, it simply misses its first chance.
+        */
+       --lu->tgt->dont_block;
+}
+
+/*
+ * Blocks lu->tgt if all of the following conditions are met:
+ *   - Login, INQUIRY, and high-level SCSI setup of all of the target's
+ *     logical units have been finished (indicated by dont_block == 0).
+ *   - lu->generation is stale.
+ *
+ * Note, scsi_block_requests() must be called while holding card->lock,
+ * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
+ * unblock the target.
+ */
+static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
+{
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+       /* tgt lives in shost->hostdata, so recover the host from it */
+       struct Scsi_Host *shost =
+               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       unsigned long flags;
+
+       spin_lock_irqsave(&card->lock, flags);
+       if (!tgt->dont_block && !lu->blocked &&
+           lu->generation != card->generation) {
+               lu->blocked = true;
+               /* Block the host only on the first blocked logical unit. */
+               if (++tgt->blocked == 1)
+                       scsi_block_requests(shost);
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/*
+ * Unblocks lu->tgt as soon as all its logical units can be unblocked.
+ * Note, it is harmless to run scsi_unblock_requests() outside the
+ * card->lock protected section.  On the other hand, running it inside
+ * the section might clash with shost->host_lock.
+ */
+static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
+{
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+       struct Scsi_Host *shost =
+               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       unsigned long flags;
+       bool unblock = false;
+
+       spin_lock_irqsave(&card->lock, flags);
+       /* Only unblock if this lu caught up with the current generation. */
+       if (lu->blocked && lu->generation == card->generation) {
+               lu->blocked = false;
+               /* Unblock the host once the last blocked lu is released. */
+               unblock = --tgt->blocked == 0;
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       if (unblock)
+               scsi_unblock_requests(shost);
+}
+
+/*
+ * Prevents future blocking of tgt and unblocks it.
+ * Note, it is harmless to run scsi_unblock_requests() outside the
+ * card->lock protected section.  On the other hand, running it inside
+ * the section might clash with shost->host_lock.
+ */
+static void sbp2_unblock(struct sbp2_target *tgt)
+{
+       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+       struct Scsi_Host *shost =
+               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       unsigned long flags;
+
+       spin_lock_irqsave(&card->lock, flags);
+       /* Nonzero dont_block keeps sbp2_conditionally_block() from acting. */
+       ++tgt->dont_block;
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       scsi_unblock_requests(shost);
+}
+
+/*
+ * Convert a 16-bit SBP-2 LUN into the SCSI midlayer's integer LUN
+ * representation by placing it in the first two bytes of a scsi_lun.
+ */
+static int sbp2_lun2int(u16 lun)
+{
+       struct scsi_lun eight_bytes_lun;
+
+       memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
+       eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff;
+       eight_bytes_lun.scsi_lun[1] = lun & 0xff;
+
+       return scsilun_to_int(&eight_bytes_lun);
+}
+
+/*
+ * kref release function for the target: tears down all logical units
+ * (removing their SCSI devices, logging out, unregistering the status
+ * FIFO handler), removes the SCSI host and drops the unit/device/host
+ * references taken in sbp2_probe().
+ */
+static void sbp2_release_target(struct kref *kref)
+{
+       struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
+       struct sbp2_logical_unit *lu, *next;
+       struct Scsi_Host *shost =
+               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       struct scsi_device *sdev;
+       struct fw_device *device = fw_device(tgt->unit->device.parent);
+
+       /* prevent deadlocks */
+       sbp2_unblock(tgt);
+
+       list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
+               sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
+               if (sdev) {
+                       scsi_remove_device(sdev);
+                       scsi_device_put(sdev);
+               }
+               /* Only log out of LUNs we actually managed to log in to. */
+               if (lu->login_id != INVALID_LOGIN_ID) {
+                       int generation, node_id;
+                       /*
+                        * tgt->node_id may be obsolete here if we failed
+                        * during initial login or after a bus reset where
+                        * the topology changed.
+                        */
+                       generation = device->generation;
+                       smp_rmb(); /* node_id vs. generation */
+                       node_id    = device->node_id;
+                       sbp2_send_management_orb(lu, node_id, generation,
+                                                SBP2_LOGOUT_REQUEST,
+                                                lu->login_id, NULL);
+               }
+               fw_core_remove_address_handler(&lu->address_handler);
+               list_del(&lu->link);
+               kfree(lu);
+       }
+       scsi_remove_host(shost);
+       fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
+
+       fw_unit_put(tgt->unit);
+       scsi_host_put(shost);
+       fw_device_put(device);
+}
+
+static struct workqueue_struct *sbp2_wq;
+
+/* Drop a target reference; frees the target via sbp2_release_target(). */
+static void sbp2_target_put(struct sbp2_target *tgt)
+{
+       kref_put(&tgt->kref, sbp2_release_target);
+}
+
+/*
+ * Always get the target's kref when scheduling work on one its units.
+ * Each workqueue job is responsible to call sbp2_target_put() upon return.
+ */
+static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
+{
+       kref_get(&lu->tgt->kref);
+       /* If the work was already queued, drop the reference we just took. */
+       if (!queue_delayed_work(sbp2_wq, &lu->work, delay))
+               sbp2_target_put(lu->tgt);
+}
+
+/*
+ * Write retransmit retry values into the BUSY_TIMEOUT register.
+ * - The single-phase retry protocol is supported by all SBP-2 devices, but the
+ *   default retry_limit value is 0 (i.e. never retry transmission). We write a
+ *   saner value after logging into the device.
+ * - The dual-phase retry protocol is optional to implement, and if not
+ *   supported, writes to the dual-phase portion of the register will be
+ *   ignored. We try to write the original 1394-1995 default here.
+ * - In the case of devices that are also SBP-3-compliant, all writes are
+ *   ignored, as the register is read-only, but contains single-phase retry of
+ *   15, which is what we're trying to set for all SBP-2 device anyway, so this
+ *   write attempt is safe and yields more consistent behavior for all devices.
+ *
+ * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec,
+ * and section 6.4 of the SBP-3 spec for further details.
+ */
+static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
+{
+       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+       /* cycle limit (dual-phase) and retry limit (single-phase) fields */
+       __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
+
+       fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
+                          lu->tgt->node_id, lu->generation, device->max_speed,
+                          CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT,
+                          &d, sizeof(d));
+}
+
+static void sbp2_reconnect(struct work_struct *work);
+
+/*
+ * Workqueue job: log in to one logical unit, then (on first login) add
+ * the SCSI device.  On failure it reschedules itself up to 5 times; on
+ * success it re-arms lu->work as sbp2_reconnect for future bus resets.
+ * Runs with a target reference held via sbp2_queue_work(); drops it on
+ * return.
+ */
+static void sbp2_login(struct work_struct *work)
+{
+       struct sbp2_logical_unit *lu =
+               container_of(work, struct sbp2_logical_unit, work.work);
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_device *device = fw_device(tgt->unit->device.parent);
+       struct Scsi_Host *shost;
+       struct scsi_device *sdev;
+       struct sbp2_login_response response;
+       int generation, node_id, local_node_id;
+
+       if (fw_device_is_shutdown(device))
+               goto out;
+
+       generation    = device->generation;
+       smp_rmb();    /* node IDs must not be older than generation */
+       node_id       = device->node_id;
+       local_node_id = device->card->node_id;
+
+       /* If this is a re-login attempt, log out, or we might be rejected. */
+       if (lu->has_sdev)
+               sbp2_send_management_orb(lu, device->node_id, generation,
+                               SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
+
+       if (sbp2_send_management_orb(lu, node_id, generation,
+                               SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
+               if (lu->retries++ < 5) {
+                       sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+               } else {
+                       fw_error("%s: failed to login to LUN %04x\n",
+                                tgt->bus_id, lu->lun);
+                       /* Let any waiting I/O fail from now on. */
+                       sbp2_unblock(lu->tgt);
+               }
+               goto out;
+       }
+
+       tgt->node_id      = node_id;
+       tgt->address_high = local_node_id << 16;
+       smp_wmb();        /* node IDs must not be older than generation */
+       lu->generation    = generation;
+
+       /* Extract command block agent address and login ID from response. */
+       lu->command_block_agent_address =
+               ((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
+                     << 32) | be32_to_cpu(response.command_block_agent.low);
+       lu->login_id = be32_to_cpu(response.misc) & 0xffff;
+
+       fw_notify("%s: logged in to LUN %04x (%d retries)\n",
+                 tgt->bus_id, lu->lun, lu->retries);
+
+       /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
+       sbp2_set_busy_timeout(lu);
+
+       /* Future bus resets should reconnect, not log in from scratch. */
+       PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+       sbp2_agent_reset(lu);
+
+       /* This was a re-login. */
+       if (lu->has_sdev) {
+               sbp2_cancel_orbs(lu);
+               sbp2_conditionally_unblock(lu);
+               goto out;
+       }
+
+       if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
+               ssleep(SBP2_INQUIRY_DELAY);
+
+       shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu);
+       /*
+        * FIXME:  We are unable to perform reconnects while in sbp2_login().
+        * Therefore __scsi_add_device() will get into trouble if a bus reset
+        * happens in parallel.  It will either fail or leave us with an
+        * unusable sdev.  As a workaround we check for this and retry the
+        * whole login and SCSI probing.
+        */
+
+       /* Reported error during __scsi_add_device() */
+       if (IS_ERR(sdev))
+               goto out_logout_login;
+
+       /* Unreported error during __scsi_add_device() */
+       smp_rmb(); /* get current card generation */
+       if (generation != device->card->generation) {
+               scsi_remove_device(sdev);
+               scsi_device_put(sdev);
+               goto out_logout_login;
+       }
+
+       /* No error during __scsi_add_device() */
+       lu->has_sdev = true;
+       scsi_device_put(sdev);
+       sbp2_allow_block(lu);
+       goto out;
+
+ out_logout_login:
+       smp_rmb(); /* generation may have changed */
+       generation = device->generation;
+       smp_rmb(); /* node_id must not be older than generation */
+
+       sbp2_send_management_orb(lu, device->node_id, generation,
+                                SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
+       /*
+        * If a bus reset happened, sbp2_update will have requeued
+        * lu->work already.  Reset the work from reconnect to login.
+        */
+       PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ out:
+       sbp2_target_put(tgt);
+}
+
+/*
+ * Allocate a logical unit for @lun_entry (low 16 bits are the LUN),
+ * register its status FIFO address handler and link it into the target's
+ * lu_list.  Returns 0 on success, -ENOMEM on failure.
+ */
+static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
+{
+       struct sbp2_logical_unit *lu;
+
+       lu = kmalloc(sizeof(*lu), GFP_KERNEL);
+       if (!lu)
+               return -ENOMEM;
+
+       /* Status FIFO the target writes completion status blocks to. */
+       lu->address_handler.length           = 0x100;
+       lu->address_handler.address_callback = sbp2_status_write;
+       lu->address_handler.callback_data    = lu;
+
+       if (fw_core_add_address_handler(&lu->address_handler,
+                                       &fw_high_memory_region) < 0) {
+               kfree(lu);
+               return -ENOMEM;
+       }
+
+       lu->tgt      = tgt;
+       lu->lun      = lun_entry & 0xffff;
+       lu->login_id = INVALID_LOGIN_ID;
+       lu->retries  = 0;
+       lu->has_sdev = false;
+       lu->blocked  = false;
+       /* Keep the target unblockable until this lu finished its setup. */
+       ++tgt->dont_block;
+       INIT_LIST_HEAD(&lu->orb_list);
+       INIT_DELAYED_WORK(&lu->work, sbp2_login);
+
+       list_add_tail(&lu->link, &tgt->lu_list);
+       return 0;
+}
+
+/*
+ * Walk a logical unit directory in the config ROM and add one logical
+ * unit per LUN entry.  Returns 0 on success, -ENOMEM on failure.
+ */
+static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
+{
+       struct fw_csr_iterator ci;
+       int key, value;
+
+       fw_csr_iterator_init(&ci, directory);
+       while (fw_csr_iterator_next(&ci, &key, &value))
+               if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER &&
+                   sbp2_add_logical_unit(tgt, value) < 0)
+                       return -ENOMEM;
+       return 0;
+}
+
+/*
+ * Walk the SBP-2 unit directory in the config ROM: record the management
+ * agent address, directory ID and ORB timeout on @tgt, report model and
+ * firmware revision through the out parameters, and create logical units
+ * for all LUN entries and LUN directories found.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
+                             u32 *model, u32 *firmware_revision)
+{
+       struct fw_csr_iterator ci;
+       int key, value;
+       unsigned int timeout;
+
+       fw_csr_iterator_init(&ci, directory);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               switch (key) {
+
+               case CSR_DEPENDENT_INFO | CSR_OFFSET:
+                       /* value is an offset in quadlets into CSR space */
+                       tgt->management_agent_address =
+                                       CSR_REGISTER_BASE + 4 * value;
+                       break;
+
+               case CSR_DIRECTORY_ID:
+                       tgt->directory_id = value;
+                       break;
+
+               case CSR_MODEL:
+                       *model = value;
+                       break;
+
+               case SBP2_CSR_FIRMWARE_REVISION:
+                       *firmware_revision = value;
+                       break;
+
+               case SBP2_CSR_UNIT_CHARACTERISTICS:
+                       /* the timeout value is stored in 500ms units */
+                       timeout = ((unsigned int) value >> 8 & 0xff) * 500;
+                       timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
+                       tgt->mgt_orb_timeout =
+                                 min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);
+
+                       if (timeout > tgt->mgt_orb_timeout)
+                               fw_notify("%s: config rom contains %ds "
+                                         "management ORB timeout, limiting "
+                                         "to %ds\n", tgt->bus_id,
+                                         timeout / 1000,
+                                         tgt->mgt_orb_timeout / 1000);
+                       break;
+
+               case SBP2_CSR_LOGICAL_UNIT_NUMBER:
+                       if (sbp2_add_logical_unit(tgt, value) < 0)
+                               return -ENOMEM;
+                       break;
+
+               case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
+                       /* Adjust for the increment in the iterator */
+                       if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
+                               return -ENOMEM;
+                       break;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Combine the user-supplied workarounds module parameter with any quirk
+ * table entry matching the device's firmware revision and model, and
+ * store the result in tgt->workarounds.  SBP2_WORKAROUND_OVERRIDE makes
+ * the module parameter win outright.
+ */
+static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
+                                 u32 firmware_revision)
+{
+       int i;
+       unsigned int w = sbp2_param_workarounds;
+
+       if (w)
+               fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
+                         "if you need the workarounds parameter for %s\n",
+                         tgt->bus_id);
+
+       if (w & SBP2_WORKAROUND_OVERRIDE)
+               goto out;
+
+       for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
+
+               /* Table matches on the top 24 bits of firmware_revision. */
+               if (sbp2_workarounds_table[i].firmware_revision !=
+                   (firmware_revision & 0xffffff00))
+                       continue;
+
+               if (sbp2_workarounds_table[i].model != model &&
+                   sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD)
+                       continue;
+
+               w |= sbp2_workarounds_table[i].workarounds;
+               break;
+       }
+ out:
+       if (w)
+               fw_notify("Workarounds for %s: 0x%x "
+                         "(firmware_revision 0x%06x, model_id 0x%06x)\n",
+                         tgt->bus_id, w, firmware_revision, model);
+       tgt->workarounds = w;
+}
+
+static struct scsi_host_template scsi_driver_template;
+
+/*
+ * Driver probe: allocate a Scsi_Host with an sbp2_target embedded in its
+ * hostdata, scan the unit directory for logical units, apply quirks, and
+ * schedule a login job for each logical unit found.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+static int sbp2_probe(struct device *dev)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       struct fw_device *device = fw_device(unit->device.parent);
+       struct sbp2_target *tgt;
+       struct sbp2_logical_unit *lu;
+       struct Scsi_Host *shost;
+       u32 model, firmware_revision;
+
+       /* Page table entries cannot describe segments larger than this. */
+       if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
+               BUG_ON(dma_set_max_seg_size(device->card->device,
+                                           SBP2_MAX_SEG_SIZE));
+
+       shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
+       if (shost == NULL)
+               return -ENOMEM;
+
+       tgt = (struct sbp2_target *)shost->hostdata;
+       unit->device.driver_data = tgt;
+       tgt->unit = unit;
+       kref_init(&tgt->kref);
+       INIT_LIST_HEAD(&tgt->lu_list);
+       tgt->bus_id = dev_name(&unit->device);
+       tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
+
+       if (fw_device_enable_phys_dma(device) < 0)
+               goto fail_shost_put;
+
+       if (scsi_add_host(shost, &unit->device) < 0)
+               goto fail_shost_put;
+
+       /* Dropped again by sbp2_release_target(). */
+       fw_device_get(device);
+       fw_unit_get(unit);
+
+       /* implicit directory ID */
+       tgt->directory_id = ((unit->directory - device->config_rom) * 4
+                            + CSR_CONFIG_ROM) & 0xffffff;
+
+       firmware_revision = SBP2_ROM_VALUE_MISSING;
+       model             = SBP2_ROM_VALUE_MISSING;
+
+       if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
+                              &firmware_revision) < 0)
+               goto fail_tgt_put;
+
+       sbp2_init_workarounds(tgt, model, firmware_revision);
+
+       /*
+        * At S100 we can do 512 bytes per packet, at S200 1024 bytes,
+        * and so on up to 4096 bytes.  The SBP-2 max_payload field
+        * specifies the max payload size as 2 ^ (max_payload + 2), so
+        * if we set this to max_speed + 7, we get the right value.
+        */
+       tgt->max_payload = min(device->max_speed + 7, 10U);
+       tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1);
+
+       /* Do the login in a workqueue so we can easily reschedule retries. */
+       list_for_each_entry(lu, &tgt->lu_list, link)
+               sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+       return 0;
+
+ fail_tgt_put:
+       sbp2_target_put(tgt);
+       return -ENOMEM;
+
+ fail_shost_put:
+       scsi_host_put(shost);
+       return -ENOMEM;
+}
+
+/*
+ * Driver remove: drop the probe's target reference; the actual teardown
+ * happens in sbp2_release_target() once all other references are gone.
+ */
+static int sbp2_remove(struct device *dev)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       struct sbp2_target *tgt = unit->device.driver_data;
+
+       sbp2_target_put(tgt);
+       return 0;
+}
+
+/*
+ * Workqueue job run after a bus reset: try to reconnect the existing
+ * login.  If reconnect fails in the current generation (or too often),
+ * fall back to a fresh login.  Runs with a target reference held via
+ * sbp2_queue_work(); drops it on return.
+ */
+static void sbp2_reconnect(struct work_struct *work)
+{
+       struct sbp2_logical_unit *lu =
+               container_of(work, struct sbp2_logical_unit, work.work);
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_device *device = fw_device(tgt->unit->device.parent);
+       int generation, node_id, local_node_id;
+
+       if (fw_device_is_shutdown(device))
+               goto out;
+
+       generation    = device->generation;
+       smp_rmb();    /* node IDs must not be older than generation */
+       node_id       = device->node_id;
+       local_node_id = device->card->node_id;
+
+       if (sbp2_send_management_orb(lu, node_id, generation,
+                                    SBP2_RECONNECT_REQUEST,
+                                    lu->login_id, NULL) < 0) {
+               /*
+                * If reconnect was impossible even though we are in the
+                * current generation, fall back and try to log in again.
+                *
+                * We could check for "Function rejected" status, but
+                * looking at the bus generation as simpler and more general.
+                */
+               smp_rmb(); /* get current card generation */
+               if (generation == device->card->generation ||
+                   lu->retries++ >= 5) {
+                       fw_error("%s: failed to reconnect\n", tgt->bus_id);
+                       lu->retries = 0;
+                       PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+               }
+               sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+               goto out;
+       }
+
+       tgt->node_id      = node_id;
+       tgt->address_high = local_node_id << 16;
+       smp_wmb();        /* node IDs must not be older than generation */
+       lu->generation    = generation;
+
+       fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
+                 tgt->bus_id, lu->lun, lu->retries);
+
+       sbp2_agent_reset(lu);
+       /* ORBs in flight across the bus reset are lost; fail them now. */
+       sbp2_cancel_orbs(lu);
+       sbp2_conditionally_unblock(lu);
+ out:
+       sbp2_target_put(tgt);
+}
+
+/*
+ * Bus reset notification from the firewire core: re-enable physical DMA
+ * and kick each logical unit's work item (reconnect or login) while
+ * blocking SCSI traffic for stale generations.
+ */
+static void sbp2_update(struct fw_unit *unit)
+{
+       struct sbp2_target *tgt = unit->device.driver_data;
+       struct sbp2_logical_unit *lu;
+
+       fw_device_enable_phys_dma(fw_device(unit->device.parent));
+
+       /*
+        * Fw-core serializes sbp2_update() against sbp2_remove().
+        * Iteration over tgt->lu_list is therefore safe here.
+        */
+       list_for_each_entry(lu, &tgt->lu_list, link) {
+               sbp2_conditionally_block(lu);
+               lu->retries = 0;
+               sbp2_queue_work(lu, 0);
+       }
+}
+
+#define SBP2_UNIT_SPEC_ID_ENTRY        0x0000609e
+#define SBP2_SW_VERSION_ENTRY  0x00010483
+
+/* Match any unit directory advertising the SBP-2 spec ID and version. */
+static const struct ieee1394_device_id sbp2_id_table[] = {
+       {
+               .match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
+                               IEEE1394_MATCH_VERSION,
+               .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
+               .version      = SBP2_SW_VERSION_ENTRY,
+       },
+       { }
+};
+
+/* Firewire bus driver glue: probe/remove plus bus-reset notification. */
+static struct fw_driver sbp2_driver = {
+       .driver   = {
+               .owner  = THIS_MODULE,
+               .name   = sbp2_driver_name,
+               .bus    = &fw_bus_type,
+               .probe  = sbp2_probe,
+               .remove = sbp2_remove,
+       },
+       .update   = sbp2_update,
+       .id_table = sbp2_id_table,
+};
+
+/*
+ * Undo the DMA mappings created by sbp2_map_scatterlist(): the scatter
+ * list itself and, if one was used, the ORB's page table.
+ */
+static void sbp2_unmap_scatterlist(struct device *card_device,
+                                  struct sbp2_command_orb *orb)
+{
+       if (scsi_sg_count(orb->cmd))
+               dma_unmap_sg(card_device, scsi_sglist(orb->cmd),
+                            scsi_sg_count(orb->cmd),
+                            orb->cmd->sc_data_direction);
+
+       /* The page-table bit in misc records whether a table was mapped. */
+       if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
+               dma_unmap_single(card_device, orb->page_table_bus,
+                                sizeof(orb->page_table), DMA_TO_DEVICE);
+}
+
+/*
+ * Rearrange the SBP-2 status block's sense fields into a fixed-format
+ * SCSI sense buffer (response code 0x70) and return the SCSI midlayer
+ * result word: DID_OK combined with the SAM status for recognized
+ * statuses, DID_ERROR otherwise.
+ */
+static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
+{
+       int sam_status;
+
+       sense_data[0] = 0x70;  /* current error, fixed format */
+       sense_data[1] = 0x0;
+       sense_data[2] = sbp2_status[1];
+       sense_data[3] = sbp2_status[4];
+       sense_data[4] = sbp2_status[5];
+       sense_data[5] = sbp2_status[6];
+       sense_data[6] = sbp2_status[7];
+       sense_data[7] = 10;    /* additional sense length */
+       sense_data[8] = sbp2_status[8];
+       sense_data[9] = sbp2_status[9];
+       sense_data[10] = sbp2_status[10];
+       sense_data[11] = sbp2_status[11];
+       sense_data[12] = sbp2_status[2];
+       sense_data[13] = sbp2_status[3];
+       sense_data[14] = sbp2_status[12];
+       sense_data[15] = sbp2_status[13];
+
+       sam_status = sbp2_status[0] & 0x3f;
+
+       switch (sam_status) {
+       case SAM_STAT_GOOD:
+       case SAM_STAT_CHECK_CONDITION:
+       case SAM_STAT_CONDITION_MET:
+       case SAM_STAT_BUSY:
+       case SAM_STAT_RESERVATION_CONFLICT:
+       case SAM_STAT_COMMAND_TERMINATED:
+               return DID_OK << 16 | sam_status;
+
+       default:
+               return DID_ERROR << 16;
+       }
+}
+
+/*
+ * Completion callback for command ORBs: translate the SBP-2 status block
+ * into a SCSI result (including sense data), release the DMA mappings,
+ * and hand the command back to the SCSI midlayer.
+ */
+static void complete_command_orb(struct sbp2_orb *base_orb,
+                                struct sbp2_status *status)
+{
+       struct sbp2_command_orb *orb =
+               container_of(base_orb, struct sbp2_command_orb, base);
+       struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent);
+       int result;
+
+       if (status != NULL) {
+               /* A dead fetch agent must be reset before it accepts ORBs. */
+               if (STATUS_GET_DEAD(*status))
+                       sbp2_agent_reset_no_wait(orb->lu);
+
+               switch (STATUS_GET_RESPONSE(*status)) {
+               case SBP2_STATUS_REQUEST_COMPLETE:
+                       result = DID_OK << 16;
+                       break;
+               case SBP2_STATUS_TRANSPORT_FAILURE:
+                       result = DID_BUS_BUSY << 16;
+                       break;
+               case SBP2_STATUS_ILLEGAL_REQUEST:
+               case SBP2_STATUS_VENDOR_DEPENDENT:
+               default:
+                       result = DID_ERROR << 16;
+                       break;
+               }
+
+               /* Status blocks longer than one quadlet carry sense data. */
+               if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
+                       result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
+                                                          orb->cmd->sense_buffer);
+       } else {
+               /*
+                * If the orb completes with status == NULL, something
+                * went wrong, typically a bus reset happened mid-orb
+                * or when sending the write (less likely).
+                */
+               result = DID_BUS_BUSY << 16;
+               sbp2_conditionally_block(orb->lu);
+       }
+
+       dma_unmap_single(device->card->device, orb->base.request_bus,
+                        sizeof(orb->request), DMA_TO_DEVICE);
+       sbp2_unmap_scatterlist(device->card->device, orb);
+
+       orb->cmd->result = result;
+       orb->done(orb->cmd);
+}
+
+/*
+ * DMA-map the command's scatter list and describe it in the ORB: a single
+ * segment becomes a direct data descriptor, multiple segments go through
+ * an SBP-2 page table mapped separately.  Returns 0 on success, -ENOMEM
+ * on mapping failure (all partial mappings undone).
+ */
+static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
+               struct fw_device *device, struct sbp2_logical_unit *lu)
+{
+       struct scatterlist *sg = scsi_sglist(orb->cmd);
+       int i, n;
+
+       n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
+                      orb->cmd->sc_data_direction);
+       if (n == 0)
+               goto fail;
+
+       /*
+        * Handle the special case where there is only one element in
+        * the scatter list by converting it to an immediate block
+        * request. This is also a workaround for broken devices such
+        * as the second generation iPod which doesn't support page
+        * tables.
+        */
+       if (n == 1) {
+               orb->request.data_descriptor.high =
+                       cpu_to_be32(lu->tgt->address_high);
+               orb->request.data_descriptor.low  =
+                       cpu_to_be32(sg_dma_address(sg));
+               orb->request.misc |=
+                       cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
+               return 0;
+       }
+
+       /* Page table entry: segment length in the high quadlet's top half. */
+       for_each_sg(sg, sg, n, i) {
+               orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
+               orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg));
+       }
+
+       orb->page_table_bus =
+               dma_map_single(device->card->device, orb->page_table,
+                              sizeof(orb->page_table), DMA_TO_DEVICE);
+       if (dma_mapping_error(device->card->device, orb->page_table_bus))
+               goto fail_page_table;
+
+       /*
+        * The data_descriptor pointer is the one case where we need
+        * to fill in the node ID part of the address.  All other
+        * pointers assume that the data referenced reside on the
+        * initiator (i.e. us), but data_descriptor can refer to data
+        * on other nodes so we need to put our ID in descriptor.high.
+        */
+       orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
+       orb->request.data_descriptor.low  = cpu_to_be32(orb->page_table_bus);
+       orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
+                                        COMMAND_ORB_DATA_SIZE(n));
+
+       return 0;
+
+ fail_page_table:
+       dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
+                    scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction);
+ fail:
+       return -ENOMEM;
+}
+
+/* SCSI stack integration */
+
+/*
+ * scsi_host_template.queuecommand implementation.
+ *
+ * Builds a command ORB for @cmd, DMA-maps the data buffer (if any) and
+ * the ORB itself, and points the target's command block agent at the
+ * ORB.  Completion is reported asynchronously through @done (see
+ * complete_command_orb).  Returns 0 when the command was accepted, or
+ * SCSI_MLQUEUE_HOST_BUSY to make the midlayer retry the command later.
+ */
+static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
+{
+       struct sbp2_logical_unit *lu = cmd->device->hostdata;
+       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+       struct sbp2_command_orb *orb;
+       int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
+
+       /*
+        * Bidirectional commands are not yet implemented, and unknown
+        * transfer direction not handled.
+        */
+       if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
+               fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
+               cmd->result = DID_ERROR << 16;
+               done(cmd);
+               return 0;
+       }
+
+       /* GFP_ATOMIC: queuecommand may be called in atomic context. */
+       orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
+       if (orb == NULL) {
+               fw_notify("failed to alloc orb\n");
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+
+       /* Initialize rcode to something not RCODE_COMPLETE. */
+       orb->base.rcode = -1;
+       kref_init(&orb->base.kref);
+
+       orb->lu   = lu;
+       orb->done = done;
+       orb->cmd  = cmd;
+
+       orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
+       orb->request.misc = cpu_to_be32(
+               COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
+               COMMAND_ORB_SPEED(device->max_speed) |
+               COMMAND_ORB_NOTIFY);
+
+       if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+               orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);
+
+       /*
+        * Sample the bus generation before tgt->address_high is read in
+        * sbp2_map_scatterlist(); the read barrier orders the two reads.
+        */
+       generation = device->generation;
+       smp_rmb();    /* sbp2_map_scatterlist looks at tgt->address_high */
+
+       if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
+               goto out;
+
+       memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len);
+
+       orb->base.callback = complete_command_orb;
+       orb->base.request_bus =
+               dma_map_single(device->card->device, &orb->request,
+                              sizeof(orb->request), DMA_TO_DEVICE);
+       if (dma_mapping_error(device->card->device, orb->base.request_bus)) {
+               sbp2_unmap_scatterlist(device->card->device, orb);
+               goto out;
+       }
+
+       sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
+                     lu->command_block_agent_address + SBP2_ORB_POINTER);
+       retval = 0;
+ out:
+       /* Drop the initial reference; free_orb runs once all refs are gone. */
+       kref_put(&orb->base.kref, free_orb);
+       return retval;
+}
+
+/*
+ * Called when the SCSI midlayer allocates a scsi_device for a logical
+ * unit.  Rejects devices that were not set up by this driver (no
+ * hostdata attached) and applies per-LU settings, including the
+ * 36-byte INQUIRY workaround for broken devices.
+ */
+static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
+{
+       struct sbp2_logical_unit *lu = sdev->hostdata;
+
+       /* (Re-)adding logical units via the SCSI stack is not supported. */
+       if (lu == NULL)
+               return -ENOSYS;
+
+       sdev->allow_restart = 1;
+
+       /* SBP-2 requires quadlet (4 byte) alignment of the data buffers. */
+       blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
+
+       if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
+               sdev->inquiry_len = 36;
+
+       return 0;
+}
+
+/*
+ * Called by the SCSI midlayer after device probe; applies generic SBP-2
+ * settings and per-target workaround flags (lu->tgt->workarounds) to
+ * the scsi_device.  Always returns 0.
+ */
+static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+{
+       struct sbp2_logical_unit *lu = sdev->hostdata;
+
+       /* Use 10-byte READ/WRITE CDBs rather than the 6-byte variants. */
+       sdev->use_10_for_rw = 1;
+
+       /* Let sd manage START STOP UNIT only while we hold the login. */
+       if (sbp2_param_exclusive_login)
+               sdev->manage_start_stop = 1;
+
+       if (sdev->type == TYPE_ROM)
+               sdev->use_10_for_ms = 1;
+
+       /* Device chokes on the caching mode page (page 8). */
+       if (sdev->type == TYPE_DISK &&
+           lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
+               sdev->skip_ms_page_8 = 1;
+
+       /* Device reports its capacity one sector too large; sd corrects. */
+       if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
+               sdev->fix_capacity = 1;
+
+       /* Device wants the power condition field in START STOP UNIT. */
+       if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
+               sdev->start_stop_pwr_cond = 1;
+
+       /* Device cannot handle transfers larger than 128 kB. */
+       if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
+               blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+
+       blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
+
+       return 0;
+}
+
+/*
+ * SCSI error handler entry point, typically reached after a command
+ * has timed out: reset the target's fetch agent and fail every
+ * outstanding ORB so the midlayer can recover.
+ */
+static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
+{
+       struct sbp2_logical_unit *lu = cmd->device->hostdata;
+
+       fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
+
+       sbp2_agent_reset(lu);
+       sbp2_cancel_orbs(lu);
+
+       return SUCCESS;
+}
+
+/*
+ * Format of /sys/bus/scsi/devices/.../ieee1394_id:
+ * u64 EUI-64 : u24 directory_ID : u16 LUN  (all printed in hexadecimal)
+ *
+ * This is the concatenation of target port identifier and logical unit
+ * identifier as per SAM-2...SAM-4 annex A.
+ */
+static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct sbp2_logical_unit *lu;
+
+       if (sdev == NULL)
+               return 0;
+       lu = sdev->hostdata;
+
+       return sprintf(buf, "%016llx:%06x:%04x\n",
+                       (unsigned long long)lu->tgt->guid,
+                       lu->tgt->directory_id, lu->lun);
+}
+
+static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
+
+/* Extra sysfs attributes attached to each SCSI device of an SBP-2 LU. */
+static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
+       &dev_attr_ieee1394_id,
+       NULL
+};
+
+/*
+ * SCSI host template.  can_queue = cmd_per_lun = 1, i.e. only a single
+ * command is outstanding at any time (cf. sbp2_scsi_queuecommand).
+ */
+static struct scsi_host_template scsi_driver_template = {
+       .module                 = THIS_MODULE,
+       .name                   = "SBP-2 IEEE-1394",
+       .proc_name              = sbp2_driver_name,
+       .queuecommand           = sbp2_scsi_queuecommand,
+       .slave_alloc            = sbp2_scsi_slave_alloc,
+       .slave_configure        = sbp2_scsi_slave_configure,
+       .eh_abort_handler       = sbp2_scsi_abort,
+       .this_id                = -1,
+       .sg_tablesize           = SG_ALL,
+       .use_clustering         = ENABLE_CLUSTERING,
+       .cmd_per_lun            = 1,
+       .can_queue              = 1,
+       .sdev_attrs             = sbp2_scsi_sysfs_attrs,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("SCSI over IEEE1394");
+MODULE_LICENSE("GPL");
+/* Export the unit ID table so userspace tools can autoload this module. */
+MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
+
+/* Provide a module alias so root-on-sbp2 initrds don't break. */
+#ifndef CONFIG_IEEE1394_SBP2_MODULE
+MODULE_ALIAS("sbp2");
+#endif
+
+/*
+ * Module initialization: create the driver's single-threaded workqueue,
+ * then register the driver on the bus.  If driver registration fails,
+ * tear the workqueue down again so a failed module load does not leak it
+ * (the original code returned the error without destroying sbp2_wq).
+ */
+static int __init sbp2_init(void)
+{
+       int ret;
+
+       sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
+       if (!sbp2_wq)
+               return -ENOMEM;
+
+       ret = driver_register(&sbp2_driver.driver);
+       if (ret)
+               destroy_workqueue(sbp2_wq);
+
+       return ret;
+}
+
+/*
+ * Module exit: unregister the driver from the bus first, then destroy
+ * the workqueue created in sbp2_init().
+ */
+static void __exit sbp2_cleanup(void)
+{
+       driver_unregister(&sbp2_driver.driver);
+       destroy_workqueue(sbp2_wq);
+}
+
+module_init(sbp2_init);
+module_exit(sbp2_cleanup);