/*
 * PCI Backend Operations - respond to PCI requests from Frontend
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <linux/sched.h>
#include "pciback.h"

/* Log every frontend request when non-zero (module parameter). */
int verbose_request;
module_param(verbose_request, int, 0644);
static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id);
/* Ensure a device has the fake IRQ handler "turned on/off" and is
 * ready to be exported. This MUST be run after xen_pcibk_reset_device
 * which does the actual PCI device enable/disable.
 */
static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
{
	struct xen_pcibk_dev_data *dev_data;
	int rc;
	int enable = 0;

	dev_data = pci_get_drvdata(dev);
	if (!dev_data)
		return;

	/* We don't deal with bridges */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return;

	if (reset) {
		dev_data->enable_intx = 0;
		dev_data->ack_intr = 0;
	}
	enable = dev_data->enable_intx;

	/* Asked to disable, but the ISR isn't running */
	if (!enable && !dev_data->isr_on)
		return;
	/* Squirrel away the IRQ in dev_data. We need this
	 * b/c when the device transitions to MSI, dev->irq is
	 * overwritten with the MSI vector.
	 */
	if (enable)
		dev_data->irq = dev->irq;
	/*
	 * SR-IOV devices all use MSI-X and have no legacy
	 * interrupts, so inhibit creating a fake IRQ handler for them.
	 */
	if (dev_data->irq == 0)
		goto out;
	dev_dbg(&dev->dev, "%s: #%d %s %s%s %s-> %s\n",
		dev_data->irq_name,
		dev_data->irq,
		pci_is_enabled(dev) ? "on" : "off",
		dev->msi_enabled ? "MSI" : "",
		dev->msix_enabled ? "MSI/X" : "",
		dev_data->isr_on ? "enable" : "disable",
		enable ? "enable" : "disable");
	if (enable) {
		rc = request_irq(dev_data->irq,
				xen_pcibk_guest_interrupt, IRQF_SHARED,
				dev_data->irq_name, dev);
		if (rc) {
			dev_err(&dev->dev, "%s: failed to install fake IRQ "
				"handler for IRQ %d! (rc:%d)\n",
				dev_data->irq_name, dev_data->irq, rc);
			goto out;
		}
	} else {
		free_irq(dev_data->irq, dev);
		dev_data->irq = 0;
	}
	dev_data->isr_on = enable;
	dev_data->ack_intr = enable;
out:
	dev_dbg(&dev->dev, "%s: #%d %s %s%s %s\n",
		dev_data->irq_name,
		dev_data->irq,
		pci_is_enabled(dev) ? "on" : "off",
		dev->msi_enabled ? "MSI" : "",
		dev->msix_enabled ? "MSI/X" : "",
		enable ? (dev_data->isr_on ? "enabled" : "failed to enable") :
			(dev_data->isr_on ? "failed to disable" : "disabled"));
}
/* Ensure a device is "turned off" and ready to be exported.
 * (Also see xen_pcibk_config_reset to ensure virtual configuration space is
 * ready to be re-exported.)
 */
void xen_pcibk_reset_device(struct pci_dev *dev)
{
	u16 cmd;

	xen_pcibk_control_isr(dev, 1 /* reset device */);
	/* Disable devices (but not bridges) */
	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
#ifdef CONFIG_PCI_MSI
		/* The guest could have been abruptly killed without
		 * disabling MSI/MSI-X interrupts. */
		if (dev->msix_enabled)
			pci_disable_msix(dev);
		if (dev->msi_enabled)
			pci_disable_msi(dev);
#endif
		pci_disable_device(dev);

		pci_write_config_word(dev, PCI_COMMAND, 0);

		atomic_set(&dev->enable_cnt, 0);
		dev->is_busmaster = 0;
	} else {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_INVALIDATE)) {
			cmd &= ~(PCI_COMMAND_INVALIDATE);
			pci_write_config_word(dev, PCI_COMMAND, cmd);

			dev->is_busmaster = 0;
		}
	}
}
#ifdef CONFIG_PCI_MSI
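/* Handle XEN_PCI_OP_enable_msi from the frontend: enable MSI on the real
 * device and return the resulting Xen PIRQ to the guest in op->value. */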
int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
			 struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	int otherend = pdev->xdev->otherend_id;
	int status;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));

	status = pci_enable_msi(dev);

	if (status) {
		printk(KERN_ERR "error enable msi for guest %x status %x\n",
			otherend, status);
		op->value = 0;
		return XEN_PCI_ERR_op_failed;
	}

	/* The value the guest needs is actually the IDT vector, not the
	 * local domain's IRQ number. */
	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
			op->value);

	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 0;

	return 0;
}
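/* Handle XEN_PCI_OP_disable_msi: disable MSI and report the device's legacy
 * PIRQ (or 0 if it has none) back to the guest in op->value. */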
int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
			  struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
		       pci_name(dev));
	pci_disable_msi(dev);

	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
			op->value);
	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 1;

	return 0;
}
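/* Handle XEN_PCI_OP_enable_msix: copy the requested entries out of the shared
 * op, enable MSI-X on the real device, and return each vector's PIRQ. */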
int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
			  struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	int i, result;
	struct msix_entry *entries;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
		       pci_name(dev));
	if (op->value > SH_INFO_MAX_VEC)
		return -EINVAL;

	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
	if (entries == NULL)
		return -ENOMEM;

	for (i = 0; i < op->value; i++) {
		entries[i].entry = op->msix_entries[i].entry;
		entries[i].vector = op->msix_entries[i].vector;
	}

	result = pci_enable_msix(dev, entries, op->value);
	if (result == 0) {
		for (i = 0; i < op->value; i++) {
			op->msix_entries[i].entry = entries[i].entry;
			if (entries[i].vector)
				op->msix_entries[i].vector =
					xen_pirq_from_irq(entries[i].vector);
			else
				op->msix_entries[i].vector = entries[i].vector;
			if (unlikely(verbose_request))
				printk(KERN_DEBUG DRV_NAME ": %s: "
					"MSI-X[%d]: %d\n",
					pci_name(dev), i,
					op->msix_entries[i].vector);
		}
	} else
		printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
			pci_name(dev), result);
	kfree(entries);

	op->value = result;
	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 0;

	return result > 0 ? 0 : result;
}
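/* Handle XEN_PCI_OP_disable_msix: disable MSI-X and report the device's
 * legacy PIRQ (or 0 for SR-IOV VFs, which have none) back to the guest. */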
int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
			   struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
			pci_name(dev));
	pci_disable_msix(dev);

	/*
	 * SR-IOV devices (which don't have any legacy IRQ) have
	 * an undefined IRQ value of zero.
	 */
	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
			op->value);
	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 1;

	return 0;
}
#endif
/*
 * Now the same evtchn is used for both pcifront conf_read_write requests
 * and PCIe AER frontend acks. We use a separate workqueue to schedule the
 * xen_pcibk conf_read_write service, to avoid conflicts with the aer_core
 * do_recovery job, which also uses the system default workqueue.
 */
void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
{
	/* Check that frontend is requesting an operation and that we are not
	 * already processing a request */
	if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
	    && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
		queue_work(xen_pcibk_wq, &pdev->op_work);
	}
	/* _XEN_PCIB_active should have been cleared by pcifront. Also make
	 * sure xen_pcibk is waiting for an ack by checking _PCIB_op_pending. */
	if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
	    && test_bit(_PCIB_op_pending, &pdev->flags)) {
		wake_up(&xen_pcibk_aer_wait_queue);
	}
}
/* The configuration space reads/writes must not be performed in atomic
 * context because some of the pci_* functions can sleep (mostly due to ACPI
 * use of semaphores). This function is intended to be called from a work
 * queue in process context, taking a struct xen_pcibk_device as a parameter. */
void xen_pcibk_do_op(struct work_struct *data)
{
	struct xen_pcibk_device *pdev =
		container_of(data, struct xen_pcibk_device, op_work);
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data = NULL;
	struct xen_pci_op *op = &pdev->sh_info->op;
	int test_intx = 0;

	dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else {
		dev_data = pci_get_drvdata(dev);
		if (dev_data)
			test_intx = dev_data->enable_intx;
		switch (op->cmd) {
		case XEN_PCI_OP_conf_read:
			op->err = xen_pcibk_config_read(dev,
				  op->offset, op->size, &op->value);
			break;
		case XEN_PCI_OP_conf_write:
			op->err = xen_pcibk_config_write(dev,
				  op->offset, op->size, op->value);
			break;
#ifdef CONFIG_PCI_MSI
		case XEN_PCI_OP_enable_msi:
			op->err = xen_pcibk_enable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msi:
			op->err = xen_pcibk_disable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_enable_msix:
			op->err = xen_pcibk_enable_msix(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msix:
			op->err = xen_pcibk_disable_msix(pdev, dev, op);
			break;
#endif
		default:
			op->err = XEN_PCI_ERR_not_implemented;
			break;
		}
	}
	if (!op->err && dev && dev_data) {
		/* Transition detected */
		if (dev_data->enable_intx != test_intx)
			xen_pcibk_control_isr(dev, 0 /* no reset */);
	}

	/* Tell the driver domain that we're done. */
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);
	/* Mark that we're done. */
	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_clear_bit(); /* /before/ final check for work */

	/* Check to see if the driver domain tried to start another request in
	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
	 */
	xen_pcibk_test_and_schedule_op(pdev);
}
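/* Interrupt handler for the event channel shared with the frontend: a
 * notification means there may be a new request, so check and schedule it. */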
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
{
	struct xen_pcibk_device *pdev = dev_id;
	xen_pcibk_test_and_schedule_op(pdev);
	return IRQ_HANDLED;
}
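/* Fake handler installed on the device's legacy IRQ (see
 * xen_pcibk_control_isr): claim the interrupt while the guest owns the
 * device so that a shared line is not treated as spurious in dom0. */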
static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id)
{
	struct pci_dev *dev = (struct pci_dev *)dev_id;
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);

	if (dev_data->isr_on && dev_data->ack_intr) {
		dev_data->handled++;
		if ((dev_data->handled % 1000) == 0) {
			if (xen_test_irq_shared(irq)) {
				printk(KERN_INFO "%s IRQ line is not shared "
					"with other domains. Turning ISR off\n",
					dev_data->irq_name);
				dev_data->ack_intr = 0;
			}
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}