/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm/setup.h>
#include "pci.h"
const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;
static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;
/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;
/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;
/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
				     pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
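
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a driver's probe() routine would typically map a memory BAR like
 * this; the BAR number and error handling are assumptions.
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 */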
#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}
#endif  /*  0  */
#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);
static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}
}
/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
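
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * locate the Power Management capability and read its PMC register;
 * the same pattern works for any capability ID listed above.
 *
 *	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *	if (pm) {
 *		u16 pmc;
 *		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
 *	}
 */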
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
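
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * check whether a device advertises Advanced Error Reporting.
 *
 *	int aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 *	if (aer)
 *		dev_info(&dev->dev, "AER capability at %#x\n", aer);
 */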
/**
 * pci_bus_find_ext_capability - find an extended capability
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_ext_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
				int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (pci_bus_read_config_dword(bus, devfn, pos, &header) !=
	    PCIBIOS_SUCCESSFUL)
		return 0;
	if (header == 0xffffffff || header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_bus_read_config_dword(bus, devfn, pos, &header) !=
		    PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}
/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}
EXPORT_SYMBOL_GPL(pci_restore_bars);
static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}
static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state; we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}
/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}
/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}
/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state >= PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}
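
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a driver's suspend path would typically end by dropping the device
 * into D3hot; out-of-range requests are bounded as described above.
 *
 *	pci_save_state(pdev);
 *	pci_disable_device(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */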
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
#define PCI_EXP_SAVE_REGS	7

#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos;

	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}
static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}
static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}
static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
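
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the classic legacy suspend/resume pairing of the two calls above,
 * with pci_choose_state() picking the sleep state; the "foo_" handler
 * names are assumptions.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t msg)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, msg));
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */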
struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};
/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);
/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
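
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * snapshot config space before an operation that may clobber it, then
 * reload and free the snapshot afterwards.
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);
 *	saved = pci_store_saved_state(pdev);
 *	(operation that disturbs the device goes here)
 *	pci_load_and_free_saved_state(pdev, &saved);
 *	pci_restore_state(pdev);
 */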
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}
/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}
/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}
/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
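
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the canonical start of a probe() routine; the region name and error
 * label are assumptions.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		goto err_disable;
 */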
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}
static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}
/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
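
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * with the managed variant, no explicit pci_disable_device() is needed
 * on probe() error paths or in remove(); devres undoes the enable on
 * driver detach.
 *
 *	rc = pcim_enable_device(pdev);
 *	if (rc)
 *		return rc;
 */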
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}
/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}
/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
							enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}
/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}
/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep.  The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set.  The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
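
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a NIC suspend path arming Wake-on-LAN; "priv->wol_enabled" is an
 * assumed driver-private flag.
 *
 *	pci_wake_from_d3(pdev, priv->wol_enabled);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */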
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}
/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
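
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * pci_prepare_to_sleep() and pci_back_from_sleep() bracket a system
 * sleep transition for drivers that let the PCI core pick the state.
 *
 *	suspend:	pci_save_state(pdev);
 *			pci_prepare_to_sleep(pdev);
 *	resume:		pci_back_from_sleep(pdev);
 *			pci_restore_state(pdev);
 */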
/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		__pci_enable_wake(dev, target_state, true, false);

	return error;
}
/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (device_run_wake(&dev->dev))
		return true;

	if (!dev->pme_support)
		return false;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_run_wake(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_run_wake(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}
/**
 * platform_pci_wakeup_init - init platform wakeup if present
 * @dev: PCI device
 *
 * Some devices don't have PCI PM caps but can still generate wakeup
 * events through platform methods (like ACPI events).  If @dev supports
 * platform wakeup events, set the device flag to indicate as much.  This
 * may be redundant if the device also supports PCI PM caps, but double
 * initialization should be safe in that case.
 */
void platform_pci_wakeup_init(struct pci_dev *dev)
{
	if (!platform_pci_can_wakeup(dev))
		return;

	device_set_wakeup_capable(&dev->dev, true);
	platform_pci_sleep_wake(dev, false);
}
static void pci_add_saved_cap(struct pci_dev *pci_dev,
			      struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}
/**
 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
 */
static int pci_add_cap_save_buffer(
	struct pci_dev *dev, char cap, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, cap);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap.cap_nr = cap;
	save_state->cap.size = size;
	pci_add_saved_cap(dev, save_state);

	return 0;
}
/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");
}
void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos, *n;

	hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
		kfree(tmp);
}
/**
 * pci_enable_ari - enable ARI forwarding if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 flags, ctrl;
	struct pci_dev *bridge;

	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	bridge = dev->bus->self;
	if (!bridge || !pci_is_pcie(bridge))
		return;

	pos = pci_pcie_cap(bridge);
	if (!pos)
		return;

	/* ARI is a PCIe v2 feature */
	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
		return;

	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}
/**
 * pci_enable_ido - enable ID-based ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to enable
 *
 * Enable ID-based ordering on @dev.  @type can contain the bits
 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
 * which types of transactions are allowed to be re-ordered.
 */
void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
	int pos;
	u16 ctrl;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (type & PCI_EXP_IDO_REQUEST)
		ctrl |= PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl |= PCI_EXP_IDO_CMP_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_enable_ido);
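
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * allow both request and completion TLPs from this device to be
 * re-ordered by ID.
 *
 *	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 */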
/**
 * pci_disable_ido - disable ID-based ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to disable
 */
void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
	int pos;
	u16 ctrl;

	if (!pci_is_pcie(dev))
		return;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (type & PCI_EXP_IDO_REQUEST)
		ctrl &= ~PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl &= ~PCI_EXP_IDO_CMP_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ido);
/**
 * pci_enable_obff - enable optimized buffer flush/fill
 * @dev: PCI device
 * @type: type of signaling to use
 *
 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
 * signaling if possible, falling back to message signaling only if
 * WAKE# isn't supported.  @type should indicate whether the PCIe link
 * should be brought out of L0s or L1 to send the message.  It should be
 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
 *
 * If your device can benefit from receiving all messages, even at the
 * power cost of bringing the link back up from a low power state, use
 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
 * preferred type).
 *
 * RETURNS:
 * Zero on success, appropriate error number on failure.
 */
int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
{
	int pos;
	u32 cap;
	u16 ctrl;
	int ret;

	if (!pci_is_pcie(dev))
		return -ENOTSUPP;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTSUPP;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_OBFF_MASK))
		return -ENOTSUPP; /* no OBFF support at all */

	/* Make sure the topology supports OBFF as well */
	if (dev->bus->self) {
		ret = pci_enable_obff(dev->bus->self, type);
		if (ret)
			return ret;
	}

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (cap & PCI_EXP_OBFF_WAKE)
		ctrl |= PCI_EXP_OBFF_WAKE_EN;
	else {
		switch (type) {
		case PCI_EXP_OBFF_SIGNAL_L0:
			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
				ctrl |= PCI_EXP_OBFF_MSGA_EN;
			break;
		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
			ctrl |= PCI_EXP_OBFF_MSGB_EN;
			break;
		default:
			WARN(1, "bad OBFF signal type\n");
			return -ENOTSUPP;
		}
	}
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);

	return 0;
}
EXPORT_SYMBOL(pci_enable_obff);
/**
 * pci_disable_obff - disable optimized buffer flush/fill
 * @dev: PCI device
 *
 * Disable OBFF on @dev.
 */
void pci_disable_obff(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;

	if (!pci_is_pcie(dev))
		return;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_obff);
/**
 * pci_ltr_supported - check whether a device supports LTR
 * @dev: PCI device
 *
 * RETURNS:
 * True if @dev supports latency tolerance reporting, false otherwise.
 */
bool pci_ltr_supported(struct pci_dev *dev)
{
	int pos;
	u32 cap;

	if (!pci_is_pcie(dev))
		return false;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return false;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);

	return cap & PCI_EXP_DEVCAP2_LTR;
}
EXPORT_SYMBOL(pci_ltr_supported);
/**
 * pci_enable_ltr - enable latency tolerance reporting
 * @dev: PCI device
 *
 * Enable LTR on @dev if possible, which means enabling it first on
 * upstream ports.
 *
 * RETURNS:
 * Zero on success, errno on failure.
 */
int pci_enable_ltr(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;
	int ret;

	if (!pci_ltr_supported(dev))
		return -ENOTSUPP;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTSUPP;

	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return -EINVAL;

	/* Enable upstream ports first */
	if (dev->bus->self) {
		ret = pci_enable_ltr(dev->bus->self);
		if (ret)
			return ret;
	}

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_LTR_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);

	return 0;
}
EXPORT_SYMBOL(pci_enable_ltr);
/**
 * pci_disable_ltr - disable latency tolerance reporting
 * @dev: PCI device
 */
void pci_disable_ltr(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;

	if (!pci_ltr_supported(dev))
		return;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return;

	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl &= ~PCI_EXP_LTR_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ltr);
static int __pci_ltr_scale(int *val)
{
	int scale = 0;

	while (*val > 1023) {
		*val = (*val + 31) / 32;
		scale++;
	}
	return scale;
}
/**
 * pci_set_ltr - set LTR latency values
 * @dev: PCI device
 * @snoop_lat_ns: snoop latency in nanoseconds
 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
 *
 * Figure out the scale and set the LTR values accordingly.
 */
int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
{
	int pos, ret, snoop_scale, nosnoop_scale;
	u16 val;

	if (!pci_ltr_supported(dev))
		return -ENOTSUPP;

	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);

	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
		return -EINVAL;

	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
		return -EINVAL;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!pos)
		return -ENOTSUPP;

	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(pci_set_ltr);
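
/*
 * Worked example (editor's illustration, not part of the original file):
 * for a requested 3000 ns snoop latency, __pci_ltr_scale() above yields
 * (3000 + 31) / 32 = 94 with scale 1 (units of 32 ns), so the register
 * encodes 94 * 32 = 3008 ns, the closest representable value not below
 * the request.
 *
 *	ret = pci_set_ltr(pdev, 3000, 3000);
 */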
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}
2329 * pci_enable_acs - enable ACS if hardware supports it
2330 * @dev: the PCI device
2332 void pci_enable_acs(struct pci_dev *dev)
2338 if (!pci_acs_enable)
2341 if (!pci_is_pcie(dev))
2344 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2348 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2349 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2351 /* Source Validation */
2352 ctrl |= (cap & PCI_ACS_SV);
2354 /* P2P Request Redirect */
2355 ctrl |= (cap & PCI_ACS_RR);
2357 /* P2P Completion Redirect */
2358 ctrl |= (cap & PCI_ACS_CR);
2360 /* Upstream Forwarding */
2361 ctrl |= (cap & PCI_ACS_UF);
2363 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2367 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2368 * @dev: the PCI device
2369 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2371 * Perform INTx swizzling for a device behind one level of bridge. This is
2372 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2373 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2374 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2375 * the PCI Express Base Specification, Revision 2.1)
2377 u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2381 if (pci_ari_enabled(dev->bus))
2382 slot = 0;
2383 else
2384 slot = PCI_SLOT(dev->devfn);
2386 return (((pin - 1) + slot) % 4) + 1;
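/*
 * Worked example: a device in slot 2 asserting INTA (pin 1) behind one
 * bridge swizzles to (((1 - 1) + 2) % 4) + 1 = 3, i.e. INTC at the bridge.
 */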
2390 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2398 while (!pci_is_root_bus(dev->bus)) {
2399 pin = pci_swizzle_interrupt_pin(dev, pin);
2400 dev = dev->bus->self;
2407 * pci_common_swizzle - swizzle INTx all the way to root bridge
2408 * @dev: the PCI device
2409 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2411 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2412 * bridges all the way up to a PCI root bus.
2414 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2418 while (!pci_is_root_bus(dev->bus)) {
2419 pin = pci_swizzle_interrupt_pin(dev, pin);
2420 dev = dev->bus->self;
2422 *pinp = pin;
2423 return PCI_SLOT(dev->devfn);
2427 * pci_release_region - Release a PCI BAR
2428 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2429 * @bar: BAR to release
2431 * Releases the PCI I/O and memory resources previously reserved by a
2432 * successful call to pci_request_region. Call this function only
2433 * after all use of the PCI regions has ceased.
2435 void pci_release_region(struct pci_dev *pdev, int bar)
2437 struct pci_devres *dr;
2439 if (pci_resource_len(pdev, bar) == 0)
2441 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2442 release_region(pci_resource_start(pdev, bar),
2443 pci_resource_len(pdev, bar));
2444 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2445 release_mem_region(pci_resource_start(pdev, bar),
2446 pci_resource_len(pdev, bar));
2448 dr = find_pci_dr(pdev);
2449 if (dr)
2450 dr->region_mask &= ~(1 << bar);
2454 * __pci_request_region - Reserve PCI I/O and memory resource
2455 * @pdev: PCI device whose resources are to be reserved
2456 * @bar: BAR to be reserved
2457 * @res_name: Name to be associated with resource.
2458 * @exclusive: whether the region access is exclusive or not
2460 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2461 * being reserved by owner @res_name. Do not access any
2462 * address inside the PCI regions unless this call returns
2463 * successfully.
2465 * If @exclusive is set, then the region is marked so that userspace
2466 * is explicitly not allowed to map the resource via /dev/mem or
2467 * sysfs MMIO access.
2469 * Returns 0 on success, or %EBUSY on error. A warning
2470 * message is also printed on failure.
2472 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2475 struct pci_devres *dr;
2477 if (pci_resource_len(pdev, bar) == 0)
2480 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2481 if (!request_region(pci_resource_start(pdev, bar),
2482 pci_resource_len(pdev, bar), res_name))
2485 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2486 if (!__request_mem_region(pci_resource_start(pdev, bar),
2487 pci_resource_len(pdev, bar), res_name,
2492 dr = find_pci_dr(pdev);
2493 if (dr)
2494 dr->region_mask |= 1 << bar;
2499 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2500 &pdev->resource[bar]);
2505 * pci_request_region - Reserve PCI I/O and memory resource
2506 * @pdev: PCI device whose resources are to be reserved
2507 * @bar: BAR to be reserved
2508 * @res_name: Name to be associated with resource
2510 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2511 * being reserved by owner @res_name. Do not access any
2512 * address inside the PCI regions unless this call returns
2513 * successfully.
2515 * Returns 0 on success, or %EBUSY on error. A warning
2516 * message is also printed on failure.
2518 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2520 return __pci_request_region(pdev, bar, res_name, 0);
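/*
 * Illustrative sketch (assumption): claiming and mapping a single BAR
 * from a hypothetical probe routine; "foo" is a made-up owner name.
 *
 *	void __iomem *regs;
 *
 *	if (pci_request_region(pdev, 0, "foo"))
 *		return -EBUSY;
 *	regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs) {
 *		pci_release_region(pdev, 0);
 *		return -ENOMEM;
 *	}
 */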
2524 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2525 * @pdev: PCI device whose resources are to be reserved
2526 * @bar: BAR to be reserved
2527 * @res_name: Name to be associated with resource.
2529 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2530 * being reserved by owner @res_name. Do not access any
2531 * address inside the PCI regions unless this call returns
2532 * successfully.
2534 * Returns 0 on success, or %EBUSY on error. A warning
2535 * message is also printed on failure.
2537 * The key difference that _exclusive makes is that userspace is
2538 * explicitly not allowed to map the resource via /dev/mem or
2539 * sysfs.
2541 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2543 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2546 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2547 * @pdev: PCI device whose resources were previously reserved
2548 * @bars: Bitmask of BARs to be released
2550 * Release selected PCI I/O and memory resources previously reserved.
2551 * Call this function only after all use of the PCI regions has ceased.
2553 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2557 for (i = 0; i < 6; i++)
2558 if (bars & (1 << i))
2559 pci_release_region(pdev, i);
2562 int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2563 const char *res_name, int excl)
2567 for (i = 0; i < 6; i++)
2568 if (bars & (1 << i))
2569 if (__pci_request_region(pdev, i, res_name, excl))
2570 goto err_out;
2571 return 0;
2573 err_out:
2574 while (--i >= 0)
2575 if (bars & (1 << i))
2576 pci_release_region(pdev, i);
2583 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2584 * @pdev: PCI device whose resources are to be reserved
2585 * @bars: Bitmask of BARs to be requested
2586 * @res_name: Name to be associated with resource
2588 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2589 const char *res_name)
2591 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2594 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2595 int bars, const char *res_name)
2597 return __pci_request_selected_regions(pdev, bars, res_name,
2598 IORESOURCE_EXCLUSIVE);
2602 * pci_release_regions - Release reserved PCI I/O and memory resources
2603 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2605 * Releases all PCI I/O and memory resources previously reserved by a
2606 * successful call to pci_request_regions. Call this function only
2607 * after all use of the PCI regions has ceased.
2610 void pci_release_regions(struct pci_dev *pdev)
2612 pci_release_selected_regions(pdev, (1 << 6) - 1);
2616 * pci_request_regions - Reserve PCI I/O and memory resources
2617 * @pdev: PCI device whose resources are to be reserved
2618 * @res_name: Name to be associated with resource.
2620 * Mark all PCI regions associated with PCI device @pdev as
2621 * being reserved by owner @res_name. Do not access any
2622 * address inside the PCI regions unless this call returns
2623 * successfully.
2625 * Returns 0 on success, or %EBUSY on error. A warning
2626 * message is also printed on failure.
2628 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2630 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2634 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2635 * @pdev: PCI device whose resources are to be reserved
2636 * @res_name: Name to be associated with resource.
2638 * Mark all PCI regions associated with PCI device @pdev as
2639 * being reserved by owner @res_name. Do not access any
2640 * address inside the PCI regions unless this call returns
2641 * successfully.
2643 * pci_request_regions_exclusive() will mark the region so that
2644 * /dev/mem and the sysfs MMIO access will not be allowed.
2646 * Returns 0 on success, or %EBUSY on error. A warning
2647 * message is also printed on failure.
2649 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2651 return pci_request_selected_regions_exclusive(pdev,
2652 ((1 << 6) - 1), res_name);
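/*
 * Illustrative sketch (assumption): the canonical order in which a
 * hypothetical driver uses the request/release helpers above during
 * probe; the error label and "foo" name are invented for the example.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		goto err_disable;
 *	pci_set_master(pdev);
 *
 * and on the error/remove path, in reverse:
 *
 *	pci_release_regions(pdev);
 * err_disable:
 *	pci_disable_device(pdev);
 */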
2655 static void __pci_set_master(struct pci_dev *dev, bool enable)
2659 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2660 if (enable)
2661 cmd = old_cmd | PCI_COMMAND_MASTER;
2662 else
2663 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2664 if (cmd != old_cmd) {
2665 dev_dbg(&dev->dev, "%s bus mastering\n",
2666 enable ? "enabling" : "disabling");
2667 pci_write_config_word(dev, PCI_COMMAND, cmd);
2669 dev->is_busmaster = enable;
2673 * pcibios_set_master - enable PCI bus-mastering for device dev
2674 * @dev: the PCI device to enable
2676 * Enables PCI bus-mastering for the device. This is the default
2677 * implementation. Architecture specific implementations can override
2678 * this if necessary.
2680 void __weak pcibios_set_master(struct pci_dev *dev)
2684 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2685 if (pci_is_pcie(dev))
2688 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2689 if (lat < 16)
2690 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2691 else if (lat > pcibios_max_latency)
2692 lat = pcibios_max_latency;
2695 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2696 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2700 * pci_set_master - enables bus-mastering for device dev
2701 * @dev: the PCI device to enable
2703 * Enables bus-mastering on the device and calls pcibios_set_master()
2704 * to do the needed arch specific settings.
2706 void pci_set_master(struct pci_dev *dev)
2708 __pci_set_master(dev, true);
2709 pcibios_set_master(dev);
2713 * pci_clear_master - disables bus-mastering for device dev
2714 * @dev: the PCI device to disable
2716 void pci_clear_master(struct pci_dev *dev)
2718 __pci_set_master(dev, false);
2722 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2723 * @dev: the PCI device for which MWI is to be enabled
2725 * Helper function for pci_set_mwi.
2726 * Originally copied from drivers/net/acenic.c.
2727 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2729 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2731 int pci_set_cacheline_size(struct pci_dev *dev)
2735 if (!pci_cache_line_size)
2738 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2739 equal to or a multiple of the right value. */
2740 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2741 if (cacheline_size >= pci_cache_line_size &&
2742 (cacheline_size % pci_cache_line_size) == 0)
2745 /* Write the correct value. */
2746 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2748 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2749 if (cacheline_size == pci_cache_line_size)
2752 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2753 "supported\n", pci_cache_line_size << 2);
2757 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2759 #ifdef PCI_DISABLE_MWI
2760 int pci_set_mwi(struct pci_dev *dev)
2765 int pci_try_set_mwi(struct pci_dev *dev)
2770 void pci_clear_mwi(struct pci_dev *dev)
2777 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2778 * @dev: the PCI device for which MWI is enabled
2780 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2782 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2785 pci_set_mwi(struct pci_dev *dev)
2790 rc = pci_set_cacheline_size(dev);
2794 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2795 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2796 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2797 cmd |= PCI_COMMAND_INVALIDATE;
2798 pci_write_config_word(dev, PCI_COMMAND, cmd);
2805 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2806 * @dev: the PCI device for which MWI is enabled
2808 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2809 * Callers are not required to check the return value.
2811 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2813 int pci_try_set_mwi(struct pci_dev *dev)
2815 int rc = pci_set_mwi(dev);
2816 return rc;
2820 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2821 * @dev: the PCI device to disable
2823 * Disables PCI Memory-Write-Invalidate transaction on the device
2826 pci_clear_mwi(struct pci_dev *dev)
2830 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2831 if (cmd & PCI_COMMAND_INVALIDATE) {
2832 cmd &= ~PCI_COMMAND_INVALIDATE;
2833 pci_write_config_word(dev, PCI_COMMAND, cmd);
2836 #endif /* ! PCI_DISABLE_MWI */
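/*
 * Illustrative sketch (assumption): since MWI is only an optimization,
 * a driver would typically use the _try_ variant and ignore the result:
 *
 *	pci_try_set_mwi(pdev);
 */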
2839 * pci_intx - enables/disables PCI INTx for device dev
2840 * @pdev: the PCI device to operate on
2841 * @enable: boolean: whether to enable or disable PCI INTx
2843 * Enables/disables PCI INTx for device dev
2846 pci_intx(struct pci_dev *pdev, int enable)
2848 u16 pci_command, new;
2850 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2852 if (enable)
2853 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2854 else
2855 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2858 if (new != pci_command) {
2859 struct pci_devres *dr;
2861 pci_write_config_word(pdev, PCI_COMMAND, new);
2863 dr = find_pci_dr(pdev);
2864 if (dr && !dr->restore_intx) {
2865 dr->restore_intx = 1;
2866 dr->orig_intx = !enable;
2872 * pci_intx_mask_supported - probe for INTx masking support
2873 * @dev: the PCI device to operate on
2875 * Check if the device dev supports INTx masking via the config space
2876 * command word.
2878 bool pci_intx_mask_supported(struct pci_dev *dev)
2880 bool mask_supported = false;
2883 pci_cfg_access_lock(dev);
2885 pci_read_config_word(dev, PCI_COMMAND, &orig);
2886 pci_write_config_word(dev, PCI_COMMAND,
2887 orig ^ PCI_COMMAND_INTX_DISABLE);
2888 pci_read_config_word(dev, PCI_COMMAND, &new);
2891 * There's no way to protect against hardware bugs or detect them
2892 * reliably, but as long as we know what the value should be, let's
2893 * go ahead and check it.
2895 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2896 dev_err(&dev->dev, "Command register changed from "
2897 "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2898 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2899 mask_supported = true;
2900 pci_write_config_word(dev, PCI_COMMAND, orig);
2903 pci_cfg_access_unlock(dev);
2904 return mask_supported;
2906 EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2908 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2910 struct pci_bus *bus = dev->bus;
2911 bool mask_updated = true;
2912 u32 cmd_status_dword;
2913 u16 origcmd, newcmd;
2914 unsigned long flags;
2918 * We do a single dword read to retrieve both command and status.
2919 * Document assumptions that make this possible.
2921 BUILD_BUG_ON(PCI_COMMAND % 4);
2922 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2924 raw_spin_lock_irqsave(&pci_lock, flags);
2926 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2928 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2931 * Check interrupt status register to see whether our device
2932 * triggered the interrupt (when masking) or the next IRQ is
2933 * already pending (when unmasking).
2935 if (mask != irq_pending) {
2936 mask_updated = false;
2940 origcmd = cmd_status_dword;
2941 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2942 if (mask)
2943 newcmd |= PCI_COMMAND_INTX_DISABLE;
2944 if (newcmd != origcmd)
2945 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2948 raw_spin_unlock_irqrestore(&pci_lock, flags);
2950 return mask_updated;
2954 * pci_check_and_mask_intx - mask INTx on pending interrupt
2955 * @dev: the PCI device to operate on
2957 * Check if the device dev has its INTx line asserted, mask it and
2958 * return true in that case. False is returned if no interrupt was
2959 * pending.
2961 bool pci_check_and_mask_intx(struct pci_dev *dev)
2963 return pci_check_and_set_intx_mask(dev, true);
2965 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2968 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
2969 * @dev: the PCI device to operate on
2971 * Check if the device dev has its INTx line asserted, unmask it if not
2972 * and return true. False is returned and the mask remains active if
2973 * there was still an interrupt pending.
2975 bool pci_check_and_unmask_intx(struct pci_dev *dev)
2977 return pci_check_and_set_intx_mask(dev, false);
2979 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
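/*
 * Illustrative sketch (assumption): how a VFIO/UIO-style driver might
 * use the two helpers above to service a shared INTx line; foo_irq()
 * and struct foo are hypothetical.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo *priv = data;
 *
 *		if (!pci_check_and_mask_intx(priv->pdev))
 *			return IRQ_NONE;
 *		queue_work(priv->wq, &priv->work);
 *		return IRQ_HANDLED;
 *	}
 *
 * Once the work handler has quiesced the device, it calls
 * pci_check_and_unmask_intx() to re-enable the line.
 */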
2982 * pci_msi_off - disables any MSI or MSI-X capabilities
2983 * @dev: the PCI device to operate on
2985 * If you want to use MSI, see pci_enable_msi() and friends.
2986 * This is a lower-level primitive that allows us to disable
2987 * MSI operation at the device level.
2989 void pci_msi_off(struct pci_dev *dev)
2994 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2996 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2997 control &= ~PCI_MSI_FLAGS_ENABLE;
2998 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3000 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3002 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3003 control &= ~PCI_MSIX_FLAGS_ENABLE;
3004 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3007 EXPORT_SYMBOL_GPL(pci_msi_off);
3009 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3011 return dma_set_max_seg_size(&dev->dev, size);
3013 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3015 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3017 return dma_set_seg_boundary(&dev->dev, mask);
3019 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
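/*
 * Illustrative sketch (assumption): a device whose DMA engine cannot
 * take segments larger than 64 KB or crossing a 4 KB boundary would
 * advertise those limits like so:
 *
 *	pci_set_dma_max_seg_size(pdev, 65536);
 *	pci_set_dma_seg_boundary(pdev, 0xfff);
 */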
3021 static int pcie_flr(struct pci_dev *dev, int probe)
3026 u16 status, control;
3028 pos = pci_pcie_cap(dev);
3032 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
3033 if (!(cap & PCI_EXP_DEVCAP_FLR))
3039 /* Wait for the Transaction Pending bit to clear */
3040 for (i = 0; i < 4; i++) {
3041 if (i)
3042 msleep((1 << (i - 1)) * 100);
3044 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3045 if (!(status & PCI_EXP_DEVSTA_TRPND))
3049 dev_err(&dev->dev, "transaction is not cleared; "
3050 "proceeding with reset anyway\n");
3053 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3054 control |= PCI_EXP_DEVCTL_BCR_FLR;
3055 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3062 static int pci_af_flr(struct pci_dev *dev, int probe)
3069 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3073 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3074 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3080 /* Wait for the Transaction Pending bit to clear */
3081 for (i = 0; i < 4; i++) {
3082 if (i)
3083 msleep((1 << (i - 1)) * 100);
3085 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3086 if (!(status & PCI_AF_STATUS_TP))
3090 dev_err(&dev->dev, "transaction is not cleared; "
3091 "proceeding with reset anyway\n");
3094 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3101 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3102 * @dev: Device to reset.
3103 * @probe: If set, only check if the device can be reset this way.
3105 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3106 * unset, it will be reinitialized internally when going from PCI_D3hot to
3107 * PCI_D0. If that's the case and the device is not in a low-power state
3108 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3110 * NOTE: This causes the caller to sleep for twice the device power transition
3111 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3112 * by default (i.e. unless the @dev's d3_delay field has a different value).
3113 * Moreover, only devices in D0 can be reset by this function.
3115 static int pci_pm_reset(struct pci_dev *dev, int probe)
3122 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3123 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3129 if (dev->current_state != PCI_D0)
3132 csr &= ~PCI_PM_CTRL_STATE_MASK;
3133 csr |= PCI_D3hot;
3134 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3135 pci_dev_d3_sleep(dev);
3137 csr &= ~PCI_PM_CTRL_STATE_MASK;
3138 csr |= PCI_D0;
3139 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3140 pci_dev_d3_sleep(dev);
3145 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3148 struct pci_dev *pdev;
3150 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
3153 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3160 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3161 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3162 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3163 msleep(100);
3165 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3166 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3167 msleep(100);
3172 static int pci_dev_reset(struct pci_dev *dev, int probe)
3179 pci_cfg_access_lock(dev);
3180 /* block PM suspend, driver probe, etc. */
3181 device_lock(&dev->dev);
3184 rc = pci_dev_specific_reset(dev, probe);
3188 rc = pcie_flr(dev, probe);
3192 rc = pci_af_flr(dev, probe);
3196 rc = pci_pm_reset(dev, probe);
3200 rc = pci_parent_bus_reset(dev, probe);
3203 device_unlock(&dev->dev);
3204 pci_cfg_access_unlock(dev);
3211 * __pci_reset_function - reset a PCI device function
3212 * @dev: PCI device to reset
3214 * Some devices allow an individual function to be reset without affecting
3215 * other functions in the same device. The PCI device must be responsive
3216 * to PCI config space in order to use this function.
3218 * The device function is presumed to be unused when this function is called.
3219 * Resetting the device will make the contents of PCI configuration space
3220 * random, so any caller of this must be prepared to reinitialise the
3221 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3222 * etc.
3224 * Returns 0 if the device function was successfully reset or negative if the
3225 * device doesn't support resetting a single function.
3227 int __pci_reset_function(struct pci_dev *dev)
3229 return pci_dev_reset(dev, 0);
3231 EXPORT_SYMBOL_GPL(__pci_reset_function);
3234 * __pci_reset_function_locked - reset a PCI device function while holding
3235 * the @dev mutex lock.
3236 * @dev: PCI device to reset
3238 * Some devices allow an individual function to be reset without affecting
3239 * other functions in the same device. The PCI device must be responsive
3240 * to PCI config space in order to use this function.
3242 * The device function is presumed to be unused and the caller is holding
3243 * the device mutex lock when this function is called.
3244 * Resetting the device will make the contents of PCI configuration space
3245 * random, so any caller of this must be prepared to reinitialise the
3246 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3247 * etc.
3249 * Returns 0 if the device function was successfully reset or negative if the
3250 * device doesn't support resetting a single function.
3252 int __pci_reset_function_locked(struct pci_dev *dev)
3254 return __pci_dev_reset(dev, 0);
3256 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3259 * pci_probe_reset_function - check whether the device can be safely reset
3260 * @dev: PCI device to reset
3262 * Some devices allow an individual function to be reset without affecting
3263 * other functions in the same device. The PCI device must be responsive
3264 * to PCI config space in order to use this function.
3266 * Returns 0 if the device function can be reset or negative if the
3267 * device doesn't support resetting a single function.
3269 int pci_probe_reset_function(struct pci_dev *dev)
3271 return pci_dev_reset(dev, 1);
3275 * pci_reset_function - quiesce and reset a PCI device function
3276 * @dev: PCI device to reset
3278 * Some devices allow an individual function to be reset without affecting
3279 * other functions in the same device. The PCI device must be responsive
3280 * to PCI config space in order to use this function.
3282 * This function does not just reset the PCI portion of a device, but
3283 * clears all the state associated with the device. This function differs
3284 * from __pci_reset_function in that it saves and restores device state
3285 * over the reset.
3287 * Returns 0 if the device function was successfully reset or negative if the
3288 * device doesn't support resetting a single function.
3290 int pci_reset_function(struct pci_dev *dev)
3294 rc = pci_dev_reset(dev, 1);
3298 pci_save_state(dev);
3301 * both INTx and MSI are disabled after the Interrupt Disable bit
3302 * is set and the Bus Master bit is cleared.
3304 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3306 rc = pci_dev_reset(dev, 0);
3308 pci_restore_state(dev);
3312 EXPORT_SYMBOL_GPL(pci_reset_function);
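/*
 * Illustrative sketch (assumption): resetting a quiesced function, e.g.
 * before assigning it to a guest; probing first lets unsupported devices
 * be rejected up front.
 *
 *	if (pci_probe_reset_function(pdev))
 *		return -ENODEV;
 *	pci_reset_function(pdev);
 */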
3315 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3316 * @dev: PCI device to query
3318 * Returns mmrbc: maximum designed memory read count in bytes
3319 * or appropriate error value.
3321 int pcix_get_max_mmrbc(struct pci_dev *dev)
3326 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3330 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3333 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3335 EXPORT_SYMBOL(pcix_get_max_mmrbc);
3338 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3339 * @dev: PCI device to query
3341 * Returns mmrbc: maximum memory read count in bytes
3342 * or appropriate error value.
3344 int pcix_get_mmrbc(struct pci_dev *dev)
3349 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3353 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3356 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3358 EXPORT_SYMBOL(pcix_get_mmrbc);
3361 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3362 * @dev: PCI device to query
3363 * @mmrbc: maximum memory read count in bytes
3364 * valid values are 512, 1024, 2048, 4096
3366 * If possible sets maximum memory read byte count; some bridges have errata
3367 * that prevent this.
3369 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3375 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3378 v = ffs(mmrbc) - 10;
3380 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3384 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3387 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3390 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3393 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3395 if (v > o && dev->bus &&
3396 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3399 cmd &= ~PCI_X_CMD_MAX_READ;
3400 cmd |= v << 2;
3401 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3406 EXPORT_SYMBOL(pcix_set_mmrbc);
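/*
 * Illustrative sketch (assumption): raising a PCI-X device's read byte
 * count to its designed maximum when the current setting is lower.
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *
 *	if (max > 0 && pcix_get_mmrbc(pdev) < max)
 *		pcix_set_mmrbc(pdev, max);
 */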
3409 * pcie_get_readrq - get PCI Express read request size
3410 * @dev: PCI device to query
3412 * Returns maximum memory read request in bytes
3413 * or appropriate error value.
3415 int pcie_get_readrq(struct pci_dev *dev)
3420 cap = pci_pcie_cap(dev);
3424 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3426 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3430 EXPORT_SYMBOL(pcie_get_readrq);
3433 * pcie_set_readrq - set PCI Express maximum memory read request
3434 * @dev: PCI device to query
3435 * @rq: maximum memory read count in bytes
3436 * valid values are 128, 256, 512, 1024, 2048, 4096
3438 * If possible sets maximum memory read request in bytes
3440 int pcie_set_readrq(struct pci_dev *dev, int rq)
3442 int cap, err = -EINVAL;
3445 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3448 cap = pci_pcie_cap(dev);
3452 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3456 * If using the "performance" PCIe config, we clamp the
3457 * read rq size to the max packet size to prevent the
3458 * host bridge generating requests larger than we can
3459 * cope with.
3461 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3462 int mps = pcie_get_mps(dev);
3470 v = (ffs(rq) - 8) << 12;
3472 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3473 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3474 ctl |= v;
3475 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3481 EXPORT_SYMBOL(pcie_set_readrq);
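/*
 * Illustrative sketch (assumption): some drivers clamp the read request
 * size for fairness on shared links; 512 is an example value only.
 *
 *	if (pcie_get_readrq(pdev) > 512)
 *		pcie_set_readrq(pdev, 512);
 */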
3484 * pcie_get_mps - get PCI Express maximum payload size
3485 * @dev: PCI device to query
3487 * Returns maximum payload size in bytes
3488 * or appropriate error value.
3490 int pcie_get_mps(struct pci_dev *dev)
3495 cap = pci_pcie_cap(dev);
3499 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3501 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3507 * pcie_set_mps - set PCI Express maximum payload size
3508 * @dev: PCI device to query
3509 * @mps: maximum payload size in bytes
3510 * valid values are 128, 256, 512, 1024, 2048, 4096
3512 * If possible sets maximum payload size
3514 int pcie_set_mps(struct pci_dev *dev, int mps)
3516 int cap, err = -EINVAL;
3519 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3522 v = ffs(mps) - 8;
3523 if (v > dev->pcie_mpss)
3527 cap = pci_pcie_cap(dev);
3531 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3535 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3536 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3537 ctl |= v;
3538 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3545 * pci_select_bars - Make BAR mask from the type of resource
3546 * @dev: the PCI device for which BAR mask is made
3547 * @flags: resource type mask to be selected
3549 * This helper routine makes a BAR mask from the type of resource.
3551 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3554 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3555 if (pci_resource_flags(dev, i) & flags)
3556 bars |= (1 << i);
3557 return bars;
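/*
 * Illustrative sketch (assumption): pairing pci_select_bars() with the
 * selected-region helpers above to claim only the memory BARs; "foo"
 * is a made-up owner name.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	if (pci_request_selected_regions(pdev, bars, "foo"))
 *		return -EBUSY;
 */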
3561 * pci_resource_bar - get position of the BAR associated with a resource
3562 * @dev: the PCI device
3563 * @resno: the resource number
3564 * @type: the BAR type to be filled in
3566 * Returns BAR position in config space, or 0 if the BAR is invalid.
3568 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3572 if (resno < PCI_ROM_RESOURCE) {
3573 *type = pci_bar_unknown;
3574 return PCI_BASE_ADDRESS_0 + 4 * resno;
3575 } else if (resno == PCI_ROM_RESOURCE) {
3576 *type = pci_bar_mem32;
3577 return dev->rom_base_reg;
3578 } else if (resno < PCI_BRIDGE_RESOURCES) {
3579 /* device specific resource */
3580 reg = pci_iov_resource_bar(dev, resno, type);
3585 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3589 /* Some architectures require additional programming to enable VGA */
3590 static arch_set_vga_state_t arch_set_vga_state;
3592 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3594 arch_set_vga_state = func; /* NULL disables */
3597 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3598 unsigned int command_bits, u32 flags)
3600 if (arch_set_vga_state)
3601 return arch_set_vga_state(dev, decode, command_bits,
3607 * pci_set_vga_state - set VGA decode state on device and parents if requested
3608 * @dev: the PCI device
3609 * @decode: true = enable decoding, false = disable decoding
3610 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3611 * @flags: traverse ancestors and change bridges
3612 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3614 int pci_set_vga_state(struct pci_dev *dev, bool decode,
3615 unsigned int command_bits, u32 flags)
3617 struct pci_bus *bus;
3618 struct pci_dev *bridge;
3622 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3624 /* ARCH specific VGA enables */
3625 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3629 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3630 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3631 if (decode == true)
3632 cmd |= command_bits;
3633 else
3634 cmd &= ~command_bits;
3635 pci_write_config_word(dev, PCI_COMMAND, cmd);
3638 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3645 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3647 if (decode == true)
3648 cmd |= PCI_BRIDGE_CTL_VGA;
3649 else
3650 cmd &= ~PCI_BRIDGE_CTL_VGA;
3651 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3659 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3660 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3661 static DEFINE_SPINLOCK(resource_alignment_lock);
3664 * pci_specified_resource_alignment - get resource alignment specified by user.
3665 * @dev: the PCI device to check
3667 * RETURNS: Resource alignment if it is specified.
3668 * Zero if it is not specified.
3670 resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3672 int seg, bus, slot, func, align_order, count;
3673 resource_size_t align = 0;
3676 spin_lock(&resource_alignment_lock);
3677 p = resource_alignment_param;
3680 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3686 if (sscanf(p, "%x:%x:%x.%x%n",
3687 &seg, &bus, &slot, &func, &count) != 4) {
3689 if (sscanf(p, "%x:%x.%x%n",
3690 &bus, &slot, &func, &count) != 3) {
3691 /* Invalid format */
3692 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3698 if (seg == pci_domain_nr(dev->bus) &&
3699 bus == dev->bus->number &&
3700 slot == PCI_SLOT(dev->devfn) &&
3701 func == PCI_FUNC(dev->devfn)) {
3702 if (align_order == -1) {
3703 align = PAGE_SIZE;
3704 } else {
3705 align = 1 << align_order;
3706 }
3710 if (*p != ';' && *p != ',') {
3711 /* End of param or invalid format */
3716 spin_unlock(&resource_alignment_lock);
3721 * pci_is_reassigndev - check if the specified PCI device is a target device to reassign
3722 * @dev: the PCI device to check
3724 * RETURNS: non-zero if the PCI device is a target device to reassign,
3725 * zero otherwise.
3727 int pci_is_reassigndev(struct pci_dev *dev)
3729 #ifdef CONFIG_PCI_GUESTDEV
3732 result = pci_is_guestdev_to_reassign(dev);
3735 #endif /* CONFIG_PCI_GUESTDEV */
3736 return (pci_specified_resource_alignment(dev) != 0);
3740 * This function disables memory decoding and releases memory resources
3741 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
3742 * It also rounds up the size to the specified alignment.
3743 * Later on, the kernel will assign page-aligned memory resource back
3744 * to the device.
3746 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3750 resource_size_t align, size;
3753 if (!pci_is_reassigndev(dev))
3756 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3757 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3759 "Can't reassign resources to host bridge.\n");
3764 "Disabling memory decoding and releasing memory resources.\n");
3765 pci_read_config_word(dev, PCI_COMMAND, &command);
3766 command &= ~PCI_COMMAND_MEMORY;
3767 pci_write_config_word(dev, PCI_COMMAND, command);
3769 align = pci_specified_resource_alignment(dev);
3770 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3771 r = &dev->resource[i];
3772 if (!(r->flags & IORESOURCE_MEM))
3774 size = resource_size(r);
3778 "Rounding up size of resource #%d to %#llx.\n",
3779 i, (unsigned long long)size);
3784 /* Need to disable bridge's resource window,
3785 * to enable the kernel to reassign a new resource
3786 * window later on.
3788 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3789 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3790 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3791 r = &dev->resource[i];
3792 if (!(r->flags & IORESOURCE_MEM))
3794 r->end = resource_size(r) - 1;
3797 pci_disable_bridge_window(dev);
3801 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3803 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3804 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3805 spin_lock(&resource_alignment_lock);
3806 strncpy(resource_alignment_param, buf, count);
3807 resource_alignment_param[count] = '\0';
3808 spin_unlock(&resource_alignment_lock);
3812 ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3815 spin_lock(&resource_alignment_lock);
3816 count = snprintf(buf, size, "%s", resource_alignment_param);
3817 spin_unlock(&resource_alignment_lock);
3821 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3823 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3826 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3827 const char *buf, size_t count)
3829 return pci_set_resource_alignment_param(buf, count);
3832 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3833 pci_resource_alignment_store);
3835 static int __init pci_resource_alignment_sysfs_init(void)
3837 return bus_create_file(&pci_bus_type,
3838 &bus_attr_resource_alignment);
3841 late_initcall(pci_resource_alignment_sysfs_init);
3843 static void __devinit pci_no_domains(void)
3845 #ifdef CONFIG_PCI_DOMAINS
3846 pci_domains_supported = 0;
3851 * pci_ext_cfg_avail - can we access extended PCI config space?
3852 * @dev: The PCI device of the root bridge.
3854 * Returns 1 if we can access PCI extended config space (offsets
3855 * greater than 0xff). This is the default implementation. Architecture
3856 * implementations can override this.
3858 int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3863 void __weak pci_fixup_cardbus(struct pci_bus *bus)
3866 EXPORT_SYMBOL(pci_fixup_cardbus);
3868 static int __init pci_setup(char *str)
3871 char *k = strchr(str, ',');
3874 if (*str && (str = pcibios_setup(str)) && *str) {
3875 if (!strcmp(str, "nomsi")) {
3877 } else if (!strcmp(str, "noaer")) {
3879 } else if (!strncmp(str, "realloc=", 8)) {
3880 pci_realloc_get_opt(str + 8);
3881 } else if (!strncmp(str, "realloc", 7)) {
3882 pci_realloc_get_opt("on");
3883 } else if (!strcmp(str, "nodomains")) {
3885 } else if (!strncmp(str, "noari", 5)) {
3886 pcie_ari_disabled = true;
3887 } else if (!strncmp(str, "cbiosize=", 9)) {
3888 pci_cardbus_io_size = memparse(str + 9, &str);
3889 } else if (!strncmp(str, "cbmemsize=", 10)) {
3890 pci_cardbus_mem_size = memparse(str + 10, &str);
3891 } else if (!strncmp(str, "resource_alignment=", 19)) {
3892 pci_set_resource_alignment_param(str + 19,
3894 } else if (!strncmp(str, "ecrc=", 5)) {
3895 pcie_ecrc_get_policy(str + 5);
3896 } else if (!strncmp(str, "hpiosize=", 9)) {
3897 pci_hotplug_io_size = memparse(str + 9, &str);
3898 } else if (!strncmp(str, "hpmemsize=", 10)) {
3899 pci_hotplug_mem_size = memparse(str + 10, &str);
3900 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3901 pcie_bus_config = PCIE_BUS_TUNE_OFF;
3902 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3903 pcie_bus_config = PCIE_BUS_SAFE;
3904 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3905 pcie_bus_config = PCIE_BUS_PERFORMANCE;
3906 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3907 pcie_bus_config = PCIE_BUS_PEER2PEER;
3909 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3917 early_param("pci", pci_setup);
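/*
 * Example boot parameters handled above (illustrative values only):
 *
 *	pci=nomsi,nodomains
 *	pci=hpmemsize=8M,hpiosize=4K
 *	pci=resource_alignment=20@0000:01:00.0	(align BARs to 1 MiB)
 *	pci=pcie_bus_safe
 */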
3919 EXPORT_SYMBOL(pci_reenable_device);
3920 EXPORT_SYMBOL(pci_enable_device_io);
3921 EXPORT_SYMBOL(pci_enable_device_mem);
3922 EXPORT_SYMBOL(pci_enable_device);
3923 EXPORT_SYMBOL(pcim_enable_device);
3924 EXPORT_SYMBOL(pcim_pin_device);
3925 EXPORT_SYMBOL(pci_disable_device);
3926 EXPORT_SYMBOL(pci_find_capability);
3927 EXPORT_SYMBOL(pci_bus_find_capability);
3928 EXPORT_SYMBOL(pci_release_regions);
3929 EXPORT_SYMBOL(pci_request_regions);
3930 EXPORT_SYMBOL(pci_request_regions_exclusive);
3931 EXPORT_SYMBOL(pci_release_region);
3932 EXPORT_SYMBOL(pci_request_region);
3933 EXPORT_SYMBOL(pci_request_region_exclusive);
3934 EXPORT_SYMBOL(pci_release_selected_regions);
3935 EXPORT_SYMBOL(pci_request_selected_regions);
3936 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3937 EXPORT_SYMBOL(pci_set_master);
3938 EXPORT_SYMBOL(pci_clear_master);
3939 EXPORT_SYMBOL(pci_set_mwi);
3940 EXPORT_SYMBOL(pci_try_set_mwi);
3941 EXPORT_SYMBOL(pci_clear_mwi);
3942 EXPORT_SYMBOL_GPL(pci_intx);
3943 EXPORT_SYMBOL(pci_assign_resource);
3944 EXPORT_SYMBOL(pci_find_parent_resource);
3945 EXPORT_SYMBOL(pci_select_bars);
3947 EXPORT_SYMBOL(pci_set_power_state);
3948 EXPORT_SYMBOL(pci_save_state);
3949 EXPORT_SYMBOL(pci_restore_state);
3950 EXPORT_SYMBOL(pci_pme_capable);
3951 EXPORT_SYMBOL(pci_pme_active);
3952 EXPORT_SYMBOL(pci_wake_from_d3);
3953 EXPORT_SYMBOL(pci_target_state);
3954 EXPORT_SYMBOL(pci_prepare_to_sleep);
3955 EXPORT_SYMBOL(pci_back_from_sleep);
3956 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);