2 #include <linux/acpi.h>
3 #include <linux/init.h>
6 #include <linux/slab.h>
8 #include <asm/pci_x86.h>
/*
 * Per-host-bridge bookkeeping filled in while walking the bridge's ACPI
 * _CRS resources (see count_resource()/setup_resource() below).
 * NOTE(review): this extract is missing lines (gaps in the embedded
 * numbering) — the remaining struct members are not visible here.
 */
10 struct pci_root_info {
/* ACPI device of the host bridge; used for dev_info() messages below. */
11 struct acpi_device *bridge;
/*
 * Whether host bridge windows are taken from ACPI _CRS.  Defaults to on;
 * pci_acpi_crs_quirks() may flip it based on BIOS date, DMI quirks, or
 * the "pci=use_crs"/"pci=nocrs" command-line options.
 */
19 static bool pci_use_crs = true;
/*
 * DMI callback for pci_use_crs_table: force _CRS usage on the listed
 * systems.  NOTE(review): the function body is not present in this
 * extract — presumably it sets pci_use_crs = true; verify against the
 * full source.
 */
21 static int __init set_use_crs(const struct dmi_system_id *id)
/*
 * Systems where we must trust ACPI _CRS even though the BIOS date
 * heuristic in pci_acpi_crs_quirks() would otherwise disable it.
 * Each entry cites the bugzilla report that motivated it.
 * NOTE(review): the structural braces/.matches initializers between
 * entries are missing from this extract (numbering gaps).
 */
27 static const struct dmi_system_id pci_use_crs_table[] __initconst = {
28 /* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
30 .callback = set_use_crs,
31 .ident = "IBM System x3800",
33 DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
34 DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
37 /* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
38 /* 2006 AMD HT/VIA system with two host bridges */
40 .callback = set_use_crs,
41 .ident = "ASRock ALiveSATA2-GLAN",
43 DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
46 /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
47 /* 2006 AMD HT/VIA system with two host bridges */
49 .callback = set_use_crs,
50 .ident = "ASUS M2V-MX SE",
52 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
53 DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
54 DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
57 /* https://bugzilla.kernel.org/show_bug.cgi?id=42619 */
59 .callback = set_use_crs,
60 .ident = "MSI MS-7253",
62 DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
63 DMI_MATCH(DMI_BOARD_NAME, "MS-7253"),
64 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
/*
 * Decide whether to trust ACPI _CRS for host bridge windows.
 * Policy, in increasing precedence: (1) distrust _CRS on pre-2008
 * BIOSes, (2) re-enable it for DMI-listed quirky systems, (3) honor an
 * explicit "pci=use_crs"/"pci=nocrs" from the user, then log the result.
 */
70 void __init pci_acpi_crs_quirks(void)
/* Heuristic: BIOSes older than 2008 often publish broken _CRS data. */
74 if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
77 dmi_check_system(pci_use_crs_table);
80 * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
81 * takes precedence over anything we figured out above.
83 if (pci_probe & PCI_ROOT_NO_CRS)
/* PCI_USE__CRS (double underscore) is the real macro name in pci_x86.h. */
85 else if (pci_probe & PCI_USE__CRS)
/* Tell the user which policy won and how to override it if it's wrong. */
88 printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
89 "if necessary, use \"pci=%s\" and report a bug\n",
90 pci_use_crs ? "Using" : "Ignoring",
91 pci_use_crs ? "nocrs" : "use_crs");
/*
 * Normalize an ACPI resource descriptor into a generic
 * acpi_resource_address64 (*addr is zeroed first).  The fixed-size
 * memory descriptor types are converted by hand; ADDRESS16/32/64 go
 * through ACPICA's acpi_resource_to_address64(), which is then filtered
 * to non-empty memory/IO ranges only.
 * NOTE(review): the return type, `break;` statements between cases, and
 * the return paths are missing from this extract — the upstream version
 * returns acpi_status; verify against the full source.
 */
95 resource_to_addr(struct acpi_resource *resource,
96 struct acpi_resource_address64 *addr)
99 struct acpi_resource_memory24 *memory24;
100 struct acpi_resource_memory32 *memory32;
101 struct acpi_resource_fixed_memory32 *fixed_memory32;
/* Start from a clean slate so untouched fields read as zero. */
103 memset(addr, 0, sizeof(*addr));
104 switch (resource->type) {
105 case ACPI_RESOURCE_TYPE_MEMORY24:
106 memory24 = &resource->data.memory24;
107 addr->resource_type = ACPI_MEMORY_RANGE;
108 addr->minimum = memory24->minimum;
109 addr->address_length = memory24->address_length;
/* maximum is derived: inclusive end of [minimum, minimum+len-1]. */
110 addr->maximum = addr->minimum + addr->address_length - 1;
112 case ACPI_RESOURCE_TYPE_MEMORY32:
113 memory32 = &resource->data.memory32;
114 addr->resource_type = ACPI_MEMORY_RANGE;
115 addr->minimum = memory32->minimum;
116 addr->address_length = memory32->address_length;
117 addr->maximum = addr->minimum + addr->address_length - 1;
119 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
120 fixed_memory32 = &resource->data.fixed_memory32;
121 addr->resource_type = ACPI_MEMORY_RANGE;
122 addr->minimum = fixed_memory32->address;
123 addr->address_length = fixed_memory32->address_length;
124 addr->maximum = addr->minimum + addr->address_length - 1;
126 case ACPI_RESOURCE_TYPE_ADDRESS16:
127 case ACPI_RESOURCE_TYPE_ADDRESS32:
128 case ACPI_RESOURCE_TYPE_ADDRESS64:
/* Let ACPICA do the heavy lifting for address-space descriptors. */
129 status = acpi_resource_to_address64(resource, addr);
/* Only accept non-empty memory or IO ranges; everything else is skipped. */
130 if (ACPI_SUCCESS(status) &&
131 (addr->resource_type == ACPI_MEMORY_RANGE ||
132 addr->resource_type == ACPI_IO_RANGE) &&
133 addr->address_length > 0) {
/*
 * acpi_walk_resources() callback, first pass: count the usable _CRS
 * entries so get_current_resources() can size the info->res array.
 * NOTE(review): the increment of the counter and the return statement
 * are missing from this extract.
 */
142 count_resource(struct acpi_resource *acpi_res, void *data)
144 struct pci_root_info *info = data;
145 struct acpi_resource_address64 addr;
/* An entry counts only if resource_to_addr() accepts it. */
148 status = resource_to_addr(acpi_res, &addr);
149 if (ACPI_SUCCESS(status))
/*
 * acpi_walk_resources() callback, second pass: convert each usable _CRS
 * entry into a struct resource in info->res[], translating ACPI
 * addresses to CPU addresses and clipping ranges the CPU cannot reach.
 * NOTE(review): several lines (returns, the IO-range flag fallthrough,
 * res field assignments, closing braces) are missing from this extract.
 */
155 setup_resource(struct acpi_resource *acpi_res, void *data)
157 struct pci_root_info *info = data;
158 struct resource *res;
159 struct acpi_resource_address64 addr;
162 u64 start, orig_end, end;
164 status = resource_to_addr(acpi_res, &addr);
/* Skip descriptors resource_to_addr() rejected. */
165 if (!ACPI_SUCCESS(status))
/* Map the ACPI range type onto IORESOURCE_* flags. */
168 if (addr.resource_type == ACPI_MEMORY_RANGE) {
169 flags = IORESOURCE_MEM;
170 if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
171 flags |= IORESOURCE_PREFETCH;
172 } else if (addr.resource_type == ACPI_IO_RANGE) {
173 flags = IORESOURCE_IO;
/* translation_offset converts PCI-side addresses to CPU-side ones. */
177 start = addr.minimum + addr.translation_offset;
178 orig_end = end = addr.maximum + addr.translation_offset;
180 /* Exclude non-addressable range or non-addressable portion of range */
181 end = min(end, (u64)iomem_resource.end);
/* Entire window above what the CPU can address: report and drop it. */
183 dev_info(&info->bridge->dev,
184 "host bridge window [%#llx-%#llx] "
185 "(ignored, not CPU addressable)\n", start, orig_end);
/* Only the tail was clipped: keep the reachable part, log the rest. */
187 } else if (orig_end != end) {
188 dev_info(&info->bridge->dev,
189 "host bridge window [%#llx-%#llx] "
190 "([%#llx-%#llx] ignored, not CPU addressable)\n",
191 start, orig_end, end + 1, orig_end);
/* Fill the next slot in the array sized by count_resource(). */
194 res = &info->res[info->res_num];
195 res->name = info->name;
/* When _CRS data is distrusted, record but don't use the window. */
202 dev_printk(KERN_DEBUG, &info->bridge->dev,
203 "host bridge window %pR (ignored)\n", res);
/* Report translated windows with both CPU and PCI address views. */
208 if (addr.translation_offset)
209 dev_info(&info->bridge->dev, "host bridge window %pR "
210 "(PCI address [%#llx-%#llx])\n",
211 res, res->start - addr.translation_offset,
212 res->end - addr.translation_offset);
214 dev_info(&info->bridge->dev, "host bridge window %pR\n", res);
/*
 * Inclusive point-in-range test: true iff start <= point <= end.
 * NOTE(review): the return statements and closing brace are missing
 * from this extract.
 */
219 static bool resource_contains(struct resource *res, resource_size_t point)
221 if (res->start <= point && point <= res->end)
/*
 * Merge overlapping host bridge windows of the given IORESOURCE_* type
 * (_CRS sometimes reports overlapping ranges, which the kernel resource
 * tree cannot represent).  Overlapping pairs are merged into res1;
 * NOTE(review): the code that discards res2 after merging is missing
 * from this extract.
 */
226 static void coalesce_windows(struct pci_root_info *info, unsigned long type)
229 struct resource *res1, *res2;
/* Compare every window against every later window of the same type. */
231 for (i = 0; i < info->res_num; i++) {
232 res1 = &info->res[i];
233 if (!(res1->flags & type))
236 for (j = i + 1; j < info->res_num; j++) {
237 res2 = &info->res[j];
238 if (!(res2->flags & type))
242 * I don't like throwing away windows because then
243 * our resources no longer match the ACPI _CRS, but
244 * the kernel resource tree doesn't allow overlaps.
/* Any endpoint inside the other range means the two windows overlap. */
246 if (resource_contains(res1, res2->start) ||
247 resource_contains(res1, res2->end) ||
248 resource_contains(res2, res1->start) ||
249 resource_contains(res2, res1->end)) {
/* Grow res1 to the union of the two windows. */
250 res1->start = min(res1->start, res2->start);
251 res1->end = max(res1->end, res2->end);
252 dev_info(&info->bridge->dev,
253 "host bridge window expanded to %pR; %pR ignored\n",
/*
 * Insert the collected host bridge windows into the global iomem/ioport
 * resource trees and attach them to the PCI bus.  Overlaps are merged
 * first via coalesce_windows(); windows that still conflict with an
 * existing resource are logged and skipped.
 */
261 static void add_resources(struct pci_root_info *info)
264 struct resource *res, *root, *conflict;
269 coalesce_windows(info, IORESOURCE_MEM);
270 coalesce_windows(info, IORESOURCE_IO);
272 for (i = 0; i < info->res_num; i++) {
/* Pick the matching global tree for this window's type. */
275 if (res->flags & IORESOURCE_MEM)
276 root = &iomem_resource;
277 else if (res->flags & IORESOURCE_IO)
278 root = &ioport_resource;
/* Returns the conflicting resource on failure, NULL on success. */
282 conflict = insert_resource_conflict(root, res);
284 dev_info(&info->bridge->dev,
285 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
286 res, conflict->name, conflict);
/* Successfully inserted windows become bus resources. */
288 pci_bus_add_resource(info->bus, res, 0);
/*
 * Replace the bus's default resources with the windows described by the
 * host bridge's ACPI _CRS: count entries, allocate info.res, name the
 * bus ("PCI Bus dddd:bb"), fill the array, then publish via
 * add_resources().  NOTE(review): allocation-failure handling and
 * cleanup labels are partly missing from this extract.
 */
293 get_current_resources(struct acpi_device *device, int busnum,
294 int domain, struct pci_bus *bus)
296 struct pci_root_info info;
/* Drop the default/firmware resources before installing _CRS ones. */
300 pci_bus_remove_resources(bus);
302 info.bridge = device;
/* Pass 1: count usable _CRS entries to size the array below. */
305 acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
310 size = sizeof(*info.res) * info.res_num;
311 info.res = kmalloc(size, GFP_KERNEL);
315 info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
317 goto name_alloc_fail;
/* Pass 2: convert each entry into a struct resource. */
320 acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
323 add_resources(&info);
/*
 * Create and scan the PCI root bus for an ACPI host bridge: validate
 * domain support, resolve the NUMA node from the _PXM proximity domain,
 * allocate per-root sysdata, reuse an already-scanned bus if present or
 * create and scan a new one, then configure PCIe MPS settings.
 * NOTE(review): several lines (returns, #endif/braces, error paths) are
 * missing from this extract.
 */
332 struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
334 struct acpi_device *device = root->device;
335 int domain = root->segment;
336 int busnum = root->secondary.start;
338 struct pci_sysdata *sd;
340 #ifdef CONFIG_ACPI_NUMA
/* Non-zero domains need CONFIG_PCI_DOMAINS; otherwise refuse the bridge. */
344 if (domain && !pci_domains_supported) {
345 printk(KERN_WARNING "pci_bus %04x:%02x: "
346 "ignored (multiple domains not supported)\n",
352 #ifdef CONFIG_ACPI_NUMA
/* Map the ACPI proximity domain (_PXM) to a NUMA node for this bus. */
353 pxm = acpi_get_pxm(device->handle);
355 node = pxm_to_node(pxm);
357 set_mp_bus_to_node(busnum, node);
360 node = get_mp_bus_to_node(busnum);
/* An offline node is useless for allocation locality; presumably reset
 * to -1 here — the assignment is missing from this extract. */
362 if (node != -1 && !node_online(node))
365 /* Allocate per-root-bus (not per bus) arch-specific data.
366 * TODO: leak; this memory is never freed.
367 * It's arguable whether it's worth the trouble to care.
369 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
371 printk(KERN_WARNING "pci_bus %04x:%02x: "
372 "ignored (out of memory)\n", domain, busnum);
379 * Maybe the desired pci bus has been already scanned. In such case
380 * it is unnecessary to scan the pci bus with the given domain,busnum.
382 bus = pci_find_bus(domain, busnum);
385 * If the desired bus exits, the content of bus->sysdata will
/* Existing bus: refresh its sysdata instead of creating a new bus. */
388 memcpy(bus->sysdata, sd, sizeof(*sd));
391 bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
393 get_current_resources(device, busnum, domain, bus);
394 bus->subordinate = pci_scan_child_bus(bus);
398 /* After the PCI-E bus has been walked and all devices discovered,
399 * configure any settings of the fabric that might be necessary.
402 struct pci_bus *child;
403 list_for_each_entry(child, &bus->children, node) {
404 struct pci_dev *self = child->self;
408 pcie_bus_configure_settings(child, self->pcie_mpss);
/* Report the NUMA placement of the new root bus, with pxm if available. */
415 if (bus && node != -1) {
416 #ifdef CONFIG_ACPI_NUMA
418 dev_printk(KERN_DEBUG, &bus->dev,
419 "on NUMA node %d (pxm %d)\n", node, pxm);
421 dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
/*
 * Switch x86 PCI IRQ routing over to ACPI: install the ACPI
 * enable/disable hooks and neuter the legacy init path.  With
 * "pci=routeirq", eagerly route IRQs for every device instead of
 * waiting for pci_enable_device().
 * NOTE(review): early-exit checks and the return value are missing
 * from this extract.
 */
428 int __init pci_acpi_init(void)
430 struct pci_dev *dev = NULL;
435 printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
/* Pre-seed IRQ penalties so link balancing sees ISA/legacy usage. */
436 acpi_irq_penalty_init();
437 pcibios_enable_irq = acpi_pci_irq_enable;
438 pcibios_disable_irq = acpi_pci_irq_disable;
/* Legacy PIRQ table init is no longer needed once ACPI routes IRQs. */
439 x86_init.pci.init_irq = x86_init_noop;
443 * PCI IRQ routing is set up by pci_enable_device(), but we
444 * also do it here in case there are still broken drivers that
445 * don't use pci_enable_device().
447 printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
448 for_each_pci_dev(dev)
449 acpi_pci_irq_enable(dev);