f8348ab103245a775a4f48b828bfcb4bbe8b2f70
[linux-flexiantxendom0.git] / arch / x86 / pci / acpi.c
1 #include <linux/pci.h>
2 #include <linux/acpi.h>
3 #include <linux/init.h>
4 #include <linux/irq.h>
5 #include <linux/dmi.h>
6 #include <linux/slab.h>
7 #include <asm/numa.h>
8 #include <asm/pci_x86.h>
9
/*
 * Per-host-bridge bookkeeping used while translating the bridge's ACPI
 * _CRS entries into kernel struct resource windows.
 */
struct pci_root_info {
	struct acpi_device *bridge;	/* ACPI device node of the host bridge */
	char *name;			/* "PCI Bus %04x:%02x"; used as res->name */
	unsigned int res_num;		/* entries used in res[] */
	struct resource *res;		/* kmalloc'd array of window resources */
	struct pci_bus *bus;		/* root bus the windows belong to */
	int busnum;			/* bus number (not read in the visible code) */
};
18
/* Whether to trust/use the host bridge windows reported by ACPI _CRS. */
static bool pci_use_crs = true;
20
/*
 * DMI table callback: re-enable use of ACPI _CRS windows for a matched
 * machine.  Returns 0 (presumably to let dmi_check_system() keep
 * scanning the remaining table entries — confirm against DMI API docs).
 */
static int __init set_use_crs(const struct dmi_system_id *id)
{
	pci_use_crs = true;
	return 0;
}
26
/*
 * Machines whose _CRS data is known-good even though their BIOS predates
 * the 2008 cutoff applied in pci_acpi_crs_quirks(); each match fires
 * set_use_crs() to turn pci_use_crs back on.
 */
static const struct dmi_system_id pci_use_crs_table[] __initconst = {
	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
	{
		.callback = set_use_crs,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASRock ALiveSATA2-GLAN",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASUS M2V-MX SE",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
		},
	},
	{}	/* terminator */
};
59
60 void __init pci_acpi_crs_quirks(void)
61 {
62         int year;
63
64         if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
65                 pci_use_crs = false;
66
67         dmi_check_system(pci_use_crs_table);
68
69         /*
70          * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
71          * takes precedence over anything we figured out above.
72          */
73         if (pci_probe & PCI_ROOT_NO_CRS)
74                 pci_use_crs = false;
75         else if (pci_probe & PCI_USE__CRS)
76                 pci_use_crs = true;
77
78         printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
79                "if necessary, use \"pci=%s\" and report a bug\n",
80                pci_use_crs ? "Using" : "Ignoring",
81                pci_use_crs ? "nocrs" : "use_crs");
82 }
83
84 static acpi_status
85 resource_to_addr(struct acpi_resource *resource,
86                         struct acpi_resource_address64 *addr)
87 {
88         acpi_status status;
89         struct acpi_resource_memory24 *memory24;
90         struct acpi_resource_memory32 *memory32;
91         struct acpi_resource_fixed_memory32 *fixed_memory32;
92
93         memset(addr, 0, sizeof(*addr));
94         switch (resource->type) {
95         case ACPI_RESOURCE_TYPE_MEMORY24:
96                 memory24 = &resource->data.memory24;
97                 addr->resource_type = ACPI_MEMORY_RANGE;
98                 addr->minimum = memory24->minimum;
99                 addr->address_length = memory24->address_length;
100                 addr->maximum = addr->minimum + addr->address_length - 1;
101                 return AE_OK;
102         case ACPI_RESOURCE_TYPE_MEMORY32:
103                 memory32 = &resource->data.memory32;
104                 addr->resource_type = ACPI_MEMORY_RANGE;
105                 addr->minimum = memory32->minimum;
106                 addr->address_length = memory32->address_length;
107                 addr->maximum = addr->minimum + addr->address_length - 1;
108                 return AE_OK;
109         case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
110                 fixed_memory32 = &resource->data.fixed_memory32;
111                 addr->resource_type = ACPI_MEMORY_RANGE;
112                 addr->minimum = fixed_memory32->address;
113                 addr->address_length = fixed_memory32->address_length;
114                 addr->maximum = addr->minimum + addr->address_length - 1;
115                 return AE_OK;
116         case ACPI_RESOURCE_TYPE_ADDRESS16:
117         case ACPI_RESOURCE_TYPE_ADDRESS32:
118         case ACPI_RESOURCE_TYPE_ADDRESS64:
119                 status = acpi_resource_to_address64(resource, addr);
120                 if (ACPI_SUCCESS(status) &&
121                     (addr->resource_type == ACPI_MEMORY_RANGE ||
122                     addr->resource_type == ACPI_IO_RANGE) &&
123                     addr->address_length > 0) {
124                         return AE_OK;
125                 }
126                 break;
127         }
128         return AE_ERROR;
129 }
130
131 static acpi_status
132 count_resource(struct acpi_resource *acpi_res, void *data)
133 {
134         struct pci_root_info *info = data;
135         struct acpi_resource_address64 addr;
136         acpi_status status;
137
138         status = resource_to_addr(acpi_res, &addr);
139         if (ACPI_SUCCESS(status))
140                 info->res_num++;
141         return AE_OK;
142 }
143
/*
 * acpi_walk_resources() callback: translate one _CRS entry into the next
 * free slot of info->res[].  Always returns AE_OK so the walk continues;
 * unusable or ignored entries simply leave res_num unchanged.
 */
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *res;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags;
	u64 start, orig_end, end;

	status = resource_to_addr(acpi_res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	/* Map the ACPI range type onto kernel resource flags. */
	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
			flags |= IORESOURCE_PREFETCH;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
	} else
		return AE_OK;

	/* translation_offset converts PCI-side addresses to CPU-side. */
	start = addr.minimum + addr.translation_offset;
	orig_end = end = addr.maximum + addr.translation_offset;

	/* Exclude non-addressable range or non-addressable portion of range */
	end = min(end, (u64)iomem_resource.end);
	if (end <= start) {
		dev_info(&info->bridge->dev,
			"host bridge window [%#llx-%#llx] "
			"(ignored, not CPU addressable)\n", start, orig_end);
		return AE_OK;
	} else if (orig_end != end) {
		dev_info(&info->bridge->dev,
			"host bridge window [%#llx-%#llx] "
			"([%#llx-%#llx] ignored, not CPU addressable)\n",
			start, orig_end, end + 1, orig_end);
	}

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;

	/*
	 * When _CRS windows are distrusted, log the window but return
	 * WITHOUT bumping res_num: the slot just filled in is deliberately
	 * reused for the next entry, so nothing is kept.
	 */
	if (!pci_use_crs) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	info->res_num++;
	if (addr.translation_offset)
		dev_info(&info->bridge->dev, "host bridge window %pR "
			 "(PCI address [%#llx-%#llx])\n",
			 res, res->start - addr.translation_offset,
			 res->end - addr.translation_offset);
	else
		dev_info(&info->bridge->dev, "host bridge window %pR\n", res);

	return AE_OK;
}
208
209 static bool resource_contains(struct resource *res, resource_size_t point)
210 {
211         if (res->start <= point && point <= res->end)
212                 return true;
213         return false;
214 }
215
216 static void coalesce_windows(struct pci_root_info *info, unsigned long type)
217 {
218         int i, j;
219         struct resource *res1, *res2;
220
221         for (i = 0; i < info->res_num; i++) {
222                 res1 = &info->res[i];
223                 if (!(res1->flags & type))
224                         continue;
225
226                 for (j = i + 1; j < info->res_num; j++) {
227                         res2 = &info->res[j];
228                         if (!(res2->flags & type))
229                                 continue;
230
231                         /*
232                          * I don't like throwing away windows because then
233                          * our resources no longer match the ACPI _CRS, but
234                          * the kernel resource tree doesn't allow overlaps.
235                          */
236                         if (resource_contains(res1, res2->start) ||
237                             resource_contains(res1, res2->end) ||
238                             resource_contains(res2, res1->start) ||
239                             resource_contains(res2, res1->end)) {
240                                 res1->start = min(res1->start, res2->start);
241                                 res1->end = max(res1->end, res2->end);
242                                 dev_info(&info->bridge->dev,
243                                          "host bridge window expanded to %pR; %pR ignored\n",
244                                          res1, res2);
245                                 res2->flags = 0;
246                         }
247                 }
248         }
249 }
250
251 static void add_resources(struct pci_root_info *info)
252 {
253         int i;
254         struct resource *res, *root, *conflict;
255
256         if (!pci_use_crs)
257                 return;
258
259         coalesce_windows(info, IORESOURCE_MEM);
260         coalesce_windows(info, IORESOURCE_IO);
261
262         for (i = 0; i < info->res_num; i++) {
263                 res = &info->res[i];
264
265                 if (res->flags & IORESOURCE_MEM)
266                         root = &iomem_resource;
267                 else if (res->flags & IORESOURCE_IO)
268                         root = &ioport_resource;
269                 else
270                         continue;
271
272                 conflict = insert_resource_conflict(root, res);
273                 if (conflict)
274                         dev_info(&info->bridge->dev,
275                                  "ignoring host bridge window %pR (conflicts with %s %pR)\n",
276                                  res, conflict->name, conflict);
277                 else
278                         pci_bus_add_resource(info->bus, res, 0);
279         }
280 }
281
282 static void
283 get_current_resources(struct acpi_device *device, int busnum,
284                         int domain, struct pci_bus *bus)
285 {
286         struct pci_root_info info;
287         size_t size;
288
289         if (pci_use_crs)
290                 pci_bus_remove_resources(bus);
291
292         info.bridge = device;
293         info.bus = bus;
294         info.res_num = 0;
295         acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
296                                 &info);
297         if (!info.res_num)
298                 return;
299
300         size = sizeof(*info.res) * info.res_num;
301         info.res = kmalloc(size, GFP_KERNEL);
302         if (!info.res)
303                 goto res_alloc_fail;
304
305         info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
306         if (!info.name)
307                 goto name_alloc_fail;
308
309         info.res_num = 0;
310         acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
311                                 &info);
312
313         add_resources(&info);
314         return;
315
316 name_alloc_fail:
317         kfree(info.res);
318 res_alloc_fail:
319         return;
320 }
321
/*
 * Create (or look up) and scan the PCI root bus described by an ACPI
 * host bridge object.  Returns the root pci_bus, or NULL when the
 * domain is unsupported or memory allocation fails.
 */
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int busnum = root->secondary.start;
	struct pci_bus *bus;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	/* Non-zero domains need CONFIG-dependent support; bail if absent. */
	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	/*
	 * Resolve the NUMA node: prefer the ACPI proximity domain (_PXM),
	 * falling back to the bus-to-node table; -1 means "no affinity".
	 */
	node = -1;
#ifdef CONFIG_ACPI_NUMA
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd->domain = domain;
	sd->node = node;
	/*
	 * Maybe the desired pci bus has been already scanned. In such case
	 * it is unnecessary to scan the pci bus with the given domain,busnum.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus exits, the content of bus->sysdata will
		 * be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(sd);	/* existing sysdata kept; our copy not needed */
	} else {
		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
		if (bus) {
			get_current_resources(device, busnum, domain, bus);
			bus->subordinate = pci_scan_child_bus(bus);
		}
	}

	/* After the PCI-E bus has been walked and all devices discovered,
	 * configure any settings of the fabric that might be necessary.
	 */
	if (bus) {
		struct pci_bus *child;
		list_for_each_entry(child, &bus->children, node) {
			struct pci_dev *self = child->self;
			if (!self)
				continue;

			pcie_bus_configure_settings(child, self->pcie_mpss);
		}
	}

	/* pci_create_bus() failed above: sd has no owner, free it. */
	if (!bus)
		kfree(sd);

	if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
		/* NOTE(review): with NUMA enabled, nothing is printed when
		 * pxm < 0 even though node may still be valid. */
		if (pxm >= 0)
			dev_printk(KERN_DEBUG, &bus->dev,
				   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
	}

	return bus;
}
417
418 int __init pci_acpi_init(void)
419 {
420         struct pci_dev *dev = NULL;
421
422         if (acpi_noirq)
423                 return -ENODEV;
424
425         printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
426         acpi_irq_penalty_init();
427         pcibios_enable_irq = acpi_pci_irq_enable;
428         pcibios_disable_irq = acpi_pci_irq_disable;
429         x86_init.pci.init_irq = x86_init_noop;
430
431         if (pci_routeirq) {
432                 /*
433                  * PCI IRQ routing is set up by pci_enable_device(), but we
434                  * also do it here in case there are still broken drivers that
435                  * don't use pci_enable_device().
436                  */
437                 printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
438                 for_each_pci_dev(dev)
439                         acpi_pci_irq_enable(dev);
440         }
441
442         return 0;
443 }