2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
9 #include <linux/vmalloc.h>
10 #include <linux/slab.h>
11 #include <asm/sn/sgi.h>
12 #include <asm/sn/pci/pci_bus_cvlink.h>
13 #include <asm/sn/sn_cpuid.h>
14 #include <asm/sn/simulator.h>
16 extern int bridge_rev_b_data_check_disable;
/*
 * Per-bus lookup tables indexed by logical Linux PCI bus number;
 * populated during pci_bus_map_create().
 */
18 vertex_hdl_t busnum_to_pcibr_vhdl[MAX_PCI_XWIDGET];
19 nasid_t busnum_to_nid[MAX_PCI_XWIDGET];
20 void * busnum_to_atedmamaps[MAX_PCI_XWIDGET];
21 unsigned char num_bridges;
22 static int done_probing;
23 extern irqpda_t *irqpdaindr;
/* Forward declarations for helpers defined below and elsewhere. */
25 static int pci_bus_map_create(struct pcibr_list_s *softlistp, moduleid_t io_moduleid);
26 vertex_hdl_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);
28 extern void register_pcibr_intr(int irq, pcibr_intr_t intr);
30 static struct sn_flush_device_list *sn_dma_flush_init(unsigned long start,
32 int idx, int pin, int slot);
33 extern int cbrick_type_get_nasid(nasid_t);
34 extern void ioconfig_bus_new_entries(void);
35 extern void ioconfig_get_busnum(char *, int *);
36 extern int iomoduleid_get(nasid_t);
37 extern int pcibr_widget_to_bus(vertex_hdl_t);
38 extern int isIO9(int);
/* Brick-type tests keyed off the C-brick type reported for a nasid. */
40 #define IS_OPUS(nasid) (cbrick_type_get_nasid(nasid) == MODULE_OPUSBRICK)
41 #define IS_ALTIX(nasid) (cbrick_type_get_nasid(nasid) == MODULE_CBRICK)
44 * Init the provider asic for a given device
/*
 * set_pci_provider() - look up the pciio info attached to the device's
 * hwgraph vertex and cache its provider ops in the per-device sysdata.
 */
47 static inline void __init
48 set_pci_provider(struct sn_device_sysdata *device_sysdata)
50 pciio_info_t pciio_info = pciio_info_get(device_sysdata->vhdl);
52 device_sysdata->pci_provider = pciio_info_pops_get(pciio_info);
56 * pci_bus_cvlink_init() - To be called once during initialization before
57 * SGI IO Infrastructure init is called.
/* Zero the busnum lookup tables, then hand off to ioconfig_bus_init(). */
60 pci_bus_cvlink_init(void)
63 extern int ioconfig_bus_init(void);
65 memset(busnum_to_pcibr_vhdl, 0x0, sizeof(vertex_hdl_t) * MAX_PCI_XWIDGET);
66 memset(busnum_to_nid, 0x0, sizeof(nasid_t) * MAX_PCI_XWIDGET);
68 memset(busnum_to_atedmamaps, 0x0, sizeof(void *) * MAX_PCI_XWIDGET);
/* Return value propagated from ioconfig_bus_init(). */
72 return ioconfig_bus_init();
76 * pci_bus_to_vertex() - Given a logical Linux Bus Number returns the associated
77 * pci bus vertex from the SGI IO Infrastructure.
79 static inline vertex_hdl_t
80 pci_bus_to_vertex(unsigned char busnum)
83 vertex_hdl_t pci_bus = NULL;
87 * First get the xwidget vertex.
/* Direct table lookup; entry is NULL if the bus was never mapped. */
89 pci_bus = busnum_to_pcibr_vhdl[busnum];
94 * devfn_to_vertex() - returns the vertex of the device given the bus, slot,
95 * and function numbers.
98 devfn_to_vertex(unsigned char busnum, unsigned int devfn)
104 vertex_hdl_t pci_bus = NULL;
105 vertex_hdl_t device_vertex = (vertex_hdl_t)NULL;
108 * Go get the pci bus vertex.
110 pci_bus = pci_bus_to_vertex(busnum);
113 * During probing, the Linux pci code invents non-existent
114 * bus numbers and pci_dev structures and tries to access
115 * them to determine existence. Don't crib during probing.
118 printk("devfn_to_vertex: Invalid bus number %d given.\n", busnum);
124 * Go get the slot&function vertex.
125 * Should call pciio_slot_func_to_name() when ready.
127 slot = PCI_SLOT(devfn);
128 func = PCI_FUNC(devfn);
131 * For a NON Multi-function card the name of the device looks like:
132 * ../pci/1, ../pci/2 ..
/* Try the single-function name first ("<slot>"). */
135 sprintf(name, "%d", slot);
136 if (hwgraph_traverse(pci_bus, name, &device_vertex) ==
139 return(device_vertex);
145 * This maybe a multifunction card. It's names look like:
146 * ../pci/1a, ../pci/1b, etc.
/* Fall back to the multifunction form "<slot><'a'+func>". */
148 sprintf(name, "%d%c", slot, 'a'+func);
149 if (hwgraph_traverse(pci_bus, name, &device_vertex) != GRAPH_SUCCESS) {
150 if (!device_vertex) {
/* May be NULL if neither traversal found the vertex. */
155 return(device_vertex);
159 * sn_alloc_pci_sysdata() - This routine allocates a pci controller
160 * which is expected as the pci_dev and pci_bus sysdata by the Linux
161 * PCI infrastructure.
163 static struct pci_controller *
164 sn_alloc_pci_sysdata(void)
166 struct pci_controller *pci_sysdata;
/* Allocate and zero a fresh controller struct; caller checks for NULL. */
168 pci_sysdata = kmalloc(sizeof(*pci_sysdata), GFP_KERNEL);
172 memset(pci_sysdata, 0, sizeof(*pci_sysdata));
177 * sn_pci_fixup_bus() - This routine sets up a bus's resources
178 * consistent with the Linux PCI abstraction layer.
181 sn_pci_fixup_bus(struct pci_bus *bus)
183 struct pci_controller *pci_sysdata;
184 struct sn_widget_sysdata *widget_sysdata;
/* Controller struct becomes bus->sysdata below. */
186 pci_sysdata = sn_alloc_pci_sysdata();
188 printk(KERN_WARNING "sn_pci_fixup_bus(): Unable to "
189 "allocate memory for pci_sysdata\n");
192 widget_sysdata = kmalloc(sizeof(struct sn_widget_sysdata),
194 if (!widget_sysdata) {
195 printk(KERN_WARNING "sn_pci_fixup_bus(): Unable to "
196 "allocate memory for widget_sysdata\n");
/* Wire the hwgraph bus vertex into the platform data chain. */
201 widget_sysdata->vhdl = pci_bus_to_vertex(bus->number);
202 pci_sysdata->platform_data = (void *)widget_sysdata;
203 bus->sysdata = pci_sysdata;
209 * sn_pci_fixup_slot() - This routine sets up a slot's resources
210 * consistent with the Linux PCI abstraction layer. Resources acquired
211 * from our PCI provider include PIO maps to BAR space and interrupt
215 sn_pci_fixup_slot(struct pci_dev *dev)
217 extern int bit_pos_to_irq(int);
223 struct pci_controller *pci_sysdata;
224 struct sn_device_sysdata *device_sysdata;
225 pciio_intr_line_t lines = 0;
226 vertex_hdl_t device_vertex;
227 pciio_provider_t *pci_provider;
228 pciio_intr_t intr_handle;
230 /* Allocate a controller structure */
231 pci_sysdata = sn_alloc_pci_sysdata();
233 printk(KERN_WARNING "sn_pci_fixup_slot: Unable to "
234 "allocate memory for pci_sysdata\n");
238 /* Set the device vertex */
239 device_sysdata = kmalloc(sizeof(struct sn_device_sysdata), GFP_KERNEL);
240 if (!device_sysdata) {
241 printk(KERN_WARNING "sn_pci_fixup_slot: Unable to "
242 "allocate memory for device_sysdata\n");
/* Link the device's hwgraph vertex and provider ops into dev->sysdata. */
247 device_sysdata->vhdl = devfn_to_vertex(dev->bus->number, dev->devfn);
248 pci_sysdata->platform_data = (void *) device_sysdata;
249 dev->sysdata = pci_sysdata;
250 set_pci_provider(device_sysdata);
/* Read the command word; IO/MEM enable bits are OR'ed in below. */
252 pci_read_config_word(dev, PCI_COMMAND, &cmd);
255 * Set the resources address correctly. The assumption here
256 * is that the addresses in the resource structure has been
257 * read from the card and it was set in the card by our
258 * Infrastructure. NOTE: PIC and TIOCP don't have big-window
259 * support for PCI I/O space. So by mapping the I/O space
260 * first we will attempt to use Device(x) registers for I/O
261 * BARs (which can't use big windows like MEM BARs can).
263 vhdl = device_sysdata->vhdl;
265 /* Allocate the IORESOURCE_IO space first */
266 for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
267 unsigned long start, end, addr;
269 device_sysdata->pio_map[idx] = NULL;
271 if (!(dev->resource[idx].flags & IORESOURCE_IO))
274 start = dev->resource[idx].start;
275 end = dev->resource[idx].end;
/* Get a kernel virtual PIO mapping for this BAR window. */
280 addr = (unsigned long)pciio_pio_addr(vhdl, 0,
281 PCIIO_SPACE_WIN(idx), 0, size,
282 &device_sysdata->pio_map[idx], 0);
/* On map failure the resource is zeroed so nothing uses it. */
285 dev->resource[idx].start = 0;
286 dev->resource[idx].end = 0;
287 printk("sn_pci_fixup(): pio map failure for "
288 "%s bar%d\n", dev->slot_name, idx);
/* Mark the mapped address uncached before publishing it. */
290 addr |= __IA64_UNCACHED_OFFSET;
291 dev->resource[idx].start = addr;
292 dev->resource[idx].end = addr + size;
295 if (dev->resource[idx].flags & IORESOURCE_IO)
296 cmd |= PCI_COMMAND_IO;
299 /* Allocate the IORESOURCE_MEM space next */
300 for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
301 unsigned long start, end, addr;
303 if ((dev->resource[idx].flags & IORESOURCE_IO))
306 start = dev->resource[idx].start;
307 end = dev->resource[idx].end;
/* Same PIO-mapping dance as the I/O loop above, for MEM BARs. */
312 addr = (unsigned long)pciio_pio_addr(vhdl, 0,
313 PCIIO_SPACE_WIN(idx), 0, size,
314 &device_sysdata->pio_map[idx], 0);
317 dev->resource[idx].start = 0;
318 dev->resource[idx].end = 0;
319 printk("sn_pci_fixup(): pio map failure for "
320 "%s bar%d\n", dev->slot_name, idx);
322 addr |= __IA64_UNCACHED_OFFSET;
323 dev->resource[idx].start = addr;
324 dev->resource[idx].end = addr + size;
327 if (dev->resource[idx].flags & IORESOURCE_MEM)
328 cmd |= PCI_COMMAND_MEMORY;
332 * Update the Command Word on the Card.
334 cmd |= PCI_COMMAND_MASTER; /* If the device doesn't support */
335 /* bit gets dropped .. no harm */
336 pci_write_config_word(dev, PCI_COMMAND, cmd);
/* Interrupt setup: allocate and connect an intr via the provider ops. */
338 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, (unsigned char *)&lines);
339 device_vertex = device_sysdata->vhdl;
340 pci_provider = device_sysdata->pci_provider;
341 device_sysdata->intr_handle = NULL;
/* Record the device being set up so the intr allocator can see it. */
346 irqpdaindr->curr = dev;
348 intr_handle = (pci_provider->intr_alloc)(device_vertex, NULL, lines, device_vertex);
349 if (intr_handle == NULL) {
350 printk(KERN_WARNING "sn_pci_fixup: pcibr_intr_alloc() failed\n");
352 kfree(device_sysdata);
356 device_sysdata->intr_handle = intr_handle;
357 irq = intr_handle->pi_irq;
358 irqpdaindr->device_dev[irq] = dev;
359 (pci_provider->intr_connect)(intr_handle, (intr_func_t)0, (intr_arg_t)0);
362 register_pcibr_intr(irq, (pcibr_intr_t)intr_handle);
/*
 * Build the DMA write-buffer flush list for every non-empty BAR and
 * every interrupt bit this device uses (see sn_dma_flush_init()).
 */
364 for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
365 int ibits = ((pcibr_intr_t)intr_handle)->bi_ibits;
368 size = dev->resource[idx].end -
369 dev->resource[idx].start;
370 if (size == 0) continue;
372 for (i=0; i<8; i++) {
373 if (ibits & (1 << i) ) {
374 extern pcibr_info_t pcibr_info_get(vertex_hdl_t);
375 device_sysdata->dma_flush_list =
376 sn_dma_flush_init(dev->resource[idx].start,
377 dev->resource[idx].end,
380 PCIBR_INFO_SLOT_GET_EXT(pcibr_info_get(device_sysdata->vhdl)));
/* Per-nasid table of flush-device lists, filled in lazily below. */
387 struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
389 /* Initialize the data structures for flushing write buffers after a PIO read.
391 * Take an unused int. pin and associate it with a pin that is in use.
392 * After a PIO read, force an interrupt on the unused pin, forcing a write buffer flush
393 * on the in use pin. This will prevent the race condition between PIO read responses and
396 static struct sn_flush_device_list *
397 sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int slot)
400 unsigned long dnasid;
403 struct sn_flush_device_list *p;
/* Decode nasid / widget / bus / big-window number from the BAR address. */
408 nasid = NASID_GET(start);
409 wid_num = SWIN_WIDGETNUM(start);
410 bus = (start >> 23) & 0x1;
411 bwin = BWIN_WINDOWNUM(start);
/* First reference to this nasid: allocate its per-widget pointer table. */
413 if (flush_nasid_list[nasid].widget_p == NULL) {
414 flush_nasid_list[nasid].widget_p = (struct sn_flush_device_list **)kmalloc((HUB_WIDGET_ID_MAX+1) *
415 sizeof(struct sn_flush_device_list *), GFP_KERNEL);
416 if (!flush_nasid_list[nasid].widget_p) {
417 printk(KERN_WARNING "sn_dma_flush_init: Cannot allocate memory for nasid list\n");
420 memset(flush_nasid_list[nasid].widget_p, 0, (HUB_WIDGET_ID_MAX+1) * sizeof(struct sn_flush_device_list *));
/* Big-window case: recover widget/bus from the hub ITTE register. */
423 int itte_index = bwin - 1;
426 itte = HUB_L(IIO_ITTE_GET(nasid, itte_index));
427 flush_nasid_list[nasid].iio_itte[bwin] = itte;
428 wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT)
429 & IIO_ITTE_WIDGET_MASK;
430 bus = itte & IIO_ITTE_OFFSET_MASK;
431 if (bus == 0x4 || bus == 0x8) {
438 /* if it's IO9, bus 1, we don't care about slots 1 and 4. This is
439 * because these are the IOC4 slots and we don't flush them.
441 if (isIO9(nasid) && bus == 0 && (slot == 1 || slot == 4)) {
/* First reference to this widget: allocate its per-slot device list. */
444 if (flush_nasid_list[nasid].widget_p[wid_num] == NULL) {
445 flush_nasid_list[nasid].widget_p[wid_num] = (struct sn_flush_device_list *)kmalloc(
446 DEV_PER_WIDGET * sizeof (struct sn_flush_device_list), GFP_KERNEL);
447 if (!flush_nasid_list[nasid].widget_p[wid_num]) {
448 printk(KERN_WARNING "sn_dma_flush_init: Cannot allocate memory for nasid sub-list\n");
451 memset(flush_nasid_list[nasid].widget_p[wid_num], 0,
452 DEV_PER_WIDGET * sizeof (struct sn_flush_device_list));
453 p = &flush_nasid_list[nasid].widget_p[wid_num][0];
454 for (i=0; i<DEV_PER_WIDGET;i++) {
/* Find the existing entry for this (pin, bus, slot), if any. */
462 p = &flush_nasid_list[nasid].widget_p[wid_num][0];
463 for (i=0;i<DEV_PER_WIDGET; i++) {
464 if (p->pin == pin && p->bus == bus && p->slot == slot) break;
/* Record this BAR's range in the first free bar_list slot. */
474 for (i=0; i<PCI_ROM_RESOURCE; i++) {
475 if (p->bar_list[i].start == 0) {
476 p->bar_list[i].start = start;
477 p->bar_list[i].end = end;
/* b = base of the bridge's small-window register space for this bus. */
481 b = (void *)(NODE_SWIN_BASE(nasid, wid_num) | (bus << 23) );
483 /* If it's IO9, then slot 2 maps to slot 7 and slot 6 maps to slot 8.
484 * To see this is non-trivial. By drawing pictures and reading manuals and talking
485 * to HW guys, we can see that on IO9 bus 1, slots 7 and 8 are always unused.
486 * Further, since we short-circuit slots 1, 3, and 4 above, we only have to worry
487 * about the case when there is a card in slot 2. A multifunction card will appear
488 * to be in slot 6 (from an interrupt point of view) also. That's the most we'll
489 * have to worry about. A four function card will overload the interrupt lines in
491 * We also need to special case the 12160 device in slot 3. Fortunately, we have
492 * a spare intr. line for pin 4, so we'll use that for the 12160.
493 * All other buses have slot 3 and 4 and slots 7 and 8 unused. Since we can only
494 * see slots 1 and 2 and slots 5 and 6 coming through here for those buses (this
495 * is true only on Pxbricks with 2 physical slots per bus), we just need to add
496 * 2 to the slot number to find an unused slot.
497 * We have convinced ourselves that we will never see a case where two different cards
498 * in two different slots will ever share an interrupt line, so there is no need to
/* IO9 special case: route the forced interrupt via spare pin 6. */
502 if (isIO9(nasid) && ( (IS_ALTIX(nasid) && wid_num == 0xc)
503 || (IS_OPUS(nasid) && wid_num == 0xf) )
506 p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, 6);
507 pcireg_bridge_intr_device_bit_set(b, (1<<18));
508 dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
/* Point the forced interrupt at flush_addr's physical address. */
509 pcireg_bridge_intr_addr_set(b, 6, ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
510 (dnasid << 36) | (0xfUL << 48)));
511 } else if (pin == 2) { /* 12160 SCSI device in IO9 */
512 p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, 4);
513 pcireg_bridge_intr_device_bit_set(b, (2<<12));
514 dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
515 pcireg_bridge_intr_addr_set(b, 4,
516 ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
517 (dnasid << 36) | (0xfUL << 48)));
518 } else { /* slot == 6 */
519 p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, 7);
520 pcireg_bridge_intr_device_bit_set(b, (5<<21));
521 dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
522 pcireg_bridge_intr_addr_set(b, 7,
523 ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
524 (dnasid << 36) | (0xfUL << 48)));
/* Common case (non-IO9): spare pin is "pin + 2", per the comment above. */
527 p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, (pin +2));
528 pcireg_bridge_intr_device_bit_set(b, (pin << (pin * 3)));
529 dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
530 pcireg_bridge_intr_addr_set(b, (pin + 2),
531 ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
532 (dnasid << 36) | (0xfUL << 48)));
538 * linux_bus_cvlink() Creates a link between the Linux PCI Bus number
539 * to the actual hardware component that it represents:
540 * /dev/hw/linux/busnum/0 -> ../../../hw/module/001c01/slab/0/Ibrick/xtalk/15/pci
542 * The bus vertex, when called to devfs_generate_path() returns:
543 * hw/module/001c01/slab/0/Ibrick/xtalk/15/pci
544 * hw/module/001c01/slab/1/Pbrick/xtalk/12/pci-x/0
545 * hw/module/001c01/slab/1/Pbrick/xtalk/12/pci-x/1
548 linux_bus_cvlink(void)
/* Add a hwgraph edge for every populated entry in busnum_to_pcibr_vhdl. */
553 for (index=0; index < MAX_PCI_XWIDGET; index++) {
554 if (!busnum_to_pcibr_vhdl[index])
/* Edge name is the bus number in hex. */
557 sprintf(name, "%x", index);
558 (void) hwgraph_edge_add(linux_busnum, busnum_to_pcibr_vhdl[index],
564 * pci_bus_map_create() - Called by pci_bus_to_hcl_cvlink() to finish the job.
566 * Linux PCI Bus numbers are assigned from lowest module_id numbers
570 pci_bus_map_create(struct pcibr_list_s *softlistp, moduleid_t moduleid)
573 int basebus_num, bus_number;
574 vertex_hdl_t pci_bus = softlistp->bl_vhdl;
575 char moduleid_str[16];
/* Derive the base bus number for this module from its id string. */
577 memset(moduleid_str, 0, 16);
578 format_module_id(moduleid_str, moduleid, MODULE_FORMAT_BRIEF);
579 (void) ioconfig_get_busnum((char *)moduleid_str, &basebus_num);
582 * Assign the correct bus number and also the nasid of this
585 bus_number = basebus_num + pcibr_widget_to_bus(pci_bus);
588 char hwpath[MAXDEVNAME] = "\0";
589 extern int hwgraph_vertex_name_get(vertex_hdl_t, char *, uint);
591 pcibr_soft_t pcibr_soft = softlistp->bl_soft;
592 hwgraph_vertex_name_get(pci_bus, hwpath, MAXDEVNAME);
593 printk("%s:\n\tbus_num %d, basebus_num %d, brick_bus %d, "
594 "bus_vhdl 0x%lx, brick_type %d\n", hwpath, bus_number,
595 basebus_num, pcibr_widget_to_bus(pci_bus),
596 (uint64_t)pci_bus, pcibr_soft->bs_bricktype);
/* Publish the bus vertex in the global lookup table. */
599 busnum_to_pcibr_vhdl[bus_number] = pci_bus;
602 * Pre assign DMA maps needed for 32 Bits Page Map DMA.
604 busnum_to_atedmamaps[bus_number] = (void *) vmalloc(
605 sizeof(struct pcibr_dmamap_s)*MAX_ATE_MAPS);
606 if (busnum_to_atedmamaps[bus_number] <= 0) {
607 printk("pci_bus_map_create: Cannot allocate memory for ate maps\n");
610 memset(busnum_to_atedmamaps[bus_number], 0x0,
611 sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS);
616 * pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure
617 * initialization has completed to set up the mappings between PCI BRIDGE
618 * ASIC and logical pci bus numbers.
620 * Must be called before pci_init() is invoked.
623 pci_bus_to_hcl_cvlink(void)
626 extern pcibr_list_p pcibr_list;
/* For each module, collect its bridges into an ordered temp list. */
628 for (i = 0; i < nummodules; i++) {
629 struct pcibr_list_s *softlistp = pcibr_list;
630 struct pcibr_list_s *first_in_list = NULL;
631 struct pcibr_list_s *last_in_list = NULL;
633 /* Walk the list of pcibr_soft structs looking for matches */
635 struct pcibr_soft_s *pcibr_soft = softlistp->bl_soft;
638 /* Is this PCI bus associated with this moduleid? */
639 moduleid = NODE_MODULEID(
640 NASID_TO_COMPACT_NODEID(pcibr_soft->bs_nasid));
641 if (modules[i]->id == moduleid) {
642 struct pcibr_list_s *new_element;
/*
 * NOTE(review): allocation sized with sizeof(struct pcibr_soft_s)
 * but used as a struct pcibr_list_s node — works only if the soft
 * struct is at least as large; confirm against the full source.
 */
644 new_element = kmalloc(sizeof (struct pcibr_soft_s), GFP_KERNEL);
645 if (new_element == NULL) {
646 printk("%s: Couldn't allocate memory\n",__FUNCTION__);
/* Copy the list node's payload; links are rebuilt below. */
649 new_element->bl_soft = softlistp->bl_soft;
650 new_element->bl_vhdl = softlistp->bl_vhdl;
651 new_element->bl_next = NULL;
653 /* list empty so just put it on the list */
654 if (first_in_list == NULL) {
655 first_in_list = new_element;
656 last_in_list = new_element;
657 softlistp = softlistp->bl_next;
662 * BASEIO IObricks attached to a module have
663 * a higher priority than non BASEIO IOBricks
664 * when it comes to persistant pci bus
665 * numbering, so put them on the front of the
668 if (isIO9(pcibr_soft->bs_nasid)) {
669 new_element->bl_next = first_in_list;
670 first_in_list = new_element;
672 last_in_list->bl_next = new_element;
673 last_in_list = new_element;
676 softlistp = softlistp->bl_next;
680 * We now have a list of all the pci bridges associated with
681 * the module_id, modules[i]. Call pci_bus_map_create() for
684 softlistp = first_in_list;
687 struct pcibr_list_s *next = softlistp->bl_next;
688 iobrick = iomoduleid_get(softlistp->bl_soft->bs_nasid);
689 pci_bus_map_create(softlistp, iobrick);
696 * Create the Linux PCI bus number vertex link.
698 (void)linux_bus_cvlink();
699 (void)ioconfig_bus_new_entries();
705 * Ugly hack to get PCI setup until we have a proper ACPI namespace.
708 #define PCI_BUSES_TO_SCAN 256
710 extern struct pci_ops sn_pci_ops;
/*
 * sn_pci_init() - late platform PCI setup for sn2: scans all buses,
 * then fixes up each bus and device with hwgraph-backed sysdata.
 * (The function's signature line is not visible in this chunk.)
 */
715 struct pci_controller *controller;
716 struct list_head *ln;
717 struct pci_bus *pci_bus = NULL;
718 struct pci_dev *pci_dev = NULL;
721 #ifdef CONFIG_PROC_FS
722 extern void register_sn_procfs(void);
724 extern void sgi_master_io_infr_init(void);
725 extern void sn_init_cpei_timer(void);
/* Bail out on non-sn2 hardware and on the simulator. */
728 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_SIMULATOR())
732 * This is needed to avoid bounce limit checks in the blk layer
734 ia64_max_iommu_merge_mask = ~PAGE_MASK;
737 * set pci_raw_ops, etc.
740 sgi_master_io_infr_init();
/* Initialize interrupt vector blocks for every node. */
742 for (cnode = 0; cnode < numnodes; cnode++) {
743 extern void intr_init_vecblk(cnodeid_t);
744 intr_init_vecblk(cnode);
747 sn_init_cpei_timer();
749 #ifdef CONFIG_PROC_FS
750 register_sn_procfs();
/* One zeroed controller is shared by all scanned buses. */
753 controller = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
755 memset(controller, 0, sizeof(struct pci_controller));
756 /* just allocate some devices and fill in the pci_dev structs */
757 for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
758 pci_scan_bus(i, &sn_pci_ops, controller);
762 * actually find devices and fill in hwgraph structs
768 * Initialize the pci bus vertex in the pci_bus struct.
770 for( ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
771 pci_bus = pci_bus_b(ln);
772 ret = sn_pci_fixup_bus(pci_bus);
775 "sn_pci_fixup: sn_pci_fixup_bus fails : error %d\n",
782 * set the root start and end so that drivers calling check_region()
783 * won't see a conflict
786 #ifdef CONFIG_IA64_SGI_SN_SIM
787 if (! IS_RUNNING_ON_SIMULATOR()) {
788 ioport_resource.start = 0xc000000000000000;
789 ioport_resource.end = 0xcfffffffffffffff;
794 * Set the root start and end for Mem Resource.
796 iomem_resource.start = 0;
797 iomem_resource.end = 0xffffffffffffffff;
800 * Initialize the device vertex in the pci_dev struct.
802 while ((pci_dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
803 ret = sn_pci_fixup_slot(pci_dev);
806 "sn_pci_fixup: sn_pci_fixup_slot fails : error %d\n",
/* Run at subsys initcall time, before device drivers probe. */
815 subsys_initcall(sn_pci_init);