diff -Nru a/arch/ia64/sn/io/Makefile b/arch/ia64/sn/io/Makefile
--- a/arch/ia64/sn/io/Makefile	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/Makefile	Thu Nov  6 13:42:35 2003
@@ -9,7 +9,5 @@
 # Makefile for the sn io routines.
 #
  
-obj-y += sgi_if.o xswitch.o sgi_io_sim.o cdl.o \
-	 io.o machvec/ drivers/ platform_init/ sn2/ hwgfs/
+obj-y += sgi_if.o xswitch.o cdl.o io.o snia_if.o \
+	 machvec/ drivers/ platform_init/ sn2/ hwgfs/ tio/
diff -Nru a/arch/ia64/sn/io/cdl.c b/arch/ia64/sn/io/cdl.c
--- a/arch/ia64/sn/io/cdl.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/cdl.c	Thu Nov  6 13:42:35 2003
@@ -11,17 +10,18 @@
 #include <linux/types.h>
 #include <asm/sn/sgi.h>
 #include <asm/io.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
-#include <asm/sn/pci/bridge.h>
+#include <asm/sn/pci/pic.h>
+#include <asm/sn/pci/tiocp.h>
+#include <asm/sn/tio/tioca.h>
 #include "asm/sn/ioerror_handling.h"
 #include <asm/sn/xtalk/xbow.h>
 
 /* these get called directly in cdl_add_connpt in fops bypass hack */
-extern int pcibr_attach(vertex_hdl_t);
 extern int xbow_attach(vertex_hdl_t);
 extern int pic_attach(vertex_hdl_t);
-
+extern int tiocp_attach(vertex_hdl_t);
+extern int tioca_attach(vertex_hdl_t);
 
 /*
  *    cdl: Connection and Driver List
@@ -32,14 +32,15 @@
  *	IO Infrastructure Drivers e.g. pcibr.
  */
 
-#define MAX_SGI_IO_INFRA_DRVR 7
+#define MAX_SGI_IO_INFRA_DRVR 8
 
 static struct cdl sgi_infrastructure_drivers[MAX_SGI_IO_INFRA_DRVR] =
 {
-	{ XBRIDGE_WIDGET_PART_NUM, XBRIDGE_WIDGET_MFGR_NUM, pcibr_attach /* &pcibr_fops  */},
-	{ BRIDGE_WIDGET_PART_NUM,  BRIDGE_WIDGET_MFGR_NUM,  pcibr_attach /* &pcibr_fops */},
-	{ PIC_WIDGET_PART_NUM_BUS0,  PIC_WIDGET_MFGR_NUM,   pic_attach /* &pic_fops */},
-	{ PIC_WIDGET_PART_NUM_BUS1,  PIC_WIDGET_MFGR_NUM,   pic_attach /* &pic_fops */},
+	{ TIOCA_PART_NUM, TIOCA_MFGR_NUM, tioca_attach /* &tioca_fops */},
+	{ TIOCP_PART_NUM_CP0,  TIOCP_MFGR_NUM,   tiocp_attach /* &pcibr_fops */},
+	{ TIOCP_PART_NUM_CP1,  TIOCP_MFGR_NUM,   tiocp_attach /* &pcibr_fops */},
+	{ PIC_WIDGET_PART_NUM_BUS0,  PIC_WIDGET_MFGR_NUM,   pic_attach /* &pcibr_fops */},
+	{ PIC_WIDGET_PART_NUM_BUS1,  PIC_WIDGET_MFGR_NUM,   pic_attach /* &pcibr_fops */},
 	{ XXBOW_WIDGET_PART_NUM,   XXBOW_WIDGET_MFGR_NUM,   xbow_attach /* &xbow_fops */},
 	{ XBOW_WIDGET_PART_NUM,    XBOW_WIDGET_MFGR_NUM,    xbow_attach /* &xbow_fops */},
 	{ PXBOW_WIDGET_PART_NUM,   XXBOW_WIDGET_MFGR_NUM,   xbow_attach /* &xbow_fops */},
diff -Nru a/arch/ia64/sn/io/drivers/ioconfig_bus.c b/arch/ia64/sn/io/drivers/ioconfig_bus.c
--- a/arch/ia64/sn/io/drivers/ioconfig_bus.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/drivers/ioconfig_bus.c	Thu Nov  6 13:42:35 2003
@@ -17,12 +17,13 @@
 #include <linux/pci.h>
 
 #include <asm/sn/sgi.h>
+#include <asm/uaccess.h>
+
 #include <asm/io.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/ioconfig_bus.h>
 
@@ -34,15 +35,15 @@
  */
 static vertex_hdl_t ioconfig_bus_handle;
 static unsigned long ioconfig_bus_debug;
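+/*
+ * Scratch buffer for ioconfig_bus_ioctl() below; keeping it at file scope
+ * assumes the ioctl is never entered concurrently (no locking is taken).
+ */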
+static struct ioconfig_parm parm;
 
+/* #define IOCONFIG_BUS_DEBUG 1 */
 #ifdef IOCONFIG_BUS_DEBUG
 #define DBG(x...)	printk(x)
 #else
 #define DBG(x...)
 #endif
 
-static u64 ioconfig_file;
-static u64 ioconfig_file_size;
 static u64 ioconfig_activated;
 static char ioconfig_kernopts[128];
 
@@ -160,8 +161,11 @@
 	struct ascii_moduleid *moduleid;
 
 	line = kmalloc(256, GFP_KERNEL);
-	memset(line, 0,256);
 	name = kmalloc(125, GFP_KERNEL);
+	if (!line || !name)
+		panic("build_moduleid_table(): Unable to allocate memory");
+
+	memset(line, 0, 256);
 	memset(name, 0, 125);
 	moduleid = table;
 	curr = file_contents;
@@ -217,39 +221,12 @@
 ioconfig_bus_init(void)
 {
 
-	struct ia64_sal_retval ret_stuff;
-	u64	*temp;
-	int	cnode;
-
 	DBG("ioconfig_bus_init called.\n");
 
-        for (cnode = 0; cnode < numnodes; cnode++) {
-		nasid_t nasid;
-		/*
-	 	 * Make SAL call to get the address of the bus configuration table.
-	 	 */
-		ret_stuff.status = (uint64_t)0;
-		ret_stuff.v0 = (uint64_t)0;
-		ret_stuff.v1 = (uint64_t)0;
-		ret_stuff.v2 = (uint64_t)0;
-		nasid = COMPACT_TO_NASID_NODEID(cnode);
-		SAL_CALL(ret_stuff, SN_SAL_BUS_CONFIG, 0, nasid, 0, 0, 0, 0, 0);
-		temp = (u64 *)TO_NODE_CAC(nasid, ret_stuff.v0);
-		ioconfig_file = *temp;
-		DBG("ioconfig_bus_init: Nasid %d ret_stuff.v0 0x%lx\n", nasid,
-			ret_stuff.v0);
-		if (ioconfig_file) {
-			ioconfig_file_size = ret_stuff.v1;
-			ioconfig_file = (ioconfig_file | CACHEABLE_MEM_SPACE);
-			ioconfig_activated = 1;
-			break;
-		}
-	}
-
-	DBG("ioconfig_bus_init: ret_stuff.v0 %p ioconfig_file %p %d\n",
-		ret_stuff.v0, (void *)ioconfig_file, (int)ioconfig_file_size);
-
 	ioconfig_bus_table = kmalloc( 512, GFP_KERNEL );
+	if (!ioconfig_bus_table)
+		BUG(); /* Seriously, we should not be out of memory at init */
+
 	memset(ioconfig_bus_table, 0, 512);
 
 	/*
@@ -262,15 +239,6 @@
 		DBG("ioconfig_bus_init: Kernel Options given.\n");
 		(void) build_moduleid_table((char *)ioconfig_kernopts, ioconfig_bus_table);
 		(void) dump_ioconfig_table();
-		return;
-	}
-
-	if (ioconfig_activated) {
-		DBG("ioconfig_bus_init: ioconfig file given.\n");
-		(void) build_moduleid_table((char *)ioconfig_file, ioconfig_bus_table);
-		(void) dump_ioconfig_table();
-	} else {
-		DBG("ioconfig_bus_init: ioconfig command not executed in prom\n");
 	}
 
 }
@@ -278,9 +246,7 @@
 void
 ioconfig_bus_new_entries(void)
 {
-
-	
-	int index = 0;
+	int index;
 	struct ascii_moduleid *temp;
 
 	if ((ioconfig_activated) && (free_entry > new_entry)) {
@@ -302,17 +268,18 @@
 static int ioconfig_bus_ioctl(struct inode * inode, struct file * file,
         unsigned int cmd, unsigned long arg)
 {
-
-	struct ioconfig_parm parm;
-
 	/*
 	 * Copy in the parameters.
 	 */
-	copy_from_user(&parm, (char *)arg, sizeof(struct ioconfig_parm));
+	if (copy_from_user(&parm, (char *)arg, sizeof(struct ioconfig_parm)))
+		return -EFAULT;
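+	/*
+	 * Report how many entries were added since the last query and copy
+	 * the new table entries out to the caller's buffer.
+	 */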
 	parm.number = free_entry - new_entry;
 	parm.ioconfig_activated = ioconfig_activated;
-	copy_to_user((char *)arg, &parm, sizeof(struct ioconfig_parm));
-	copy_to_user((char *)parm.buffer, &ioconfig_bus_table[new_entry], sizeof(struct  ascii_moduleid) * (free_entry - new_entry));
+	if (copy_to_user((char *)arg, &parm, sizeof(struct ioconfig_parm)))
+		return -EFAULT;
+
+	if (copy_to_user((char *)parm.buffer, &ioconfig_bus_table[new_entry], sizeof(struct  ascii_moduleid) * (free_entry - new_entry)))
+		return -EFAULT;
 
 	return 0;
 }
@@ -344,9 +311,9 @@
 }
 
 struct file_operations ioconfig_bus_fops = {
-	.ioctl = ioconfig_bus_ioctl,
-	.open = ioconfig_bus_open,		/* open */
-	.release = ioconfig_bus_close	/* release */
+	.ioctl	= ioconfig_bus_ioctl,
+	.open	= ioconfig_bus_open,	/* open */
+	.release = ioconfig_bus_close	/* release */
 };
 
 
@@ -357,10 +324,8 @@
  */
 int init_ioconfig_bus(void)
 {
-	ioconfig_bus_handle = NULL;
 	ioconfig_bus_handle = hwgraph_register(hwgraph_root, ".ioconfig_bus",
-		        0, 0,
-			0, 0,
+			0, 0, 0, 0,
 			S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
 			&ioconfig_bus_fops, NULL);
 
@@ -368,13 +333,11 @@
 		panic("Unable to create SGI PERSISTENT BUS NUMBERING Driver.\n");
 	}
 
-	return(0);
-
+	return 0;
 }
 
 static int __init ioconfig_bus_setup (char *str)
 {
-
 	char *temp;
 
 	DBG("ioconfig_bus_setup: Kernel Options %s\n", str);
diff -Nru a/arch/ia64/sn/io/io.c b/arch/ia64/sn/io/io.c
--- a/arch/ia64/sn/io/io.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/io.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id: io.c,v 1.2 2001/06/26 14:02:43 pfg Exp $
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -20,7 +19,6 @@
 #include <asm/sn/io.h>
 #include <asm/sn/sn_private.h>
 #include <asm/sn/addrs.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/hcl_util.h>
 #include <asm/sn/intr.h>
@@ -29,7 +27,6 @@
 #include <asm/sn/sn_cpuid.h>
 
 extern xtalk_provider_t hub_provider;
-extern void hub_intr_init(vertex_hdl_t hubv);
 
 static int force_fire_and_forget = 1;
 static int ignore_conveyor_override;
@@ -86,7 +83,7 @@
 	}
 	hub_set_piomode(nasid, HUB_PIO_CONVEYOR);
 
-	mutex_spinlock_init(&hubinfo->h_bwlock);
+	spin_lock_init(&hubinfo->h_bwlock);
 	init_waitqueue_head(&hubinfo->h_bwwait);
 }
 
@@ -119,7 +116,6 @@
 	int bigwin, free_bw_index;
 	nasid_t nasid;
 	volatile hubreg_t junk;
-	unsigned long s;
 	caddr_t kvaddr;
 #ifdef PIOMAP_UNC_ACC_SPACE
 	uint64_t addr;
@@ -167,7 +163,7 @@
 	 */
 tryagain:
 	free_bw_index = -1;
-	s = mutex_spinlock(&hubinfo->h_bwlock);
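+	/*
+	 * h_bwlock is now a plain spinlock (no interrupt flags are saved);
+	 * big-window allocation is assumed never to run in interrupt context.
+	 */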
+	spin_lock(&hubinfo->h_bwlock);
 	for (bigwin=0; bigwin < HUB_NUM_BIG_WINDOW; bigwin++) {
 		bw_piomap = hubinfo_bwin_piomap_get(hubinfo, bigwin);
 
@@ -187,7 +183,7 @@
 		if ( xtalk_addr == bw_piomap->hpio_xtalk_info.xp_xtalk_addr &&
 		     widget == bw_piomap->hpio_xtalk_info.xp_target) {
 			bw_piomap->hpio_holdcnt++;
-			mutex_spinunlock(&hubinfo->h_bwlock, s);
+			spin_unlock(&hubinfo->h_bwlock);
 			return(bw_piomap);
 		}
 	}
@@ -267,7 +263,7 @@
 		bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID;
 
 done:
-	mutex_spinunlock(&hubinfo->h_bwlock, s);
+	spin_unlock(&hubinfo->h_bwlock);
 	return(bw_piomap);
 }
 
@@ -287,7 +283,6 @@
 	vertex_hdl_t hubv;
 	hubinfo_t hubinfo;
 	nasid_t nasid;
-	unsigned long s;
 
 	/* 
 	 * Small windows are permanently mapped to corresponding widgets,
@@ -303,7 +298,7 @@
 	hubinfo_get(hubv, &hubinfo);
 	nasid = hubinfo->h_nasid;
 
-	s = mutex_spinlock(&hubinfo->h_bwlock);
+	spin_lock(&hubinfo->h_bwlock);
 
 	/*
 	 * If this is the last hold on this mapping, free it.
@@ -321,7 +316,7 @@
 		wake_up(&hubinfo->h_bwwait);
 	}
 
-	mutex_spinunlock(&hubinfo->h_bwlock, s);
+	spin_unlock(&hubinfo->h_bwlock);
 }
 
 /*
@@ -442,7 +437,7 @@
 hub_dmamap_free(hub_dmamap_t hub_dmamap)
 {
 	hub_dmamap->hdma_flags &= ~HUB_DMAMAP_IS_VALID;
-	kern_free(hub_dmamap);
+	kfree(hub_dmamap);
 }
 
 /*
@@ -463,12 +458,9 @@
 	if (dmamap->hdma_flags & HUB_DMAMAP_USED) {
 	    /* If the map is FIXED, re-use is OK. */
 	    if (!(dmamap->hdma_flags & HUB_DMAMAP_IS_FIXED)) {
+		char name[MAXDEVNAME];
 		vhdl = dmamap->hdma_xtalk_info.xd_dev;
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-		printk(KERN_WARNING  "%v: hub_dmamap_addr re-uses dmamap.\n",vhdl);
-#else
-		printk(KERN_WARNING  "%p: hub_dmamap_addr re-uses dmamap.\n", (void *)vhdl);
-#endif
+		printk(KERN_WARNING  "%s: hub_dmamap_addr re-uses dmamap.\n", vertex_to_name(vhdl, name, MAXDEVNAME));
 	    }
 	} else {
 		dmamap->hdma_flags |= HUB_DMAMAP_USED;
@@ -496,12 +488,9 @@
 	if (hub_dmamap->hdma_flags & HUB_DMAMAP_USED) {
 	    /* If the map is FIXED, re-use is OK. */
 	    if (!(hub_dmamap->hdma_flags & HUB_DMAMAP_IS_FIXED)) {
+		char name[MAXDEVNAME];
 		vhdl = hub_dmamap->hdma_xtalk_info.xd_dev;
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-		printk(KERN_WARNING  "%v: hub_dmamap_list re-uses dmamap\n",vhdl);
-#else
-		printk(KERN_WARNING  "%p: hub_dmamap_list re-uses dmamap\n", (void *)vhdl);
-#endif
+		printk(KERN_WARNING  "%s: hub_dmamap_list re-uses dmamap\n", vertex_to_name(vhdl, name, MAXDEVNAME));
 	    }
 	} else {
 		hub_dmamap->hdma_flags |= HUB_DMAMAP_USED;
@@ -525,12 +514,9 @@
 	} else {
 	    /* If the map is FIXED, re-done is OK. */
 	    if (!(hub_dmamap->hdma_flags & HUB_DMAMAP_IS_FIXED)) {
+		char name[MAXDEVNAME];
 		vhdl = hub_dmamap->hdma_xtalk_info.xd_dev;
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-		printk(KERN_WARNING  "%v: hub_dmamap_done already done with dmamap\n",vhdl);
-#else
-		printk(KERN_WARNING  "%p: hub_dmamap_done already done with dmamap\n", (void *)vhdl);
-#endif
+		printk(KERN_WARNING  "%s: hub_dmamap_done already done with dmamap\n", vertex_to_name(vhdl, name, MAXDEVNAME));
 	    }
 	}
 }
@@ -601,7 +587,6 @@
 hub_provider_startup(vertex_hdl_t hubv)
 {
 	hub_pio_init(hubv);
-	hub_intr_init(hubv);
 }
 
 /*
@@ -808,4 +793,3 @@
 	(xtalk_provider_startup_f *)	hub_provider_startup,
 	(xtalk_provider_shutdown_f *)	hub_provider_shutdown,
 };
-
diff -Nru a/arch/ia64/sn/io/machvec/pci.c b/arch/ia64/sn/io/machvec/pci.c
--- a/arch/ia64/sn/io/machvec/pci.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/machvec/pci.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /* 
- *
  * SNI64 specific PCI support for SNI IO.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -8,27 +7,8 @@
  *
  * Copyright (c) 1997, 1998, 2000-2003 Silicon Graphics, Inc.  All rights reserved.
  */
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/pci.h>
-#include <asm/sn/types.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/iograph.h>
-#include <asm/param.h>
-#include <asm/sn/pio.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
 #include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/pci/bridge.h>
 
 /*
  * These routines are only used during sn_pci_init for probing each bus, and
diff -Nru a/arch/ia64/sn/io/machvec/pci_bus_cvlink.c b/arch/ia64/sn/io/machvec/pci_bus_cvlink.c
--- a/arch/ia64/sn/io/machvec/pci_bus_cvlink.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/machvec/pci_bus_cvlink.c	Thu Nov  6 13:42:35 2003
@@ -6,37 +6,10 @@
  * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
  */
 
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <asm/sn/types.h>
 #include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/driver.h>
 #include <asm/sn/iograph.h>
-#include <asm/param.h>
-#include <asm/sn/pio.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/xtalk/xtalkaddrs.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/pci/pci_bus_cvlink.h>
-#include <asm/sn/simulator.h>
 #include <asm/sn/sn_cpuid.h>
-#include <asm/sn/arch.h>
 
 extern int bridge_rev_b_data_check_disable;
 
@@ -47,14 +20,18 @@
 static int done_probing;
 extern irqpda_t *irqpdaindr;
 
-static int pci_bus_map_create(vertex_hdl_t xtalk, char * io_moduleid);
+static int pci_bus_map_create(vertex_hdl_t xtalk, int brick_type, char * io_moduleid);
 vertex_hdl_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);
 
 extern void register_pcibr_intr(int irq, pcibr_intr_t intr);
 
 void sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int slot);
+extern int cbrick_type_get_nasid(nasid_t);
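+
+/*
+ * The IO9 base I/O card hangs off a different widget depending on the
+ * compute-brick type; the IS_ALTIX()/IS_OPUS() helpers below let
+ * sn_dma_flush_init() tell an Altix C-brick apart from an Opus brick.
+ */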
 
 
+#define IS_OPUS(nasid) (cbrick_type_get_nasid(nasid) == MODULE_OPUSBRICK)
+#define IS_ALTIX(nasid) (cbrick_type_get_nasid(nasid) == MODULE_CBRICK)
+
 /*
  * For the given device, initialize whether it is a PIC device.
  */
@@ -148,8 +125,8 @@
 	 */
 	if (func == 0) {
         	sprintf(name, "%d", slot);
-		if (hwgraph_traverse(pci_bus, name, &device_vertex) == 
-			GRAPH_SUCCESS) {
+		if (hwgraph_traverse(pci_bus, name, &device_vertex) ==
+				GRAPH_SUCCESS) {
 			if (device_vertex) {
 				return(device_vertex);
 			}
@@ -170,54 +147,6 @@
 	return(device_vertex);
 }
 
-/*
- * For the given device, initialize the addresses for both the Device(x) Flush 
- * Write Buffer register and the Xbow Flush Register for the port the PCI bus 
- * is connected.
- */
-static void
-set_flush_addresses(struct pci_dev *device_dev, 
-	struct sn_device_sysdata *device_sysdata)
-{
-	pciio_info_t pciio_info = pciio_info_get(device_sysdata->vhdl);
-	pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
-	pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-    	bridge_t               *bridge = pcibr_soft->bs_base;
-	nasid_t			nasid;
-
-	/*
-	 * Get the nasid from the bridge.
-	 */
-	nasid = NASID_GET(device_sysdata->dma_buf_sync);
-	if (IS_PIC_DEVICE(device_dev)) {
-		device_sysdata->dma_buf_sync = (volatile unsigned int *)
-			&bridge->b_wr_req_buf[pciio_slot].reg;
-		device_sysdata->xbow_buf_sync = (volatile unsigned int *)
-			XBOW_PRIO_LINKREGS_PTR(NODE_SWIN_BASE(nasid, 0),
-			pcibr_soft->bs_xid);
-	} else {
-		/*
-		 * Accessing Xbridge and Xbow register when SHUB swapoper is on!.
-		 */
-		device_sysdata->dma_buf_sync = (volatile unsigned int *)
-			((uint64_t)&(bridge->b_wr_req_buf[pciio_slot].reg)^4);
-		device_sysdata->xbow_buf_sync = (volatile unsigned int *)
-			((uint64_t)(XBOW_PRIO_LINKREGS_PTR(
-			NODE_SWIN_BASE(nasid, 0), pcibr_soft->bs_xid)) ^ 4);
-	}
-
-#ifdef DEBUG
-	printk("set_flush_addresses: dma_buf_sync %p xbow_buf_sync %p\n", 
-		device_sysdata->dma_buf_sync, device_sysdata->xbow_buf_sync);
-
-printk("set_flush_addresses: dma_buf_sync\n");
-	while((volatile unsigned int )*device_sysdata->dma_buf_sync);
-printk("set_flush_addresses: xbow_buf_sync\n");
-	while((volatile unsigned int )*device_sysdata->xbow_buf_sync);
-#endif
-
-}
-
 struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
 
 // Initialize the data structures for flushing write buffers after a PIO read.
@@ -233,8 +162,7 @@
 	int wid_num;
 	int bus;
 	struct sn_flush_device_list *p;
-	bridge_t *b;
-	bridgereg_t dev_sel;
+	pci_bridge_t *b;
 	extern int isIO9(int);
 	int bwin;
 	int i;
@@ -247,6 +175,9 @@
 	if (flush_nasid_list[nasid].widget_p == NULL) {
 		flush_nasid_list[nasid].widget_p = (struct sn_flush_device_list **)kmalloc((HUB_WIDGET_ID_MAX+1) *
 			sizeof(struct sn_flush_device_list *), GFP_KERNEL);
+		if (!flush_nasid_list[nasid].widget_p)
+			BUG(); /* Cannot afford to run out of memory. */
+
 		memset(flush_nasid_list[nasid].widget_p, 0, (HUB_WIDGET_ID_MAX+1) * sizeof(struct sn_flush_device_list *));
 	}
 	if (bwin > 0) {
@@ -326,6 +257,9 @@
 	if (flush_nasid_list[nasid].widget_p[wid_num] == NULL) {
 		flush_nasid_list[nasid].widget_p[wid_num] = (struct sn_flush_device_list *)kmalloc(
 			DEV_PER_WIDGET * sizeof (struct sn_flush_device_list), GFP_KERNEL);
+		if (!flush_nasid_list[nasid].widget_p[wid_num])
+			BUG(); /* Cannot afford to run out of memory. */
+
 		memset(flush_nasid_list[nasid].widget_p[wid_num], 0, 
 			DEV_PER_WIDGET * sizeof (struct sn_flush_device_list));
 		p = &flush_nasid_list[nasid].widget_p[wid_num][0];
@@ -354,7 +288,7 @@
 			break;
 		}
 	}
-	b = (bridge_t *)(NODE_SWIN_BASE(nasid, wid_num) | (bus << 23) );
+	b = (pci_bridge_t *)(NODE_SWIN_BASE(nasid, wid_num) | (bus << 23) );
 
 	// If it's IO9, then slot 2 maps to slot 7 and slot 6 maps to slot 8.
 	// To see this is non-trivial.  By drawing pictures and reading manuals and talking
@@ -374,40 +308,32 @@
 	// in two different slots will ever share an interrupt line, so there is no need to
 	// special case this.
 
-	if (isIO9(nasid) && wid_num == 0xc && bus == 0) {
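+	/*
+	 * The interrupt address programmed below packs the node offset of
+	 * flush_addr into bits 35:0, the destination nasid into bits 47:36,
+	 * and sets bits 51:48 to 0xf (encoding carried over as-is from the
+	 * old bridge register writes, not re-derived from hardware manuals).
+	 */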
+	if (isIO9(nasid) && ( (IS_ALTIX(nasid) && wid_num == 0xc) || (IS_OPUS(nasid) && wid_num == 0xf) ) && bus == 0) {
 		if (slot == 2) {
-			p->force_int_addr = (unsigned long)&b->b_force_always[6].intr;
-			dev_sel = b->b_int_device;
-			dev_sel |= (1<<18);
-			b->b_int_device = dev_sel;
+			p->force_int_addr = (unsigned long)pcireg_force_always_addr_get(b, 6);
+			pcireg_intr_device_bit_set(b, (1<<18));
 			dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
-			b->p_int_addr_64[6] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) | 
-				(dnasid << 36) | (0xfUL << 48);
+			pcireg_intr_addr_set(b, 6, ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+						    (dnasid << 36) | (0xfUL << 48)));
 		} else  if (slot == 3) { /* 12160 SCSI device in IO9 */
-			p->force_int_addr = (unsigned long)&b->b_force_always[4].intr;
-			dev_sel = b->b_int_device;
-			dev_sel |= (2<<12);
-			b->b_int_device = dev_sel;
+			p->force_int_addr = (unsigned long)pcireg_force_always_addr_get(b, 4);
+			pcireg_intr_device_bit_set(b, (2<<12));
 			dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
-			b->p_int_addr_64[4] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) | 
-				(dnasid << 36) | (0xfUL << 48);
+			pcireg_intr_addr_set(b, 4, ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+						    (dnasid << 36) | (0xfUL << 48)));
 		} else { /* slot == 6 */
-			p->force_int_addr = (unsigned long)&b->b_force_always[7].intr;
-			dev_sel = b->b_int_device;
-			dev_sel |= (5<<21);
-			b->b_int_device = dev_sel;
+			p->force_int_addr = (unsigned long)pcireg_force_always_addr_get(b, 7);
+			pcireg_intr_device_bit_set(b, (5<<21));
 			dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
-			b->p_int_addr_64[7] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) | 
-				(dnasid << 36) | (0xfUL << 48);
+			pcireg_intr_addr_set(b, 7, ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+						    (dnasid << 36) | (0xfUL << 48)));
 		}
 	} else {
-		p->force_int_addr = (unsigned long)&b->b_force_always[pin + 2].intr;
-		dev_sel = b->b_int_device;
-		dev_sel |= ((slot - 1) << ( pin * 3) );
-		b->b_int_device = dev_sel;
+		p->force_int_addr = (unsigned long)pcireg_force_always_addr_get(b, (pin +2));
+		pcireg_intr_device_bit_set(b, ((slot - 1) << ( pin * 3)));
 		dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
-		b->p_int_addr_64[pin + 2] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) | 
-			(dnasid << 36) | (0xfUL << 48);
+		pcireg_intr_addr_set(b, (pin + 2), ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+						    (dnasid << 36) | (0xfUL << 48)));
 	}
 }
 
@@ -430,6 +356,7 @@
 	int cpuid;
 	vertex_hdl_t device_vertex;
 	pciio_intr_line_t lines;
+	extern void sn_pci_find_bios(void);
 	extern int numnodes;
 	int cnode;
 
@@ -437,12 +364,9 @@
 #ifdef CONFIG_PROC_FS
 		extern void register_sn_procfs(void);
 #endif
-		extern void irix_io_init(void);
 		extern void sn_init_cpei_timer(void);
-		
-		init_hcl();
-		irix_io_init();
-		
+
+		sn_pci_find_bios();
 		for (cnode = 0; cnode < numnodes; cnode++) {
 			extern void intr_init_vecblk(cnodeid_t);
 			intr_init_vecblk(cnode);
@@ -490,25 +414,22 @@
 		unsigned int irq;
 		int idx;
 		u16 cmd;
+		vertex_hdl_t vhdl;
 		unsigned long size;
 		extern int bit_pos_to_irq(int);
 
 		/* Set the device vertex */
 
 		device_sysdata = kmalloc(sizeof(struct sn_device_sysdata),
-					 GFP_KERNEL);
+					GFP_KERNEL);
+		if (!device_sysdata)
+			BUG(); /* Cannot afford to run out of memory */
+
 		device_sysdata->vhdl = devfn_to_vertex(device_dev->bus->number, device_dev->devfn);
 		device_sysdata->isa64 = 0;
-		device_vertex = device_sysdata->vhdl;
-
 		device_dev->sysdata = (void *) device_sysdata;
 		set_isPIC(device_sysdata);
 
-		/*
-		 * Set the xbridge Device(X) Write Buffer Flush and Xbow Flush 
-		 * register addresses.
-		 */
-		set_flush_addresses(device_dev, device_sysdata);
 		pci_read_config_word(device_dev, PCI_COMMAND, &cmd);
 
 		/*
@@ -517,12 +438,13 @@
 		 * read from the card and it was set in the card by our
 		 * Infrastructure ..
 		 */
+		vhdl = device_sysdata->vhdl;
 		for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
 			size = 0;
 			size = device_dev->resource[idx].end -
 				device_dev->resource[idx].start;
 			if (size) {
-				device_dev->resource[idx].start = (unsigned long)pciio_pio_addr(device_vertex, 0, PCIIO_SPACE_WIN(idx), 0, size, 0, (IS_PIC_DEVICE(device_dev)) ? 0 : PCIIO_BYTE_STREAM);
+				device_dev->resource[idx].start = (unsigned long)pciio_pio_addr(vhdl, 0, PCIIO_SPACE_WIN(idx), 0, size, 0, 0);
 				device_dev->resource[idx].start |= __IA64_UNCACHED_OFFSET;
 			}
 			else
@@ -544,10 +466,11 @@
 		cmd |= PCI_COMMAND_MASTER; /* If the device doesn't support */
 					   /* bit gets dropped .. no harm */
 		pci_write_config_word(device_dev, PCI_COMMAND, cmd);
-		
-		pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN,
-				     (unsigned char *)&lines);
-	 
+
+		pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN, (unsigned char *)&lines);
+		device_sysdata = (struct sn_device_sysdata *)device_dev->sysdata;
+		device_vertex = device_sysdata->vhdl;
+
 		irqpdaindr->curr = device_dev;
 		intr_handle = pcibr_intr_alloc(device_vertex, NULL, lines, device_vertex);
 
@@ -557,15 +480,14 @@
 		pcibr_intr_connect(intr_handle, (intr_func_t)0, (intr_arg_t)0);
 		device_dev->irq = irq;
 		register_pcibr_intr(irq, intr_handle);
-
+ 
 		for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
 			int ibits = intr_handle->bi_ibits;
 			int i;
 
 			size = device_dev->resource[idx].end -
 				device_dev->resource[idx].start;
-			if (size == 0)
-				continue;
+			if (size == 0) continue;
 
 			for (i=0; i<8; i++) {
 				if (ibits & (1 << i) ) {
@@ -608,6 +530,40 @@
 }
 
 /*
+ * ppb_busnum_to_vhdl() - Called by pci_bus_map_create() to look for ppb buses
+ */
+void
+ppb_busnum_to_vhdl(vertex_hdl_t pci_bus)
+{
+	int ppb_bus_num;
+	char pathname[MAXDEVNAME];
+	vertex_hdl_t ppb_bus;
+	int slot, rv;
+	int reserved_buses = 9;
+
+	for (slot=0; slot < 8; slot++) {
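+	/*
+	 * Bus numbers below reserved_buses * 16 (== 144) are assumed to be
+	 * taken by the bridge buses themselves; pci-to-pci bridge buses are
+	 * slotted in above that range (see the note in the loop below).
+	 */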
+		/* pci-to-pci bridges start at 144 in busnum_to_vhdl */
+		for (ppb_bus_num=reserved_buses*16;
+				ppb_bus_num < MAX_PCI_XWIDGET; ppb_bus_num++) {
+			sprintf(pathname, "%d/ppb_%d", slot, ppb_bus_num);
+			rv = hwgraph_traverse(pci_bus, pathname, &ppb_bus);
+			if ( (rv != GRAPH_SUCCESS) || (!ppb_bus) ) {
+				continue;
+			}
+#ifdef DEBUG
+			printk("ppb_busnum_to_vhdl: found ppb_bus(0x%p) at %d\n",
+				ppb_bus, ppb_bus_num);
+#endif
+			if (busnum_to_pcibr_vhdl[ppb_bus_num])
+				printk("WARNING: ppb_busnum_to_vhdl: slot %d is already occupied by 0x%p\n", ppb_bus_num, busnum_to_pcibr_vhdl[ppb_bus_num]);
+			else
+				busnum_to_pcibr_vhdl[ppb_bus_num] = ppb_bus;
+		}
+		
+	}
+}
+
+/*
  * pci_bus_map_create() - Called by pci_bus_to_hcl_cvlink() to finish the job.
  *
  *	Linux PCI Bus numbers are assigned from lowest module_id numbers
@@ -630,96 +586,105 @@
  *	
  */
 static int 
-pci_bus_map_create(vertex_hdl_t xtalk, char * io_moduleid)
+pci_bus_map_create(vertex_hdl_t xtalk, int brick_type, char * io_moduleid)
 {
 
-	vertex_hdl_t master_node_vertex = NULL;
 	vertex_hdl_t xwidget = NULL;
 	vertex_hdl_t pci_bus = NULL;
-	hubinfo_t hubinfo = NULL;
 	xwidgetnum_t widgetnum;
 	char pathname[128];
 	graph_error_t rv;
 	int bus;
 	int basebus_num;
+#ifdef DEBUG
+	char hwpath[MAXDEVNAME] = "\0";
+#endif
 	extern void ioconfig_get_busnum(char *, int *);
+	extern int  pcibr_widget_to_bus(vertex_hdl_t);
 
 	int bus_number;
 
 	/*
-	 * Loop throught this vertex and get the Xwidgets ..
+	 * PCIX devices
+	 * We number busses differently for PCI-X devices.
+	 * We start from Lowest Widget on up ..
 	 */
 
+        (void) ioconfig_get_busnum((char *)io_moduleid, &basebus_num);
 
-	/* PCI devices */
+	/* XXX habeck: Coretalk is corelet 0-3, XTALK is widget 8-F.
+	 * This works because we don't find the hwgraph for non-existent
+	 * corelets/widgets, but is messy!  Fix this
+	 */
+	for (widgetnum = 0; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
 
-	for (widgetnum = HUB_WIDGET_ID_MAX; widgetnum >= HUB_WIDGET_ID_MIN; widgetnum--) {
-		sprintf(pathname, "%d", widgetnum);
-		xwidget = NULL;
-		
-		/*
-		 * Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
-		 *	     /hw/module/001c16/Pbrick/xtalk/8/pci/1 is device
+		/* Do both buses */
+		/* XXX habeck: PIC has two busses under each widget, TIO
+		 * has 1.  This works because we don't find the hwgraph
+		 * for non-existent busses, but is messy!  Fix this
 		 */
-		rv = hwgraph_traverse(xtalk, pathname, &xwidget);
-		if ( (rv != GRAPH_SUCCESS) ) {
-			if (!xwidget) {
-				continue;
+		for ( bus = 0; bus < 2; bus++ ) {
+			sprintf(pathname, "%d", widgetnum);
+			xwidget = NULL;
+			/*
+			 * Example - /hw/module/001c16/slab/0/IXbrick/xtalk/15 is the xwidget
+			 *	     /hw/module/001c16/slab/0/IXbrick/xtalk/15/pci-x/0 is the bus
+			 *	     /hw/module/001c16/slab/0/IXbrick/xtalk/15/pci-x/0/1 is device
+			 */
+			rv = hwgraph_traverse(xtalk, pathname, &xwidget);
+			if ( (rv != GRAPH_SUCCESS) ) {
+				if (!xwidget) {
+					continue;
+				}
 			}
-		}
-
-		sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
-		pci_bus = NULL;
-		if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
-			if (!pci_bus) {
-				continue;
-}
-
-		/*
-		 * Assign the correct bus number and also the nasid of this 
-		 * pci Xwidget.
-		 * 
-		 * Should not be any race here ...
-		 */
-		num_bridges++;
-		busnum_to_pcibr_vhdl[num_bridges - 1] = pci_bus;
+	
+			if ( bus == 0 )
+				sprintf(pathname, "%d/"EDGE_LBL_PCIX_0, widgetnum);
+			else
+				sprintf(pathname, "%d/"EDGE_LBL_PCIX_1, widgetnum);
+			pci_bus = NULL;
+			if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
+				if (!pci_bus) {
+					continue;
+				}
+	
+			/*
+			 * Assign the correct bus number and also the nasid of this 
+			 * pci Xwidget.
+			 * 
+			 * Should not be any race here ...
+			 */
+			bus_number = basebus_num + pcibr_widget_to_bus(pci_bus);
+#ifdef DEBUG
+			hwgraph_vertex_name_get(pci_bus, hwpath, MAXDEVNAME);
+			printk("%s:\n\tbus_num %d, basebus_num %d, bridge_bus %d, "
+			       "brick_bus %d, bus_vhdl 0x%lx, brick_type %d\n", 
+				hwpath, bus_number, basebus_num, bus, 
+				pcibr_widget_to_bus(pci_bus), 
+				(uint64_t)pci_bus, brick_type);
+#endif
+			busnum_to_pcibr_vhdl[bus_number] = pci_bus;
 
-		/*
-		 * Get the master node and from there get the NASID.
-		 */
-		master_node_vertex = device_master_get(xwidget);
-		if (!master_node_vertex) {
-			printk("WARNING: pci_bus_map_create: Unable to get .master for vertex 0x%p\n", (void *)xwidget);
-		}
+			/*
+			 * Put any pci-to-pci bridge buses into
+			 * busnum_to_pcibr_vhdl[].
+			 */
+			ppb_busnum_to_vhdl(pci_bus);
 	
-		hubinfo_get(master_node_vertex, &hubinfo);
-		if (!hubinfo) {
-			printk("WARNING: pci_bus_map_create: Unable to get hubinfo for master node vertex 0x%p\n", (void *)master_node_vertex);
-			return(1);
-		} else {
-			busnum_to_nid[num_bridges - 1] = hubinfo->h_nasid;
+			/*
+			 * Pre assign DMA maps needed for 32 Bits Page Map DMA.
+			 */
+			busnum_to_atedmamaps[bus_number] = (void *) kmalloc(
+				sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS, GFP_KERNEL);
+			if (!busnum_to_atedmamaps[bus_number])
+				BUG(); /* Cannot afford to run out of memory. */
+	
+			memset(busnum_to_atedmamaps[bus_number], 0x0, 
+				sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS);
 		}
-
-		/*
-		 * Pre assign DMA maps needed for 32 Bits Page Map DMA.
-		 */
-		busnum_to_atedmamaps[num_bridges - 1] = (void *) kmalloc(
-			sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS, GFP_KERNEL);
-		if (!busnum_to_atedmamaps[num_bridges - 1])
-			printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
-
-		memset(busnum_to_atedmamaps[num_bridges - 1], 0x0, 
-			sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS);
-
 	}
 
-	/*
-	 * PCIX devices
-	 * We number busses differently for PCI-X devices.
-	 * We start from Lowest Widget on up ..
-	 */
-
-        (void) ioconfig_get_busnum((char *)io_moduleid, &basebus_num);
+	/* AGP/CGbrick */
 
 	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
 
@@ -729,9 +694,9 @@
 			xwidget = NULL;
 			
 			/*
-			 * Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
-			 *	     /hw/module/001c16/Pbrick/xtalk/8/pci-x/0 is the bus
-			 *	     /hw/module/001c16/Pbrick/xtalk/8/pci-x/0/1 is device
+			 * Example - /hw/module/001c16/slab/0/CGbrick/xtalk/15 is the xwidget
+			 *	     /hw/module/001c16/slab/0/CGbrick/xtalk/15/agp/0 is the bus
+			 *	     /hw/module/001c16/slab/0/CGbrick/xtalk/15/agp/0/1a is device
 			 */
 			rv = hwgraph_traverse(xtalk, pathname, &xwidget);
 			if ( (rv != GRAPH_SUCCESS) ) {
@@ -741,9 +706,9 @@
 			}
 	
 			if ( bus == 0 )
-				sprintf(pathname, "%d/"EDGE_LBL_PCIX_0, widgetnum);
+				sprintf(pathname, "%d/"EDGE_LBL_AGP_0, widgetnum);
 			else
-				sprintf(pathname, "%d/"EDGE_LBL_PCIX_1, widgetnum);
+				sprintf(pathname, "%d/"EDGE_LBL_AGP_1, widgetnum);
 			pci_bus = NULL;
 			if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
 				if (!pci_bus) {
@@ -756,27 +721,29 @@
 			 * 
 			 * Should not be any race here ...
 			 */
-			bus_number = basebus_num + bus + io_brick_map_widget(MODULE_PXBRICK, widgetnum);
+			bus_number = basebus_num + bus + io_brick_map_widget(brick_type, widgetnum);
 #ifdef DEBUG
-			printk("bus_number %d basebus_num %d bus %d io %d\n", 
-				bus_number, basebus_num, bus, 
-				io_brick_map_widget(MODULE_PXBRICK, widgetnum));
+			hwgraph_vertex_name_get(pci_bus, hwpath, MAXDEVNAME);
+			printk("%s:\n\tbus_num %d, basebus_num %d, bridge_bus %d, "
+			       "brick_bus %d, bus_vhdl 0x%lx, brick_type %d\n",	    
+				hwpath, bus_number, basebus_num, bus,
+				pcibr_widget_to_bus(pci_bus), 
+				(uint64_t)pci_bus, brick_type);
 #endif
 			busnum_to_pcibr_vhdl[bus_number] = pci_bus;
-	
+
 			/*
 			 * Pre assign DMA maps needed for 32 Bits Page Map DMA.
 			 */
 			busnum_to_atedmamaps[bus_number] = (void *) kmalloc(
 				sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS, GFP_KERNEL);
-			if (!busnum_to_atedmamaps[bus_number])
-				printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
+			if (!busnum_to_atedmamaps[bus_number])
+				BUG(); /* Cannot afford to run out of memory */
 	
 			memset(busnum_to_atedmamaps[bus_number], 0x0, 
 				sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS);
 		}
 	}
-
         return(0);
 }
 
@@ -795,11 +762,21 @@
 	vertex_hdl_t devfs_hdl = NULL;
 	vertex_hdl_t xtalk = NULL;
 	int rv = 0;
-	char name[256];
-	char tmp_name[256];
+	char *name;
+	char *tmp_name;
 	int i, ii, j;
 	char *brick_name;
+	char *xtalk_name;
 	extern void ioconfig_bus_new_entries(void);
+	extern int iobrick_type_get_nasid(nasid_t);
+
+	name = kmalloc(256, GFP_KERNEL);
+	if (!name)
+		BUG();
+
+	tmp_name = kmalloc(256, GFP_KERNEL);
+	if (!tmp_name)
+		BUG();
 
 	/*
 	 * Figure out which IO Brick is connected to the Compute Bricks.
@@ -808,45 +785,72 @@
 		extern int iomoduleid_get(nasid_t);
 		moduleid_t iobrick_id;
 		nasid_t nasid = -1;
-		int nodecnt;
 		int n = 0;
 
-		nodecnt = modules[i]->nodecnt;
-		for ( n = 0; n < nodecnt; n++ ) {
+		for ( n = 0; n <= MAX_SLABS; n++ ) {
+			if (modules[i]->nodes[n] == -1)
+				continue; /* node is not alive in module */
+
 			nasid = cnodeid_to_nasid(modules[i]->nodes[n]);
 			iobrick_id = iomoduleid_get(nasid);
 			if ((int)iobrick_id > 0) { /* Valid module id */
 				char name[12];
 				memset(name, 0, 12);
 				format_module_id((char *)&(modules[i]->io[n].moduleid), iobrick_id, MODULE_FORMAT_BRIEF);
+				modules[i]->io[n].iobrick_type = (uint64_t)iobrick_type_get_nasid(nasid);
 			}
 		}
 	}
 				
 	devfs_hdl = hwgraph_path_to_vertex("hw/module");
 	for (i = 0; i < nummodules ; i++) {
-	    for ( j = 0; j < 3; j++ ) {
-		if ( j == 0 )
-			brick_name = EDGE_LBL_PBRICK;
-		else if ( j == 1 )
-			brick_name = EDGE_LBL_PXBRICK;
-		else
+	    for ( j = 0; j < 6; j++ ) {
+		/* XXX habeck.  This is really messy... If nothing else
+		 * it needs at least a comment explaining what it is
+		 * doing (ie. giving IX/IA bricks higher priority than
+		 * a PX/PA brick)
+		 */
+		if ( j == 0 ) {
 			brick_name = EDGE_LBL_IXBRICK;
+			xtalk_name = EDGE_LBL_XTALK;
+		} else if ( j == 1 ) {
+			brick_name = EDGE_LBL_IABRICK;
+			xtalk_name = EDGE_LBL_CORETALK;
+		} else if ( j == 2 ) {
+			brick_name = EDGE_LBL_PXBRICK;
+			xtalk_name = EDGE_LBL_XTALK;
+		} else if ( j == 3 ) {
+			brick_name = EDGE_LBL_PABRICK;
+			xtalk_name = EDGE_LBL_CORETALK;
+		} else if ( j == 4 ) {
+			brick_name = EDGE_LBL_OPUSBRICK;
+			xtalk_name = EDGE_LBL_XTALK;
+		} else {	/* 5 */
+			brick_name = EDGE_LBL_CGBRICK;
+			xtalk_name = EDGE_LBL_XTALK;
+		}
+
+		for ( ii = 0; ii <= MAX_SLABS ; ii++ ) {
+			if (modules[i]->nodes[ii] == -1)
+				continue; /* Missing slab */
 
-		for ( ii = 0; ii < 2 ; ii++ ) {
 			memset(name, 0, 256);
 			memset(tmp_name, 0, 256);
 			format_module_id(name, modules[i]->id, MODULE_FORMAT_BRIEF);
-			sprintf(tmp_name, "/slab/%d/%s/xtalk", geo_slab(modules[i]->geoid[ii]), brick_name);
+			sprintf(tmp_name, "/" EDGE_LBL_SLAB "/%d/%s/%s", 
+			    geo_slab(modules[i]->geoid[ii]), brick_name, xtalk_name);
 			strcat(name, tmp_name);
 			xtalk = NULL;
 			rv = hwgraph_edge_get(devfs_hdl, name, &xtalk);
 			if ( rv == 0 ) 
-				pci_bus_map_create(xtalk, (char *)&(modules[i]->io[ii].moduleid));
+				pci_bus_map_create(xtalk, (int)modules[i]->io[ii].iobrick_type, (char *)&(modules[i]->io[ii].moduleid));
 		}
 	    }
 	}
 
+	kfree(name);
+	kfree(tmp_name);
+
 	/*
 	 * Create the Linux PCI bus number vertex link.
 	 */
@@ -887,7 +891,6 @@
 	 * actually find devices and fill in hwgraph structs
 	 */
 	sn_pci_fixup(1);
-
 	return 0;
 }
 
diff -Nru a/arch/ia64/sn/io/machvec/pci_dma.c b/arch/ia64/sn/io/machvec/pci_dma.c
--- a/arch/ia64/sn/io/machvec/pci_dma.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/machvec/pci_dma.c	Thu Nov  6 13:42:35 2003
@@ -9,27 +9,8 @@
  * a description of how these routines should be used.
  */
 
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
 #include <linux/module.h>
-
-#include <asm/delay.h>
-#include <asm/io.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/types.h>
-#include <asm/sn/alenlist.h>
 #include <asm/sn/pci/pci_bus_cvlink.h>
-#include <asm/sn/nag.h>
 
 /*
  * For ATE allocations
@@ -177,7 +158,6 @@
 	 * attributes or to a different memory region.
 	 */
 	*dma_handle = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
-			((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
 					  PCIIO_DMA_CMD);
 
         /*
@@ -196,7 +176,6 @@
 	 */
 	if (!(*dma_handle)) {
 		dma_map = pcibr_dmamap_alloc(vhdl, NULL, size,
-				((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
 					     PCIIO_DMA_CMD);
 		if (!dma_map) {
 			printk(KERN_ERR "sn_pci_alloc_consistent: Unable to "
@@ -287,10 +266,7 @@
 		 */
 		if (IS_PCIA64(hwdev)) {
 			sg->dma_address = pcibr_dmatrans_addr(vhdl, NULL, phys_addr,
-						       sg->length,
-			       ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
-						       PCIIO_DMA_DATA |
-						       PCIIO_DMA_A64);
+						       sg->length, PCIIO_DMA_DATA | PCIIO_DMA_A64);
 			sg->dma_length = sg->length;
 			continue;
 		}
@@ -300,9 +276,7 @@
 		 */
 		if (IS_PCI32G(hwdev)) {
 			sg->dma_address = pcibr_dmatrans_addr(vhdl, NULL, phys_addr,
-						       sg->length,
-					((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
-						       PCIIO_DMA_DATA);
+						       sg->length, PCIIO_DMA_DATA);
 			sg->dma_length = sg->length;
 			/*
 			 * See if we got a direct map entry
@@ -317,9 +291,7 @@
 		 * It is a 32 bit card and we cannot do direct mapping,
 		 * so we use an ATE.
 		 */
-		dma_map = pcibr_dmamap_alloc(vhdl, NULL, sg->length,
-				((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
-					     PCIIO_DMA_DATA);
+		dma_map = pcibr_dmamap_alloc(vhdl, NULL, sg->length, PCIIO_DMA_DATA);
 		if (!dma_map) {
 			printk(KERN_ERR "sn_pci_map_sg: Unable to allocate "
 			       "anymore 32 bit page map entries.\n");
@@ -390,7 +362,7 @@
  * the IA64 machvec code.
  *
  * We map this to the one step pcibr_dmamap_trans interface rather than
- * the two step pciio_dmamap_alloc/pciio_dmamap_addr because we have
+ * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
  * no way of saving the dmamap handle from the alloc to later free
  * (which is pretty much unacceptable).
  *
@@ -429,9 +401,7 @@
 	if (IS_PCIA64(hwdev)) {
 		/* This device supports 64 bit DMA addresses. */
 		dma_addr = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
-		       ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
-					       PCIIO_DMA_DATA |
-					       PCIIO_DMA_A64);
+					       PCIIO_DMA_DATA | PCIIO_DMA_A64);
 		return dma_addr;
 	}
 
@@ -443,7 +413,6 @@
 	 */
 	if (IS_PCI32G(hwdev)) {
 		dma_addr = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
-			((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
 					       PCIIO_DMA_DATA);
 		if (dma_addr)
 			return dma_addr;
@@ -454,9 +423,7 @@
 	 * let's use the PMU instead.
 	 */
 	dma_map = NULL;
-	dma_map = pcibr_dmamap_alloc(vhdl, NULL, size, 
-			((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
-			PCIIO_DMA_DATA);
+	dma_map = pcibr_dmamap_alloc(vhdl, NULL, size, PCIIO_DMA_DATA);
 
 	if (!dma_map) {
 		printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
diff -Nru a/arch/ia64/sn/io/platform_init/irix_io_init.c b/arch/ia64/sn/io/platform_init/irix_io_init.c
--- a/arch/ia64/sn/io/platform_init/irix_io_init.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/platform_init/irix_io_init.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -22,17 +21,55 @@
 extern void klhwg_add_all_modules(vertex_hdl_t);
 extern void klhwg_add_all_nodes(vertex_hdl_t);
 
+extern int init_hcl(void);
 extern vertex_hdl_t hwgraph_root;
 extern void io_module_init(void);
 extern int pci_bus_to_hcl_cvlink(void);
-extern void mlreset(void);
 
-/* #define DEBUG_IO_INIT 1 */
-#ifdef DEBUG_IO_INIT
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif /* DEBUG_IO_INIT */
+char arg_maxnodes[4];
+char master_baseio_wid;
+nasid_t master_baseio_nasid;
+nasid_t master_nasid = INVALID_NASID;           /* This is the partition master nasid */
+nasid_t console_nasid = (nasid_t)-1;
+
+/*
+ * Return non-zero if the given variable was specified
+ */
+int
+is_specified(char *s)
+{
+	return (strlen(s) != 0);
+}
+
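+/*
+ * Return non-zero if the two nasids refer to the same node or to nodes
+ * that are xbow peers of one another.
+ */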
+int
+check_nasid_equiv(nasid_t nasida, nasid_t nasidb)
+{
+	if ((nasida == nasidb) || (nasida == NODEPDA(NASID_TO_COMPACT_NODEID(nasidb))->xbow_peer))
+		return 1;
+	else
+		return 0;
+}
+
+int
+is_master_baseio_nasid_widget(nasid_t test_nasid, xwidgetnum_t test_wid)
+{
+
+	/*
+	 * If the widget numbers are different, we're not the master.
+	 */
+	if (test_wid != (xwidgetnum_t)master_baseio_wid) {
+		return 0;
+	}
+
+	/*
+	 * If the NASIDs are the same or equivalent, we're the master.
+	 */
+	if (check_nasid_equiv(test_nasid, master_baseio_nasid)) {
+		return 1;
+	} else {
+		return 0;
+	}
+}
 
 /*
  * This routine is responsible for the setup of all the IRIX hwgraph style
@@ -44,15 +81,13 @@
  *
  */
 
-void
-irix_io_init(void)
+void __init
+sgi_master_io_infr_init(void)
 {
 	cnodeid_t cnode;
+	extern void pciio_ppb_init(void);
 
-	/*
-	 * This is the Master CPU.  Emulate mlsetup and main.c in Irix.
-	 */
-	mlreset();
+	init_hcl(); /* Sets up the hwgraph compatibility layer */
 
         /*
          * Initialize platform-dependent vertices in the hwgraph:
@@ -76,6 +111,17 @@
 	}
 
 	/* We can do headless hub cnodes here .. */
+
+	/* Initialize ICE for TIO Nodes. */
+	for (cnode = numnodes; cnode < numionodes; cnode++) {
+		extern void per_ice_init(cnodeid_t);
+		per_ice_init(cnode);
+	}
+
+	/*
+	 * Initialize for PCI-to-PCI bridges.   FIXME: This should be elsewhere.
+	 */
+	pciio_ppb_init();
 
 	/*
 	 *
diff -Nru a/arch/ia64/sn/io/platform_init/sgi_io_init.c b/arch/ia64/sn/io/platform_init/sgi_io_init.c
--- a/arch/ia64/sn/io/platform_init/sgi_io_init.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/platform_init/sgi_io_init.c	Thu Nov  6 13:42:35 2003
@@ -78,3 +78,18 @@
 	/* Initialize error interrupts for this hub. */
 	hub_error_init(cnode);
 }
+
+/*
+ * per_ice_init
+ *
+ *      This code is executed once for each Ice chip.
+ */
+void
+per_ice_init(cnodeid_t cnode)
+{
+
+	/* Initialize error interrupts for this ICE. */
+	printk("per_ice_init: ICE error interrupt init not yet implemented\n");
+	/* ice_error_init(cnode); */
+
+}
diff -Nru a/arch/ia64/sn/io/sgi_if.c b/arch/ia64/sn/io/sgi_if.c
--- a/arch/ia64/sn/io/sgi_if.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sgi_if.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -12,45 +11,18 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <asm/sn/sgi.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
-#include <asm/sn/pci/bridge.h>
 #include <asm/sn/ioerror_handling.h>
 #include <asm/sn/pci/pciio.h>
 #include <asm/sn/slotnum.h>
 
 void *
-snia_kmem_zalloc(size_t size, int flag)
+snia_kmem_zalloc(size_t size)
 {
         void *ptr = kmalloc(size, GFP_KERNEL);
 	if ( ptr )
-        	BZERO(ptr, size);
-        return(ptr);
-}
-
-void
-snia_kmem_free(void *ptr, size_t size)
-{
-        kfree(ptr);
-}
-
-/*
- * the alloc/free_node routines do a simple kmalloc for now ..
- */
-void *
-snia_kmem_alloc_node(register size_t size, register int flags, cnodeid_t node)
-{
-	/* someday will Allocate on node 'node' */
-	return(kmalloc(size, GFP_KERNEL));
-}
-
-void *
-snia_kmem_zalloc_node(register size_t size, register int flags, cnodeid_t node)
-{
-	void *ptr = kmalloc(size, GFP_KERNEL);
-	if ( ptr )
-		BZERO(ptr, size);
+        	memset(ptr, 0, size);
         return(ptr);
 }
 
diff -Nru a/arch/ia64/sn/io/sn2/Makefile b/arch/ia64/sn/io/sn2/Makefile
--- a/arch/ia64/sn/io/sn2/Makefile	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/Makefile	Thu Nov  6 13:42:35 2003
@@ -9,8 +9,7 @@
 # Makefile for the sn2 specific io routines.
 #
 
-EXTRA_CFLAGS := -DLITTLE_ENDIAN
-
-obj-y += pcibr/ ml_SN_intr.o shub_intr.o shuberror.o shub.o bte_error.o \
-	 pic.o geo_op.o l1_command.o klconflib.o klgraph.o ml_SN_init.o \
-	 ml_iograph.o module.o pciio.o xbow.o xtalk.o shubio.o
+obj-y += pcibr/ ioc4/ ml_SN_intr.o shub_intr.o shuberror.o shub.o \
+	 bte_error.o pic.o geo_op.o l1_command.o klconflib.o klgraph.o \
+	 ml_SN_init.o ml_iograph.o module.o pciio.o tio.o tio_intr.o \
+	 xbow.o xtalk.o shubio.o tiocp.o pciio_ppb.o
diff -Nru a/arch/ia64/sn/io/sn2/bte_error.c b/arch/ia64/sn/io/sn2/bte_error.c
--- a/arch/ia64/sn/io/sn2/bte_error.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/bte_error.c	Thu Nov  6 13:42:35 2003
@@ -1,45 +1,17 @@
 /*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
- *
- * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
- * 
- * This program is free software; you can redistribute it and/or modify it 
- * under the terms of version 2 of the GNU General Public License 
- * as published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it would be useful, but 
- * WITHOUT ANY WARRANTY; without even the implied warranty of 
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- * 
- * Further, this software is distributed without any warranty that it is 
- * free of the rightful claim of any third person regarding infringement 
- * or the like.  Any license provided herein, whether implied or 
- * otherwise, applies only to this software file.  Patent licenses, if 
- * any, provided herein do not apply to combinations of this program with 
- * other software, or any other product whatsoever.
- * 
- * You should have received a copy of the GNU General Public 
- * License along with this program; if not, write the Free Software 
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- * 
- * Contact information:  Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, 
- * Mountain View, CA  94043, or:
- * 
- * http://www.sgi.com 
- * 
- * For further information regarding this notice, see: 
- * 
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <asm/smp.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/io.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/sn_private.h>
diff -Nru a/arch/ia64/sn/io/sn2/geo_op.c b/arch/ia64/sn/io/sn2/geo_op.c
--- a/arch/ia64/sn/io/sn2/geo_op.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/geo_op.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -29,7 +28,6 @@
 #include <asm/sn/types.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/io.h>
diff -Nru a/arch/ia64/sn/io/sn2/klconflib.c b/arch/ia64/sn/io/sn2/klconflib.c
--- a/arch/ia64/sn/io/sn2/klconflib.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/klconflib.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -15,7 +14,6 @@
 #include <asm/sn/io.h>
 #include <asm/sn/sn_cpuid.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/klconfig.h>
@@ -25,9 +23,6 @@
 #include <asm/sn/xtalk/xbow.h>
 
 
-#define LDEBUG 0
-#define NIC_UNKNOWN ((nic_t) -1)
-
 #undef DEBUG_KLGRAPH
 #ifdef DEBUG_KLGRAPH
 #define DBG(x...) printk(x)
@@ -39,7 +34,8 @@
 static int hasmetarouter;
 
 
-char brick_types[MAX_BRICK_TYPES + 1] = "crikxdpn%#=012345";
+/* XXX habeck: Need to update for IABrick and PABrick once we know char */
+char brick_types[MAX_BRICK_TYPES + 1] = "crikxdpn%#=vo^34567890123456789...";
 
 lboard_t *
 find_lboard(lboard_t *start, unsigned char brd_type)
@@ -270,17 +266,17 @@
 				board_name = EDGE_LBL_PXBRICK;
 			else if (brd->brd_type == KLTYPE_IXBRICK)
 				board_name = EDGE_LBL_IXBRICK;
-			else if (brd->brd_type == KLTYPE_PBRICK)
-				board_name = EDGE_LBL_PBRICK;
-			else if (brd->brd_type == KLTYPE_IBRICK)
-				board_name = EDGE_LBL_IBRICK;
-			else if (brd->brd_type == KLTYPE_XBRICK)
-				board_name = EDGE_LBL_XBRICK;
-			else if (brd->brd_type == KLTYPE_PEBRICK)
-				board_name = EDGE_LBL_PEBRICK;
+			else if (brd->brd_type == KLTYPE_OPUSBRICK)
+				board_name = EDGE_LBL_OPUSBRICK;
 			else if (brd->brd_type == KLTYPE_CGBRICK)
 				board_name = EDGE_LBL_CGBRICK;
-			else
+			else if (brd->brd_type == KLTYPE_BUBRICK)
+				board_name = EDGE_LBL_BUBRICK;
+			else if (brd->brd_type == KLTYPE_IABRICK)
+				board_name = EDGE_LBL_IABRICK;
+			else if (brd->brd_type == KLTYPE_PABRICK)
+				board_name = EDGE_LBL_PABRICK;
+			else 
 				board_name = EDGE_LBL_IOBRICK;
 			break;
 		default:
@@ -293,23 +289,6 @@
 	sprintf(path, EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d/%s", buffer, geo_slab(brd->brd_geoid), board_name);
 }
 
-/*
- * Get the module number for a NASID.
- */
-moduleid_t
-get_module_id(nasid_t nasid)
-{
-	lboard_t *brd;
-
-	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
-
-	if (!brd)
-		return INVALID_MODULE;
-	else
-		return geo_module(brd->brd_geoid);
-}
-
-
 #define MHZ	1000000
 
 
@@ -419,7 +398,13 @@
 	strcpy(serial_number,"");
 	switch(KLCLASS(board->brd_type)) {
 	case KLCLASS_CPU: {	/* Node board */
-		klhub_t	*hub;
+		klhub_t *hub;
+
+		if (board->brd_type == KLTYPE_TIO) {
+			printk("*****board_serial_number_get: Need to support TIO.*****\n");
+			strcpy(serial_number,"");
+			return(0);
+		}
 		
 		/* Get the hub component information */
 		hub = (klhub_t *)find_first_component(board,
@@ -440,71 +425,25 @@
 		break;
 	}
 	case KLCLASS_IO: {	/* IO board */
-		if (KLTYPE(board->brd_type) == KLTYPE_TPU) {
-		/* Special case for TPU boards */
-			kltpu_t *tpu;	
-		
-			/* Get the tpu component information */
-			tpu = (kltpu_t *)find_first_component(board,
-						      KLSTRUCT_TPU);
-			/* If we don't have a tpu component on a tpu board
-			 * then we have a weird klconfig.
-			 */
-			if (!tpu)
-				return(1);
-			/* Get the serial number information from
-			 * the tpu's manufacturing nic info
-			 */
-			if (component_serial_number_get(board,
-						tpu->tpu_mfg_nic,
-						serial_number,
-						""))
-				return(1);
-			break;
-		} else  if ((KLTYPE(board->brd_type) == KLTYPE_GSN_A) ||
-		            (KLTYPE(board->brd_type) == KLTYPE_GSN_B)) {
-		/* Special case for GSN boards */
-			klgsn_t *gsn;	
-		
-			/* Get the gsn component information */
-			gsn = (klgsn_t *)find_first_component(board,
-			      ((KLTYPE(board->brd_type) == KLTYPE_GSN_A) ?
-					KLSTRUCT_GSN_A : KLSTRUCT_GSN_B));
-			/* If we don't have a gsn component on a gsn board
-			 * then we have a weird klconfig.
-			 */
-			if (!gsn)
-				return(1);
-			/* Get the serial number information from
-			 * the gsn's manufacturing nic info
-			 */
-			if (component_serial_number_get(board,
-						gsn->gsn_mfg_nic,
-						serial_number,
-						""))
-				return(1);
-			break;
-		} else {
-		     	klbri_t	*bridge;
-		
-			/* Get the bridge component information */
-			bridge = (klbri_t *)find_first_component(board,
-							 KLSTRUCT_BRI);
-			/* If we don't have a bridge component on an IO board
-			 * then we have a weird klconfig.
-			 */
-			if (!bridge)
-				return(1);
-			/* Get the serial number information from
-		 	 * the bridge's manufacturing nic info
-			 */
-			if (component_serial_number_get(board,
-						bridge->bri_mfg_nic,
-						serial_number,
-						""))
-				return(1);
-			break;
-		}
+	     	klbri_t	*bridge;
+	
+		/* Get the bridge component information */
+		bridge = (klbri_t *)find_first_component(board,
+						 KLSTRUCT_BRI);
+		/* If we don't have a bridge component on an IO board
+		 * then we have a weird klconfig.
+		 */
+		if (!bridge)
+			return(1);
+		/* Get the serial number information from
+	 	 * the bridge's manufacturing nic info
+		 */
+		if (component_serial_number_get(board,
+					bridge->bri_mfg_nic,
+					serial_number,
+					""))
+			return(1);
+		break;
 	}
 	case KLCLASS_ROUTER: {	/* Router board */
 		klrou_t *router;	
@@ -685,4 +624,27 @@
 
 	/* avoid sign extending the moduleid_t */
 	return (int)(unsigned short)m;
+}
+
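+/*
+ * cbrick_type_get_nasid() - map the brick-type character encoded in the
+ * node's geographic module id to its index in brick_types[] (the module.h
+ * brick type), or -1 if the character is not recognized.
+ */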
+int
+cbrick_type_get_nasid(nasid_t nasid) {
+	lboard_t *brd;
+	moduleid_t module;
+	uint type;
+	int t;
+
+	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
+	module = geo_module(brd->brd_geoid);
+	type = (module & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT;
+	/* convert brick_type to lower case */
+	if ((type >= 'A') && (type <= 'Z'))
+		type = type - 'A' + 'a';
+    
+	/* convert to a module.h brick type */
+	for( t = 0; t < MAX_BRICK_TYPES; t++ ) {
+		if( brick_types[t] == type ) {
+			return t;
+		}
+	} 
+	return -1;
 }
diff -Nru a/arch/ia64/sn/io/sn2/klgraph.c b/arch/ia64/sn/io/sn2/klgraph.c
--- a/arch/ia64/sn/io/sn2/klgraph.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/klgraph.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -15,20 +14,15 @@
 
 #include <linux/types.h>
 #include <linux/slab.h>
+#include <linux/init.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/kldir.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/router.h>
-#include <asm/sn/xtalk/xbow.h>
 #include <asm/sn/hcl_util.h>
+#include <asm/sn/sn_private.h>
 
-// #define KLGRAPH_DEBUG 1
+/* #define KLGRAPH_DEBUG 1 */
 #ifdef KLGRAPH_DEBUG
 #define GRPRINTF(x)	printk x
 #define CE_GRPANIC	CE_PANIC
@@ -37,174 +31,35 @@
 #define CE_GRPANIC	CE_PANIC
 #endif
 
-#include <asm/sn/sn_private.h>
-
 extern char arg_maxnodes[];
-extern u64 klgraph_addr[];
 void mark_cpuvertex_as_cpu(vertex_hdl_t vhdl, cpuid_t cpuid);
+extern int is_specified(char *);
 
-
-/*
- * Support for verbose inventory via hardware graph. 
- * klhwg_invent_alloc allocates the necessary size of inventory information
- * and fills in the generic information.
- */
-invent_generic_t *
-klhwg_invent_alloc(cnodeid_t cnode, int class, int size)
-{
-	invent_generic_t *invent;
-
-	invent = kern_malloc(size);
-	if (!invent) return NULL;
-	
-	invent->ig_module = NODE_MODULEID(cnode);
-	invent->ig_slot = SLOTNUM_GETSLOT(NODE_SLOTID(cnode));
-	invent->ig_invclass = class;
-
-	return invent;
-}
-
-/*
- * Add detailed disabled cpu inventory info to the hardware graph.
- */
-void
-klhwg_disabled_cpu_invent_info(vertex_hdl_t cpuv,
-                               cnodeid_t cnode,
-                               klcpu_t *cpu, slotid_t slot)
-{
-	invent_cpuinfo_t *cpu_invent;
-	diag_inv_t       *diag_invent;
-
-	cpu_invent = (invent_cpuinfo_t *)
-	klhwg_invent_alloc(cnode, INV_PROCESSOR, sizeof(invent_cpuinfo_t));
-	if (!cpu_invent)
-		return;
-
-	/* Diag information on this processor */
-	diag_invent = (diag_inv_t *)
-	klhwg_invent_alloc(cnode, INV_CPUDIAGVAL, sizeof(diag_inv_t));
-
-	if (!diag_invent)
-		return;
-
-
-	/* Disabled CPU */
-	cpu_invent->ic_gen.ig_flag = 0x0;
-	cpu_invent->ic_gen.ig_slot = slot;
-	cpu_invent->ic_cpu_info.cpuflavor = cpu->cpu_prid;
-	cpu_invent->ic_cpu_info.cpufq = cpu->cpu_speed;
-	cpu_invent->ic_cpu_info.sdfreq = cpu->cpu_scachespeed;
-
-	cpu_invent->ic_cpu_info.sdsize = cpu->cpu_scachesz;
-	cpu_invent->ic_cpuid = cpu->cpu_info.virtid;
-	cpu_invent->ic_slice = cpu->cpu_info.physid;
-
-	/* Disabled CPU label */
-	hwgraph_info_add_LBL(cpuv, INFO_LBL_DETAIL_INVENT,
-			(arbitrary_info_t) cpu_invent);
-	hwgraph_info_export_LBL(cpuv, INFO_LBL_DETAIL_INVENT,
-			sizeof(invent_cpuinfo_t));
-
-	/* Diagval label - stores reason for disable +{virt,phys}id +diagval*/
-	hwgraph_info_add_LBL(cpuv, INFO_LBL_DIAGVAL,
-			(arbitrary_info_t) diag_invent);
-
-	hwgraph_info_export_LBL(cpuv, INFO_LBL_DIAGVAL,
-			sizeof(diag_inv_t));
-}
-
-/*
- * Add detailed cpu inventory info to the hardware graph.
- */
-void
-klhwg_cpu_invent_info(vertex_hdl_t cpuv,
-			cnodeid_t cnode,
-			klcpu_t *cpu)
-{
-	invent_cpuinfo_t *cpu_invent;
-
-	cpu_invent = (invent_cpuinfo_t *)
-		klhwg_invent_alloc(cnode, INV_PROCESSOR, sizeof(invent_cpuinfo_t));
-	if (!cpu_invent)
-		return;
-
-	if (KLCONFIG_INFO_ENABLED((klinfo_t *)cpu))
-		cpu_invent->ic_gen.ig_flag = INVENT_ENABLED;
-	else
-		cpu_invent->ic_gen.ig_flag = 0x0;
-
-	cpu_invent->ic_cpu_info.cpuflavor = cpu->cpu_prid;
-	cpu_invent->ic_cpu_info.cpufq = cpu->cpu_speed;
-	cpu_invent->ic_cpu_info.sdfreq = cpu->cpu_scachespeed;
-
-	cpu_invent->ic_cpu_info.sdsize = cpu->cpu_scachesz;
-	cpu_invent->ic_cpuid = cpu->cpu_info.virtid;
-	cpu_invent->ic_slice = cpu_physical_id_to_slice(cpu->cpu_info.virtid);
-
-	hwgraph_info_add_LBL(cpuv, INFO_LBL_DETAIL_INVENT,
-			(arbitrary_info_t) cpu_invent);
-	hwgraph_info_export_LBL(cpuv, INFO_LBL_DETAIL_INVENT,
-			sizeof(invent_cpuinfo_t));
-}
-
-/* 
- * Add information about the baseio prom version number
- * as a part of detailed inventory info in the hwgraph.
- */
+/* ARGSUSED */
 void
-klhwg_baseio_inventory_add(vertex_hdl_t baseio_vhdl,cnodeid_t cnode)
+klhwg_add_ice(vertex_hdl_t node_vertex, klhub_t *hub, cnodeid_t cnode)
 {
-	invent_miscinfo_t	*baseio_inventory;
-	unsigned char		version = 0,revision = 0;
+	vertex_hdl_t myicev;
+	vertex_hdl_t ice_mon;
+	int rc;
+	extern struct file_operations shub_mon_fops;
 
-	/* Allocate memory for the "detailed inventory" info
-	 * for the baseio
-	 */
-	baseio_inventory = (invent_miscinfo_t *) 
-		klhwg_invent_alloc(cnode, INV_PROM, sizeof(invent_miscinfo_t));
-	baseio_inventory->im_type = INV_IO6PROM;
-	/* Store the revision info  in the inventory */
-	baseio_inventory->im_version = version;
-	baseio_inventory->im_rev = revision;
-	/* Put the inventory info in the hardware graph */
-	hwgraph_info_add_LBL(baseio_vhdl, INFO_LBL_DETAIL_INVENT, 
-			     (arbitrary_info_t) baseio_inventory);
-	/* Make the information available to the user programs
-	 * thru hwgfs.
-	 */
-        hwgraph_info_export_LBL(baseio_vhdl, INFO_LBL_DETAIL_INVENT,
-				sizeof(invent_miscinfo_t));
-}
+	hwgraph_path_add(node_vertex, EDGE_LBL_ICE, &myicev);
 
-/*
- * Add detailed cpu inventory info to the hardware graph.
- */
-void
-klhwg_hub_invent_info(vertex_hdl_t hubv,
-		      cnodeid_t cnode, 
-		      klhub_t *hub)
-{
-	invent_miscinfo_t *hub_invent;
-
-	hub_invent = (invent_miscinfo_t *) 
-	    klhwg_invent_alloc(cnode, INV_MISC, sizeof(invent_miscinfo_t));
-	if (!hub_invent)
-	    return;
+	HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, myicev, NULL, "Created path for ice vertex for TIO node.\n"));
 
-	if (KLCONFIG_INFO_ENABLED((klinfo_t *)hub))
-	    hub_invent->im_gen.ig_flag = INVENT_ENABLED;
+	rc = device_master_set(myicev, node_vertex);
+	if (rc)
+		panic("klhwg_add_ice: Unable to set device master for ice vertex.\n");
 
-	hub_invent->im_type = INV_HUB;
-	hub_invent->im_rev = hub->hub_info.revision;
-	hub_invent->im_speed = hub->hub_speed;
-	hwgraph_info_add_LBL(hubv, INFO_LBL_DETAIL_INVENT, 
-			     (arbitrary_info_t) hub_invent);
-        hwgraph_info_export_LBL(hubv, INFO_LBL_DETAIL_INVENT,
-				sizeof(invent_miscinfo_t));
+	ice_mon = hwgraph_register(myicev, EDGE_LBL_PERFMON,
+		0, 0, 0, 0,
+		S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+		&shub_mon_fops, (void *)(long)cnode);
 }
 
 /* ARGSUSED */
-void
+static void __init
 klhwg_add_hub(vertex_hdl_t node_vertex, klhub_t *hub, cnodeid_t cnode)
 {
 	vertex_hdl_t myhubv;
@@ -213,17 +68,22 @@
 	extern struct file_operations shub_mon_fops;
 
 	GRPRINTF(("klhwg_add_hub: adding %s\n", EDGE_LBL_HUB));
-	(void) hwgraph_path_add(node_vertex, EDGE_LBL_HUB, &myhubv);
+	hwgraph_path_add(node_vertex, EDGE_LBL_HUB, &myhubv);
+
+	HWGRAPH_DEBUG((__FILE__, __FUNCTION__,__LINE__, myhubv, NULL, "Created path for hub vertex for Shub node.\n"));
+
 	rc = device_master_set(myhubv, node_vertex);
+	if (rc)
+		panic("klhwg_add_hub: Unable to set device master for hub vertex.\n");
+
 	hub_mon = hwgraph_register(myhubv, EDGE_LBL_PERFMON,
-		0, 0,
-		0, 0,
+		0, 0, 0, 0,
 		S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
 		&shub_mon_fops, (void *)(long)cnode);
 }
 
 /* ARGSUSED */
-void
+static void __init
 klhwg_add_disabled_cpu(vertex_hdl_t node_vertex, cnodeid_t cnode, klcpu_t *cpu, slotid_t slot)
 {
         vertex_hdl_t my_cpu;
@@ -234,19 +94,20 @@
 	nasid = COMPACT_TO_NASID_NODEID(cnode);
         cpu_id = nasid_slice_to_cpuid(nasid, cpu->cpu_info.physid);
         if(cpu_id != -1){
-		sprintf(name, "%s/%s/%c", EDGE_LBL_DISABLED, EDGE_LBL_CPU, 'a' + cpu->cpu_info.physid);
+		snprintf(name, 120, "%s/%s/%c", EDGE_LBL_DISABLED, EDGE_LBL_CPU, 'a' + cpu->cpu_info.physid);
 		(void) hwgraph_path_add(node_vertex, name, &my_cpu);
 
+		HWGRAPH_DEBUG((__FILE__, __FUNCTION__,__LINE__, my_cpu, NULL, "Created path for disabled cpu slice.\n"));
+
 		mark_cpuvertex_as_cpu(my_cpu, cpu_id);
 		device_master_set(my_cpu, node_vertex);
 
-		klhwg_disabled_cpu_invent_info(my_cpu, cnode, cpu, slot);
 		return;
         }
 }
 
 /* ARGSUSED */
-void
+static void __init
 klhwg_add_cpu(vertex_hdl_t node_vertex, cnodeid_t cnode, klcpu_t *cpu)
 {
         vertex_hdl_t my_cpu, cpu_dir;
@@ -257,27 +118,65 @@
 	nasid = COMPACT_TO_NASID_NODEID(cnode);
         cpu_id = nasid_slice_to_cpuid(nasid, cpu->cpu_info.physid);
 
-        sprintf(name, "%s/%d/%c",
+        snprintf(name, 120, "%s/%d/%c",
                 EDGE_LBL_CPUBUS,
                 0,
                 'a' + cpu->cpu_info.physid);
 
         GRPRINTF(("klhwg_add_cpu: adding %s to vertex 0x%p\n", name, node_vertex));
         (void) hwgraph_path_add(node_vertex, name, &my_cpu);
-        mark_cpuvertex_as_cpu(my_cpu, cpu_id);
+
+	HWGRAPH_DEBUG((__FILE__, __FUNCTION__,__LINE__, my_cpu, NULL, "Created path for active cpu slice.\n"));
+
+	mark_cpuvertex_as_cpu(my_cpu, cpu_id);
         device_master_set(my_cpu, node_vertex);
 
         /* Add an alias under the node's CPU directory */
         if (hwgraph_edge_get(node_vertex, EDGE_LBL_CPU, &cpu_dir) == GRAPH_SUCCESS) {
-                sprintf(name, "%c", 'a' + cpu->cpu_info.physid);
+                snprintf(name, 120, "%c", 'a' + cpu->cpu_info.physid);
                 (void) hwgraph_edge_add(cpu_dir, my_cpu, name);
+		HWGRAPH_DEBUG((__FILE__, __FUNCTION__,__LINE__, cpu_dir, my_cpu, "Created %s from vhdl1 to vhdl2.\n", name));
         }
 
-        klhwg_cpu_invent_info(my_cpu, cnode, cpu);
 }
 
 
-void
+static void __init
+klhwg_add_coretalk(cnodeid_t cnode, nasid_t tio_nasid)
+{
+	lboard_t *brd;
+	vertex_hdl_t coretalk_v, icev;
+	/*REFERENCED*/
+	graph_error_t err;
+
+	if ((brd = find_lboard((lboard_t *)KL_CONFIG_INFO(tio_nasid), KLTYPE_IOBRICK_XBOW)) == NULL)
+		return;
+
+	if (KL_CONFIG_DUPLICATE_BOARD(brd))
+		return;
+
+	icev = cnodeid_to_vertex(cnode);
+
+	err = hwgraph_path_add(icev, EDGE_LBL_CORETALK, &coretalk_v);
+	if (err != GRAPH_SUCCESS) {
+		if (err == GRAPH_DUP)
+			printk(KERN_WARNING "klhwg_add_coretalk: Check for "
+				"working routers and router links!");
+
+		panic("klhwg_add_coretalk: Failed to add "
+			"edge: vertex 0x%p to vertex 0x%p, "
+			"error %d\n",
+			(void *)icev, (void *)coretalk_v, err);
+	}
+
+	HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, coretalk_v, NULL, "Created coretalk path for TIO node.\n"));
+
+	NODEPDA(cnode)->xbow_vhdl = coretalk_v;
+
+}
+
+
+static void __init
 klhwg_add_xbow(cnodeid_t cnode, nasid_t nasid)
 {
 	lboard_t *brd;
@@ -295,9 +194,6 @@
 	if (KL_CONFIG_DUPLICATE_BOARD(brd))
 	    return;
 
-	GRPRINTF(("klhwg_add_xbow: adding cnode %d nasid %d xbow edges\n",
-			cnode, nasid));
-
 	if ((xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW))
 	    == NULL)
 	    return;
@@ -326,11 +222,14 @@
                                 printk(KERN_WARNING  "klhwg_add_xbow: Check for "
                                         "working routers and router links!");
 
-                        PRINT_PANIC("klhwg_add_xbow: Failed to add "
+                        panic("klhwg_add_xbow: Failed to add "
                                 "edge: vertex 0x%p to vertex 0x%p,"
                                 "error %d\n",
                                 (void *)hubv, (void *)xbow_v, err);
                 }
+
+		HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, xbow_v, NULL, "Created path for xtalk.\n"));
+
 		xswitch_vertex_init(xbow_v); 
 
 		NODEPDA(hub_cnode)->xbow_vhdl = xbow_v;
@@ -345,14 +244,71 @@
 				hub_nasid;
 		}
 
-		GRPRINTF(("klhwg_add_xbow: adding port nasid %d %s to vertex 0x%p\n",
-			hub_nasid, EDGE_LBL_XTALK, hubv));
 	}
 }
 
 
 /* ARGSUSED */
-void
+static void __init
+klhwg_add_tionode(vertex_hdl_t hwgraph_root, cnodeid_t cnode)
+{
+	nasid_t tio_nasid;
+	lboard_t *brd;
+	klhub_t *hub;
+	vertex_hdl_t node_vertex = NULL;
+	char path_buffer[100];
+	int rv;
+	char *s;
+	int board_disabled = 0;
+
+	tio_nasid = COMPACT_TO_NASID_NODEID(cnode);
+	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(tio_nasid), KLTYPE_TIO);
+	ASSERT(brd);
+
+	/* Generate a hardware graph path for this board. */
+	board_to_path(brd, path_buffer);
+	rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
+	if (rv != GRAPH_SUCCESS)
+		panic("TIO Node vertex creation failed.  "
+					  "Path == %s", path_buffer);
+
+	HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, node_vertex, NULL, "Created path for TIO node.\n"));
+	hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
+	ASSERT(hub);
+	if(hub->hub_info.flags & KLINFO_ENABLE)
+		board_disabled = 0;
+	else
+		board_disabled = 1;
+
+	if(!board_disabled) {
+		mark_nodevertex_as_node(node_vertex,
+				    cnode + board_disabled * numionodes);
+
+		s = dev_to_name(node_vertex, path_buffer, sizeof(path_buffer));
+		NODEPDA(cnode)->hwg_node_name =
+					kmalloc(strlen(s) + 1,
+					GFP_KERNEL);
+		ASSERT_ALWAYS(NODEPDA(cnode)->hwg_node_name != NULL);
+		strcpy(NODEPDA(cnode)->hwg_node_name, s);
+
+		hubinfo_set(node_vertex, NODEPDA(cnode)->pdinfo);
+
+		/* Set up node board's slot */
+		NODEPDA(cnode)->slotdesc = brd->brd_slot;
+
+		/* Set up the module we're in */
+		NODEPDA(cnode)->geoid = brd->brd_geoid;
+		NODEPDA(cnode)->module = module_lookup(geo_module(brd->brd_geoid));
+	}
+
+	if (!board_disabled)
+		klhwg_add_ice(node_vertex, hub, cnode);
+
+}
+
+
+/* ARGSUSED */
+static void __init
 klhwg_add_node(vertex_hdl_t hwgraph_root, cnodeid_t cnode)
 {
 	nasid_t nasid;
@@ -367,8 +323,6 @@
 
 	nasid = COMPACT_TO_NASID_NODEID(cnode);
 	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
-	GRPRINTF(("klhwg_add_node: Adding cnode %d, nasid %d, brd 0x%p\n",
-                cnode, nasid, brd));
 	ASSERT(brd);
 
 	do {
@@ -376,16 +330,11 @@
 
 		/* Generate a hardware graph path for this board. */
 		board_to_path(brd, path_buffer);
-
-		GRPRINTF(("klhwg_add_node: adding %s to vertex 0x%p\n",
-			path_buffer, hwgraph_root));
 		rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
-
 		if (rv != GRAPH_SUCCESS)
-			PRINT_PANIC("Node vertex creation failed.  "
-					  "Path == %s",
-				path_buffer);
+			panic("Node vertex creation failed.  Path == %s", path_buffer);
 
+		HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, node_vertex, NULL, "Created path for SHUB node.\n"));
 		hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
 		ASSERT(hub);
 		if(hub->hub_info.flags & KLINFO_ENABLE)
@@ -427,6 +376,8 @@
 			rv = hwgraph_path_add(node_vertex, EDGE_LBL_CPU, &cpu_dir);
 			if (rv != GRAPH_SUCCESS)
 				panic("klhwg_add_node: Cannot create CPU directory\n");
+			HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, cpu_dir, NULL, "Created cpu directory on SHUB node.\n"));
+
 		}
 
 		/* Add each CPU */
@@ -455,7 +406,7 @@
 
 
 /* ARGSUSED */
-void
+static void __init
 klhwg_add_all_routers(vertex_hdl_t hwgraph_root)
 {
 	nasid_t nasid;
@@ -467,54 +418,40 @@
 
 	for (cnode = 0; cnode < numnodes; cnode++) {
 		nasid = COMPACT_TO_NASID_NODEID(cnode);
-
-		GRPRINTF(("klhwg_add_all_routers: adding router on cnode %d\n",
-			cnode));
-
 		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
 				KLTYPE_ROUTER);
-
 		if (!brd)
 			/* No routers stored in this node's memory */
 			continue;
 
 		do {
 			ASSERT(brd);
-			GRPRINTF(("Router board struct is %p\n", brd));
 
 			/* Don't add duplicate boards. */
 			if (brd->brd_flags & DUPLICATE_BOARD)
 				continue;
 
-			GRPRINTF(("Router 0x%p module number is %d\n", brd, brd->brd_geoid));
 			/* Generate a hardware graph path for this board. */
 			board_to_path(brd, path_buffer);
 
-			GRPRINTF(("Router path is %s\n", path_buffer));
-
 			/* Add the router */
-			GRPRINTF(("klhwg_add_all_routers: adding %s to vertex 0x%p\n",
-				path_buffer, hwgraph_root));
 			rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
-
 			if (rv != GRAPH_SUCCESS)
-				PRINT_PANIC("Router vertex creation "
+				panic("Router vertex creation "
 						  "failed.  Path == %s",
 					path_buffer);
 
-			GRPRINTF(("klhwg_add_all_routers: get next board from 0x%p\n",
-					brd));
+			HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, node_vertex, NULL, "Created router path.\n"));
+
 		/* Find the rest of the routers stored on this node. */
 		} while ( (brd = find_lboard_class(KLCF_NEXT(brd),
 			 KLTYPE_ROUTER)) );
-
-		GRPRINTF(("klhwg_add_all_routers: Done.\n"));
 	}
 
 }
 
 /* ARGSUSED */
-void
+static void __init
 klhwg_connect_one_router(vertex_hdl_t hwgraph_root, lboard_t *brd,
 			 cnodeid_t cnode, nasid_t nasid)
 {
@@ -527,13 +464,8 @@
 	int port;
 	lboard_t *dest_brd;
 
-	GRPRINTF(("klhwg_connect_one_router: Connecting router on cnode %d\n",
-			cnode));
-
 	/* Don't add duplicate boards. */
 	if (brd->brd_flags & DUPLICATE_BOARD) {
-		GRPRINTF(("klhwg_connect_one_router: Duplicate router 0x%p on cnode %d\n",
-			brd, cnode));
 		return;
 	}
 
@@ -550,7 +482,7 @@
 
 	/* We don't know what to do with multiple router components */
 	if (brd->brd_numcompts != 1) {
-		PRINT_PANIC("klhwg_connect_one_router: %d cmpts on router\n",
+		panic("klhwg_connect_one_router: %d cmpts on router\n",
 			brd->brd_numcompts);
 		return;
 	}
@@ -584,15 +516,12 @@
 		if (rc != GRAPH_SUCCESS) {
 			if (is_specified(arg_maxnodes) && KL_CONFIG_DUPLICATE_BOARD(dest_brd))
 				continue;
-			PRINT_PANIC("Can't find router: %s", dest_path);
+			panic("Can't find router: %s", dest_path);
 		}
-		GRPRINTF(("klhwg_connect_one_router: Link from %s/%d to %s\n",
-			  path_buffer, port, dest_path));
 
 		sprintf(dest_path, "%d", port);
 
 		rc = hwgraph_edge_add(router_hndl, dest_hndl, dest_path);
-
 		if (rc == GRAPH_DUP) {
 			GRPRINTF(("Skipping port %d. nasid %d %s/%s\n",
 				  port, router->rou_port[port].port_nasid,
@@ -601,14 +530,16 @@
 		}
 
 		if (rc != GRAPH_SUCCESS && !is_specified(arg_maxnodes))
-			PRINT_PANIC("Can't create edge: %s/%s to vertex 0x%p error 0x%x\n",
+			panic("Can't create edge: %s/%s to vertex 0x%p error 0x%x\n",
 				path_buffer, dest_path, (void *)dest_hndl, rc);
+
+		HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, router_hndl, dest_hndl, "Created edge %s from vhdl1 to vhdl2.\n", dest_path));
 		
 	}
 }
 
 
-void
+static void __init
 klhwg_connect_routers(vertex_hdl_t hwgraph_root)
 {
 	nasid_t nasid;
@@ -641,7 +572,7 @@
 
 
 
-void
+static void __init
 klhwg_connect_hubs(vertex_hdl_t hwgraph_root)
 {
 	nasid_t nasid;
@@ -656,14 +587,18 @@
 	graph_error_t rc;
 	int port;
 
-	for (cnode = 0; cnode < numnodes; cnode++) {
+	for (cnode = 0; cnode < numionodes; cnode++) {
 		nasid = COMPACT_TO_NASID_NODEID(cnode);
 
-		GRPRINTF(("klhwg_connect_hubs: Connecting hubs on cnode %d\n",
-			cnode));
-
-		brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
-		ASSERT(brd);
+		if (!(nasid & 1)) {
+			brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
+			ASSERT(brd);
+		} else {
+			printk(KERN_INFO "klhwg_connect_hubs: Hwgraph 'link' vertex for NUMA links not ready for TIO boards yet!\n");
+			return; /* We need to make sure that TIO has the right stuff .. */
+			/* brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_TIO); */
+			/* ASSERT(brd); */
+		}
 
 		hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
 		ASSERT(hub);
@@ -680,10 +615,7 @@
 
 			/* Generate a hardware graph path for this board. */
 			board_to_path(brd, path_buffer);
-
-			GRPRINTF(("klhwg_connect_hubs: Hub path is %s.\n", path_buffer));
 			rc = hwgraph_traverse(hwgraph_root, path_buffer, &hub_hndl);
-
 			if (rc != GRAPH_SUCCESS)
 				printk(KERN_WARNING  "Can't find hub: %s", path_buffer);
 
@@ -699,24 +631,34 @@
 			if (rc != GRAPH_SUCCESS) {
 				if (is_specified(arg_maxnodes) && KL_CONFIG_DUPLICATE_BOARD(dest_brd))
 					continue;
-				PRINT_PANIC("Can't find board: %s", dest_path);
+				panic("Can't find board: %s", dest_path);
 			} else {
-				char buf[1024];
-		
+				char *buf;
+
+				buf = kmalloc(1024, GFP_KERNEL);
+				if (!buf) {
+					panic("klhwg_connect_hubs(): unable "
+					      "to allocate buffer");
+				}
 
 				GRPRINTF(("klhwg_connect_hubs: Link from %s to %s.\n",
 			  	path_buffer, dest_path));
 
 				rc = hwgraph_path_add(hub_hndl, EDGE_LBL_INTERCONNECT, &hub_hndl);
+
+				HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, hub_hndl, NULL, "Created link path.\n"));
+
 				sprintf(buf,"%s/%s",path_buffer,EDGE_LBL_INTERCONNECT);
 				rc = hwgraph_traverse(hwgraph_root, buf, &hub_hndl);
 				sprintf(buf,"%d",port);
 				rc = hwgraph_edge_add(hub_hndl, dest_hndl, buf);
 
+				HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, hub_hndl, dest_hndl, "Created edge %s from vhdl1 to vhdl2.\n", buf));
+
 				if (rc != GRAPH_SUCCESS)
-					PRINT_PANIC("Can't create edge: %s/%s to vertex 0x%p, error 0x%x\n",
+					panic("Can't create edge: %s/%s to vertex 0x%p, error 0x%x\n",
 					path_buffer, dest_path, (void *)dest_hndl, rc);
-
+				kfree(buf);
 			}
 		}
 	}
@@ -726,7 +668,7 @@
  * hints which can later be used by the drivers using the device/driver
  * admin interface. 
  */
-void
+static void __init
 klhwg_device_disable_hints_add(void)
 {
 	cnodeid_t	cnode; 		/* node we are looking at */
@@ -785,7 +727,7 @@
 	}
 }
 
-void
+void __init
 klhwg_add_all_modules(vertex_hdl_t hwgraph_root)
 {
 	cmoduleid_t	cm;
@@ -807,6 +749,7 @@
 		rc = hwgraph_path_add(hwgraph_root, name, &module_vhdl);
 		ASSERT(rc == GRAPH_SUCCESS);
 		rc = rc;
+		HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, module_vhdl, NULL, "Created module path.\n"));
 
 		hwgraph_fastinfo_set(module_vhdl, (arbitrary_info_t) modules[cm]);
 
@@ -816,17 +759,17 @@
 			buffer);
 
 		rc = hwgraph_path_add(hwgraph_root, name, &vhdl);
-		ASSERT_ALWAYS(rc == GRAPH_SUCCESS); 
+		ASSERT_ALWAYS(rc == GRAPH_SUCCESS);
 		rc = rc;
+		HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, vhdl, NULL, "Created L1 path.\n"));
 
 		hwgraph_info_add_LBL(vhdl,
 				     INFO_LBL_ELSC,
-				     (arbitrary_info_t) (__psint_t) 1);
-
+				     (arbitrary_info_t) (int64_t) 1);
 	}
 }
 
-void
+void __init
 klhwg_add_all_nodes(vertex_hdl_t hwgraph_root)
 {
 	cnodeid_t	cnode;
@@ -835,10 +778,18 @@
 		klhwg_add_node(hwgraph_root, cnode);
 	}
 
+	for (cnode = numnodes; cnode < numionodes; cnode++) {
+		klhwg_add_tionode(hwgraph_root, cnode);
+	}
+
 	for (cnode = 0; cnode < numnodes; cnode++) {
 		klhwg_add_xbow(cnode, cnodeid_to_nasid(cnode));
 	}
 
+	for (cnode = numnodes; cnode < numionodes; cnode++) {
+		klhwg_add_coretalk(cnode, cnodeid_to_nasid(cnode));
+	}
+
 	/*
 	 * As for router hardware inventory information, we set this
 	 * up in router.c. 
@@ -852,5 +803,4 @@
 	 * to figure out which pci components have been disabled
 	 */
 	klhwg_device_disable_hints_add();
-
 }
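
The loops above split hardware-graph construction between SHUB nodes (cnodes 0..numnodes) and TIO nodes (cnodes numnodes..numionodes), with odd NASIDs reserved for TIO. A small stand-alone sketch of that split; the node counts and the cnode-to-NASID formula are made up for illustration.

#include <stdio.h>

#define NUMNODES   2	/* compute (SHUB) nodes */
#define NUMIONODES 3	/* compute + TIO nodes  */

static int cnodeid_to_nasid(int cnode)
{
	/* assumption: even NASIDs are SHUB nodes, odd NASIDs are TIO nodes */
	return cnode < NUMNODES ? cnode * 2 : (cnode - NUMNODES) * 2 + 1;
}

int main(void)
{
	int cnode;

	/* compute nodes first, TIO nodes appended after them, as in
	 * klhwg_add_all_nodes() above */
	for (cnode = 0; cnode < NUMNODES; cnode++)
		printf("cnode %d nasid %d -> SHUB path\n", cnode, cnodeid_to_nasid(cnode));
	for (cnode = NUMNODES; cnode < NUMIONODES; cnode++)
		printf("cnode %d nasid %d -> TIO path\n", cnode, cnodeid_to_nasid(cnode));
	return 0;
}
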
diff -Nru a/arch/ia64/sn/io/sn2/l1_command.c b/arch/ia64/sn/io/sn2/l1_command.c
--- a/arch/ia64/sn/io/sn2/l1_command.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/l1_command.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -12,7 +11,6 @@
 #include <asm/sn/sgi.h>
 #include <asm/sn/io.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/hcl_util.h>
 #include <asm/sn/labelcl.h>
@@ -60,8 +58,6 @@
 	*rack = (result & MODULE_RACK_MASK) >> MODULE_RACK_SHFT;
 	*bay = (result & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT;
 	*brick_type = (result & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT;
-	*brick_type = toupper(*brick_type);
-
 	return 0;
 }
 
@@ -113,12 +109,22 @@
 	brick_type = MODULE_IXBRICK; break;
       case L1_BRICKTYPE_PX: 
 	brick_type = MODULE_PXBRICK; break;
+      case L1_BRICKTYPE_OPUS: 
+	brick_type = MODULE_OPUSBRICK; break;
       case L1_BRICKTYPE_I: 
 	brick_type = MODULE_IBRICK; break;
       case L1_BRICKTYPE_P:
 	brick_type = MODULE_PBRICK; break;
       case L1_BRICKTYPE_X:
 	brick_type = MODULE_XBRICK; break;
+      case L1_BRICKTYPE_CHI_CG:
+	brick_type = MODULE_CGBRICK; break;
+      case L1_BRICKTYPE_IA:
+	brick_type = MODULE_IABRICK; break;
+      case L1_BRICKTYPE_PA:
+	brick_type = MODULE_PABRICK; break;
+      case L1_BRICKTYPE_BU:
+	brick_type = MODULE_BUBRICK; break;
     }
 
     ret = RBT_TO_MODULE(rack, bay, brick_type);
@@ -178,26 +184,32 @@
     default:
         return("Unknown");
 
-    case L1_BRICKTYPE_X:
-        return("Xbrick");
-
-    case L1_BRICKTYPE_I:
-        return("Ibrick");
-
-    case L1_BRICKTYPE_P:
-        return("Pbrick");
-
     case L1_BRICKTYPE_PX:
-        return("PXbrick");
+        return(EDGE_LBL_PXBRICK);
+
+    case L1_BRICKTYPE_OPUS:
+        return(EDGE_LBL_OPUSBRICK);
 
     case L1_BRICKTYPE_IX:
-        return("IXbrick");
+        return(EDGE_LBL_IXBRICK);
 
     case L1_BRICKTYPE_C:
         return("Cbrick");
 
     case L1_BRICKTYPE_R:
         return("Rbrick");
+
+    case L1_BRICKTYPE_CHI_CG:
+        return(EDGE_LBL_CGBRICK);
+
+    case L1_BRICKTYPE_IA:
+	return(EDGE_LBL_IABRICK);
+
+    case L1_BRICKTYPE_PA:
+	return(EDGE_LBL_PABRICK);
+
+    case L1_BRICKTYPE_BU:
+	return(EDGE_LBL_BUBRICK);
     }
 }
 
diff -Nru a/arch/ia64/sn/io/sn2/ml_SN_init.c b/arch/ia64/sn/io/sn2/ml_SN_init.c
--- a/arch/ia64/sn/io/sn2/ml_SN_init.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/ml_SN_init.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -13,7 +12,6 @@
 #include <asm/sn/sgi.h>
 #include <asm/sn/io.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/sn_private.h>
@@ -25,46 +23,6 @@
 
 extern xwidgetnum_t hub_widget_id(nasid_t);
 
-extern void iograph_early_init(void);
-
-nasid_t master_nasid = INVALID_NASID;		/* This is the partition master nasid */
-nasid_t master_baseio_nasid = INVALID_NASID;	/* This is the master base I/O nasid */
-
-
-/*
- * mlreset(void)
- * 	very early machine reset - at this point NO interrupts have been
- * 	enabled; nor is memory, tlb, p0, etc setup.
- *
- * 	slave is zero when mlreset is called for the master processor and
- *	is nonzero thereafter.
- */
-
-
-void
-mlreset(int slave)
-{
-	/*
-	 * We are the master cpu and node.
-	 */ 
-	master_nasid = get_nasid();
-	set_master_bridge_base();
-
-	/* We're the master processor */
-	master_procid = smp_processor_id();
-	master_nasid = cpuid_to_nasid(master_procid);
-
-	/*
-	 * master_nasid we get back better be same as one from
-	 * get_nasid()
-	 */
-	ASSERT_ALWAYS(master_nasid == get_nasid());
-
-	/* early initialization of iograph */
-	iograph_early_init();
-}
-
-
 /* XXX - Move the meat of this to intr.c ? */
 /*
  * Set up the platform-dependent fields in the nodepda.
@@ -72,22 +30,26 @@
 void init_platform_nodepda(nodepda_t *npda, cnodeid_t node)
 {
 	hubinfo_t hubinfo;
+	nasid_t nasid;
 
 	extern void router_map_init(nodepda_t *);
 	extern void router_queue_init(nodepda_t *,cnodeid_t);
 	extern void intr_init_vecblk(nodepda_t *, cnodeid_t, int);
 
 	/* Allocate per-node platform-dependent data */
-	hubinfo = (hubinfo_t)alloc_bootmem_node(NODE_DATA(node), sizeof(struct hubinfo_s));
+	
+	nasid = COMPACT_TO_NASID_NODEID(node);
+	if (nasid & 1) /* TIO nodes' data reside in Node 0 */
+		hubinfo = (hubinfo_t)alloc_bootmem_node(NODE_DATA(0), sizeof(struct hubinfo_s));
+	else
+		hubinfo = (hubinfo_t)alloc_bootmem_node(NODE_DATA(node), sizeof(struct hubinfo_s));
 
 	npda->pdinfo = (void *)hubinfo;
 	hubinfo->h_nodepda = npda;
 	hubinfo->h_cnodeid = node;
-	hubinfo->h_nasid = COMPACT_TO_NASID_NODEID(node);
 
 	spin_lock_init(&hubinfo->h_crblock);
 
-	hubinfo->h_widgetid = hub_widget_id(hubinfo->h_nasid);
 	npda->xbow_peer = INVALID_NASID;
 
 	/* 
@@ -104,7 +66,22 @@
 	npda->npda_rip_last = &npda->npda_rip_first;
 	npda->geoid.any.type = GEO_TYPE_INVALID;
 
-	mutex_init_locked(&npda->xbow_sema); /* init it locked? */
+	init_MUTEX_LOCKED(&npda->xbow_sema); /* init it locked? */
+}
+
+void
+init_platform_hubinfo(nodepda_t **nodepdaindr) {
+	cnodeid_t       cnode;
+	hubinfo_t hubinfo;
+	nodepda_t *npda;
+
+	for (cnode = 0; cnode < numionodes; cnode++) {
+		npda = nodepdaindr[cnode];
+		hubinfo = (hubinfo_t)npda->pdinfo;
+		hubinfo->h_nasid = COMPACT_TO_NASID_NODEID(cnode);
+		/* For TIO the following returns -1 */
+		hubinfo->h_widgetid = hub_widget_id(hubinfo->h_nasid);
+	}
 }
 
 void
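
init_platform_nodepda() above only allocates the per-node data (TIO nodes borrow node 0's memory); the NASID and widget id are filled in later by init_platform_hubinfo() over all I/O nodes. A minimal sketch of that second pass, with hubinfo_t, the node count and the helpers reduced to stand-ins.

#include <stdio.h>

struct hubinfo {
	int h_cnodeid;
	int h_nasid;
	int h_widgetid;
};

#define NUMIONODES 3

static int compact_to_nasid(int cnode)	{ return cnode; }		/* assumption */
static int hub_widget_id(int nasid)	{ return (nasid & 1) ? -1 : 0; }	/* TIO -> -1 */

int main(void)
{
	static struct hubinfo hub[NUMIONODES];
	int cnode;

	/* second pass: fill in per-node hub info once all node pdas exist;
	 * TIO nodes simply record a widget id of -1 */
	for (cnode = 0; cnode < NUMIONODES; cnode++) {
		hub[cnode].h_cnodeid = cnode;
		hub[cnode].h_nasid = compact_to_nasid(cnode);
		hub[cnode].h_widgetid = hub_widget_id(hub[cnode].h_nasid);
		printf("cnode %d widget %d\n", cnode, hub[cnode].h_widgetid);
	}
	return 0;
}
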
diff -Nru a/arch/ia64/sn/io/sn2/ml_SN_intr.c b/arch/ia64/sn/io/sn2/ml_SN_intr.c
--- a/arch/ia64/sn/io/sn2/ml_SN_intr.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/ml_SN_intr.c	Thu Nov  6 13:42:35 2003
@@ -14,7 +14,6 @@
 #include <asm/hw_irq.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/io.h>
@@ -82,7 +81,7 @@
 	/* Config and enable UART interrupt, all nodes. */
 	local5_config.sh_local_int5_config_regval = 0;
 	local5_config.sh_local_int5_config_s.idx = SGI_UART_VECTOR;
-	local5_config.sh_local_int5_config_s.pid = cpu0;
+	local5_config.sh_local_int5_config_s.pid = cpu;
 	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_CONFIG),
 		local5_config.sh_local_int5_config_regval);
 
@@ -223,7 +222,7 @@
 		int intrs;
 
 		cpu = cnode_slice_to_cpuid(cnode, slice);
-		if (cpu == num_online_cpus())
+		if (cpu == NR_CPUS)
 			continue;
 		if (!cpu_online(cpu))
 			continue;
@@ -247,6 +246,27 @@
 	return best_cpu;
 }
 
+/* See if we can use this cpu/vect. */
+
+static cpuid_t
+intr_bit_reserve_test(cpuid_t cpu,
+		      cnodeid_t cnode, int req_bit, int resflags, int *resp_bit)
+{
+	ASSERT((cpu == CPU_NONE) || (cnode == CNODEID_NONE));
+
+	if (cnode != CNODEID_NONE) {
+		cpu = intr_cpu_choose_from_node(cnode);
+	}
+
+	if (cpu != CPU_NONE) {
+		*resp_bit = intr_reserve_level(cpu, req_bit);
+		if (*resp_bit >= 0) {
+			return (cpu);
+		}
+	}
+	return CPU_NONE;
+}
+
 /*
  * We couldn't put it on the closest node.  Try to find another one.
  * Do a stupid round-robin assignment of the node.
@@ -270,6 +290,31 @@
 	}
 
 	return CPU_NONE;
+}
+
+/*
+ * This is a temporary hack until we have the topology
+ * information to make an intelligent guess as to which
+ * node to send the interrupt to.
+ * For now, allocate the interrupt on the first node we find
+ * with processors.
+ */
+
+cpuid_t
+tio_intr_heuristic(vertex_hdl_t dev, int req_bit, int *resp_bit)
+{
+	cnodeid_t cnode = 0;
+	cpuid_t cpuid;
+
+	while (is_headless_node(cnode)) {
+		if (++cnode >= numnodes) {
+			panic("tio_intr_heuristic: "
+			      "Can't find a node with processors\n");
+		}
+	}
+	cpuid = intr_bit_reserve_test(CPU_NONE, cnode, req_bit, 0, resp_bit);
+	/* return value is checked by caller */
+	return cpuid;
 }
 
 /*
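
tio_intr_heuristic() above walks the compute nodes until it finds one with processors and reserves an interrupt bit there via intr_bit_reserve_test(). A stand-alone sketch of that scan-and-reserve pattern; the helper functions and node layout are invented stand-ins.

#include <stdio.h>

#define NUMNODES 4
#define CPU_NONE (-1)

static int headless[NUMNODES] = { 1, 1, 0, 0 };	/* nodes 0 and 1 have no CPUs */

static int is_headless_node(int cnode)		{ return headless[cnode]; }
static int first_cpu_of_node(int cnode)		{ return cnode * 2; }	/* assumption */
static int intr_reserve_level(int cpu, int bit)	{ return bit; }		/* always succeeds here */

/* Pick the first node that has processors and reserve a vector bit there. */
static int tio_intr_pick_cpu(int req_bit, int *resp_bit)
{
	int cnode = 0, cpu;

	while (is_headless_node(cnode))
		if (++cnode >= NUMNODES)
			return CPU_NONE;	/* the kernel code panics here instead */

	cpu = first_cpu_of_node(cnode);
	*resp_bit = intr_reserve_level(cpu, req_bit);
	return *resp_bit >= 0 ? cpu : CPU_NONE;
}

int main(void)
{
	int bit;
	int cpu = tio_intr_pick_cpu(5, &bit);

	printf("cpu %d bit %d\n", cpu, bit);
	return 0;
}
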
diff -Nru a/arch/ia64/sn/io/sn2/ml_iograph.c b/arch/ia64/sn/io/sn2/ml_iograph.c
--- a/arch/ia64/sn/io/sn2/ml_iograph.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/ml_iograph.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -7,30 +6,17 @@
  * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
  */
 
-#include <linux/types.h>
-#include <linux/slab.h>
 #include <linux/ctype.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/klconfig.h>
 #include <asm/sn/sn_private.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/xtalk/xswitch.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/xtalk/xtalk_private.h>
+#include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/xtalk/xtalkaddrs.h>
 
-/* #define IOGRAPH_DEBUG */
+/* #define IOGRAPH_DEBUG 1 */
 #ifdef IOGRAPH_DEBUG
 #define DBG(x...) printk(x)
 #else
@@ -49,20 +35,20 @@
  * xswitch vertex is created.
  */
 typedef struct xswitch_vol_s {
-	mutex_t xswitch_volunteer_mutex;
+	struct semaphore xswitch_volunteer_mutex;
 	int		xswitch_volunteer_count;
 	vertex_hdl_t	xswitch_volunteer[NUM_XSWITCH_VOLUNTEER];
 } *xswitch_vol_t;
 
+
 void
 xswitch_vertex_init(vertex_hdl_t xswitch)
 {
 	xswitch_vol_t xvolinfo;
 	int rc;
-	extern void * snia_kmem_zalloc(size_t size, int flag);
 
-	xvolinfo = snia_kmem_zalloc(sizeof(struct xswitch_vol_s), GFP_KERNEL);
-	mutex_init(&xvolinfo->xswitch_volunteer_mutex);
+	xvolinfo = snia_kmem_zalloc(sizeof(struct xswitch_vol_s));
+	init_MUTEX(&xvolinfo->xswitch_volunteer_mutex);
 	rc = hwgraph_info_add_LBL(xswitch, 
 			INFO_LBL_XSWITCH_VOL,
 			(arbitrary_info_t)xvolinfo);
@@ -79,12 +65,11 @@
 {
 	xswitch_vol_t xvolinfo;
 	int rc;
-	extern void snia_kmem_free(void *ptr, size_t size);
 
 	rc = hwgraph_info_remove_LBL(xswitch, 
 				INFO_LBL_XSWITCH_VOL,
 				(arbitrary_info_t *)&xvolinfo);
-	snia_kmem_free(xvolinfo, sizeof(struct xswitch_vol_s));
+	kfree(xvolinfo);
 }
 /*
  * A Crosstalk master volunteers to manage xwidgets on the specified xswitch.
@@ -101,14 +86,16 @@
 				INFO_LBL_XSWITCH_VOL, 
 				(arbitrary_info_t *)&xvolinfo);
 	if (xvolinfo == NULL) {
-	    if (!is_headless_node_vertex(master))
+	    if (!is_headless_node_vertex(master)) {
+		    char name[MAXDEVNAME];
 		    printk(KERN_WARNING
-			"volunteer for widgets: vertex 0x%p has no info label",
-			(void *)xswitch);
+			"volunteer for widgets: vertex %s has no info label",
+			vertex_to_name(xswitch, name, MAXDEVNAME));
+	    }
 	    return;
 	}
 
-	mutex_lock(&xvolinfo->xswitch_volunteer_mutex);
+	down(&xvolinfo->xswitch_volunteer_mutex);
 	ASSERT(xvolinfo->xswitch_volunteer_count < NUM_XSWITCH_VOLUNTEER);
 	xvolinfo->xswitch_volunteer[xvolinfo->xswitch_volunteer_count] = master;
 	xvolinfo->xswitch_volunteer_count++;
@@ -126,7 +113,7 @@
 			xvolinfo->xswitch_volunteer[1] = hubv;
 		}
 	}
-	mutex_unlock(&xvolinfo->xswitch_volunteer_mutex);
+	up(&xvolinfo->xswitch_volunteer_mutex);
 }
 
 extern int xbow_port_io_enabled(nasid_t nasid, int widgetnum);
@@ -158,11 +145,13 @@
 				INFO_LBL_XSWITCH_VOL, 
 				(arbitrary_info_t *)&xvolinfo);
 	if (xvolinfo == NULL) {
-	    if (!is_headless_node_vertex(hubv))
+	    if (!is_headless_node_vertex(hubv)) {
+		    char name[MAXDEVNAME];
 		    printk(KERN_WARNING
-			"assign_widgets_to_volunteers:vertex 0x%p has "
+			"assign_widgets_to_volunteers:vertex %s has "
 			" no info label",
-			(void *)xswitch);
+			vertex_to_name(xswitch, name, MAXDEVNAME));
+	    }
 	    return;
 	}
 
@@ -193,16 +182,16 @@
 		 * hub that owned it in the prom.
 		 */
 		if (is_master_baseio_nasid_widget(nasid, widgetnum)) {
-			extern nasid_t get_master_baseio_nasid(void);
+			extern nasid_t snia_get_master_baseio_nasid(void);
 			for (i=0; i<num_volunteer; i++) {
 				hubv = xvolinfo->xswitch_volunteer[i];
 				hubinfo_get(hubv, &hubinfo);
 				nasid = hubinfo->h_nasid;
-				if (nasid == get_master_baseio_nasid())
+				if (nasid == snia_get_master_baseio_nasid())
 					goto do_assignment;
 			}
-			PRINT_PANIC("Nasid == %d, console nasid == %d",
-				nasid, get_master_baseio_nasid());
+			panic("Nasid == %d, console nasid == %d",
+				nasid, snia_get_master_baseio_nasid());
 		}
 
 		/*
@@ -236,38 +225,6 @@
 	xswitch_volunteer_delete(xswitch);
 }
 
-/*
- * Early iograph initialization.  Called by master CPU in mlreset().
- * Useful for including iograph.o in kernel.o.
- */
-void
-iograph_early_init(void)
-{
-/*
- * Need new way to get this information ..
- */
-	cnodeid_t cnode;
-	nasid_t nasid;
-	lboard_t *board;
-	
-	/*
-	 * Init. the board-to-hwgraph link early, so FRU analyzer
-	 * doesn't trip on leftover values if we panic early on.
-	 */
-	for(cnode = 0; cnode < numnodes; cnode++) {
-		nasid = COMPACT_TO_NASID_NODEID(cnode);
-		board = (lboard_t *)KL_CONFIG_INFO(nasid);
-		DBG("iograph_early_init: Found board 0x%p\n", board);
-
-		/* Check out all the board info stored on a node */
-		while(board) {
-			board->brd_graph_link = GRAPH_VERTEX_NONE;
-			board = KLCF_NEXT(board);
-			DBG("iograph_early_init: Found board 0x%p\n", board);
-		}
-	}
-}
-
 /* 
  * Probe to see if this hub's xtalk link is active.  If so,
  * return the Crosstalk Identification of the widget that we talk to.  
@@ -308,22 +265,104 @@
 		hwid->part_num = XWIDGET_PART_NUM(widget_id);
 		hwid->rev_num = XWIDGET_REV_NUM(widget_id);
 		hwid->mfg_num = XWIDGET_MFG_NUM(widget_id);
-
 		/* TBD: link reset */
 	} else {
-
 		hwid->part_num = XWIDGET_PART_NUM_NONE;
 		hwid->rev_num = XWIDGET_REV_NUM_NONE;
 		hwid->mfg_num = XWIDGET_MFG_NUM_NONE;
 	}
 }
 
+static void
+tio_widget_init( vertex_hdl_t	hubv,
+		xwidgetnum_t	widgetnum,
+		struct xwidget_hwid_s   *hwid)
+{
+	xwidgetnum_t		hub_widgetid;
+	vertex_hdl_t		widgetv;
+	cnodeid_t		cnode;
+	nasid_t			nasid;
+	hubinfo_t		hubinfo;
+	/*REFERENCED*/
+	int			rc;
+	char 			pathname[128];
+	lboard_t		*board = NULL, dummy;
+	char			buffer[16];
+	slotid_t get_widget_slotnum(int xbow, int widget);
+	
+	DBG("\ntio_widget_init: hubv 0x%p, widgetnum 0x%x\n", hubv, widgetnum);
+
+	hubinfo_get(hubv, &hubinfo);
+	nasid = hubinfo->h_nasid;
+	cnode = NASID_TO_COMPACT_NODEID(nasid);
+	hub_widgetid = hubinfo->h_widgetid;
+
+
+	board = find_lboard_class(
+			(lboard_t *)KL_CONFIG_INFO(nasid),
+			KLCLASS_IOBRICK);
+	while (board && board->brd_nasid != nasid) {
+		board = KLCF_NEXT(board);
+		if (!board) break;
+		board = find_lboard_class(board, KLCLASS_IOBRICK);
+	}
+
+	if (board) {
+		DBG("tio_widget_init: Found KLTYPE_IOBRICK Board 0x%p brd_type 0x%x\n", board, board->brd_type);
+	} else {
+		DBG("tio_widget_init: FIXME did not find IOBOARD\n");
+		board = &dummy;
+	}
+
+
+	/* Copy over the nodes' geoid info */
+	{
+		lboard_t *brd;
+
+		brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_TIO);
+		if ( brd != (lboard_t *)0 ) {
+			board->brd_geoid = brd->brd_geoid;
+		}
+	}
+
+	/*
+ 	 * Make sure we really want to say xbrick, pbrick,
+	 * etc. rather than XIO, graphics, etc.
+ 	 */
+
+	memset(buffer, 0, 16);
+	format_module_id(buffer, geo_module(board->brd_geoid), MODULE_FORMAT_BRIEF);
+
+	sprintf(pathname, EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d" "/%s" "/%s/%d",
+		buffer,
+		geo_slab(board->brd_geoid),
+		(board->brd_type == KLTYPE_PXBRICK) ? EDGE_LBL_PXBRICK :
+		(board->brd_type == KLTYPE_IXBRICK) ? EDGE_LBL_IXBRICK :
+		(board->brd_type == KLTYPE_CGBRICK) ? EDGE_LBL_CGBRICK :
+		(board->brd_type == KLTYPE_OPUSBRICK) ? EDGE_LBL_OPUSBRICK :
+		(board->brd_type == KLTYPE_BUBRICK) ? EDGE_LBL_BUBRICK :
+		(board->brd_type == KLTYPE_IABRICK) ? EDGE_LBL_IABRICK :
+		(board->brd_type == KLTYPE_PABRICK) ? EDGE_LBL_PABRICK : "?brick",
+		EDGE_LBL_CORETALK, widgetnum);
+		
+	DBG("tio_widget_init: path= %s\n", pathname);
+	rc = hwgraph_path_add(hwgraph_root, pathname, &widgetv);
+		
+	ASSERT(rc == GRAPH_SUCCESS);
+
+	(void)xwidget_register(hwid, widgetv, widgetnum,
+			       hubv, hub_widgetid);
+
+}
+
+
+
 /*
  * io_xswitch_widget_init
  *	
  */
 
-void
+static void
 io_xswitch_widget_init(vertex_hdl_t  	xswitchv,
 		       vertex_hdl_t	hubv,
 		       xwidgetnum_t	widgetnum)
@@ -424,11 +463,10 @@
 		sprintf(pathname, EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d" "/%s" "/%s/%d",
 			buffer,
 			geo_slab(board->brd_geoid),
-			(board->brd_type == KLTYPE_IBRICK) ? EDGE_LBL_IBRICK :
-			(board->brd_type == KLTYPE_PBRICK) ? EDGE_LBL_PBRICK :
 			(board->brd_type == KLTYPE_PXBRICK) ? EDGE_LBL_PXBRICK :
 			(board->brd_type == KLTYPE_IXBRICK) ? EDGE_LBL_IXBRICK :
-			(board->brd_type == KLTYPE_XBRICK) ? EDGE_LBL_XBRICK : "?brick",
+			(board->brd_type == KLTYPE_CGBRICK) ? EDGE_LBL_CGBRICK :
+			(board->brd_type == KLTYPE_OPUSBRICK) ? EDGE_LBL_OPUSBRICK : "?brick",
 			EDGE_LBL_XTALK, widgetnum);
 		
 		DBG("io_xswitch_widget_init: path= %s\n", pathname);
@@ -461,12 +499,10 @@
 		hwid.part_num = XWIDGET_PART_NUM(widget_id);
 		hwid.rev_num = XWIDGET_REV_NUM(widget_id);
 		hwid.mfg_num = XWIDGET_MFG_NUM(widget_id);
-
 		(void)xwidget_register(&hwid, widgetv, widgetnum,
 				       hubv, hub_widgetid);
 
 		ia64_sn_sysctl_iobrick_module_get(nasid, &io_module);
-
 		if (io_module >= 0) {
 			char			buffer[16];
 			vertex_hdl_t		to, from;
@@ -477,8 +513,8 @@
 			memset(buffer, 0, 16);
 			format_module_id(buffer, geo_module(board->brd_geoid), MODULE_FORMAT_BRIEF);
 
-			if ( islower(MODULE_GET_BTCHAR(io_module)) ) {
-				bt = toupper(MODULE_GET_BTCHAR(io_module));
+			if ( isupper(MODULE_GET_BTCHAR(io_module)) ) {
+				bt = tolower(MODULE_GET_BTCHAR(io_module));
 			}
 			else {
 				bt = MODULE_GET_BTCHAR(io_module);
@@ -497,24 +533,24 @@
 				EDGE_LBL_NODE "/" EDGE_LBL_XTALK "/"
 				"0",
 				buffer, geo_slab(board->brd_geoid));
+			DBG("io_xswitch_widget_init: FROM path '%s'\n", pathname);
+
 			from = hwgraph_path_to_vertex(pathname);
 			ASSERT_ALWAYS(from);
+
 			sprintf(pathname, EDGE_LBL_HW "/" EDGE_LBL_MODULE "/%s/"
 				EDGE_LBL_SLAB "/%d/"
 				"%s",
 				buffer, geo_slab(board->brd_geoid), brick_name);
-
+			DBG("io_xswitch_widget_init: TO path '%s'\n", pathname);
 			to = hwgraph_path_to_vertex(pathname);
 			ASSERT_ALWAYS(to);
 			rc = hwgraph_edge_add(from, to,
 				EDGE_LBL_INTERCONNECT);
-			if (rc == -EEXIST)
-				goto link_done;
-			if (rc != GRAPH_SUCCESS) {
+			if (rc != -EEXIST && rc != GRAPH_SUCCESS) {
 				printk("%s: Unable to establish link"
 					" for xbmon.", pathname);
 			}
-link_done:
 		}
 
 #ifdef	SN0_USE_BTE
@@ -603,6 +639,110 @@
 	}
 }
 
+
+
+/*
+ * Initialize all I/O on the specified TIO node.
+ */
+static void
+tio_init_node(cnodeid_t cnodeid)
+{
+	/*REFERENCED*/
+	vertex_hdl_t hubv, switchv, widgetv[3], coreletv;
+	struct xwidget_hwid_s hwid[3];
+	nodepda_t	*npdap;
+	uint32_t	cp0_partnum, cp1_partnum, ca_partnum;
+	int32_t		cp0_id, cp1_id, ca_id;
+	int		i;
+
+	npdap = NODEPDA(cnodeid);
+
+	/*
+	 * Get the "top" vertex for this node's hardware
+	 * graph; it will carry the per-hub hub-specific
+	 * data, and act as the coretalk provider master.
+	 * Its canonical path is probably something of the
+	 * form /hw/module/%M/slot/%d/node
+	 */
+	hubv = cnodeid_to_vertex(cnodeid);
+	DBG("tio_init_node: Initialize IO for cnode %d hubv(node) 0x%p npdap 0x%p\n", cnodeid, hubv, npdap);
+
+	ASSERT(hubv != GRAPH_VERTEX_NONE);
+
+	/* 
+	 * attach our hub_provider information to hubv,
+	 * so we can use it as a crosstalk provider "master"
+	 * vertex.
+	 */
+	xtalk_provider_register(hubv, &tio_provider);
+	xtalk_provider_startup(hubv);
+
+	/*
+	 * Create a vertex to represent the coretalk bus
+	 * attached to this hub, and a vertex to be used
+	 * as the connect point for whatever is out there
+	 * on the other side of our coretalk connection.
+	 *
+	 *
+	 * Of course, the edges and vertices may already
+	 * exist, in which case our net effect is just to
+	 * associate the "coretalk_" driver with the connection
+	 * point for the device.
+	 */
+
+	(void)hwgraph_path_add(hubv, EDGE_LBL_CORETALK, &switchv);
+
+	DBG("tio_init_node: Created 'coretalk' entry to '../node/' coretalk vertex 0x%p\n", switchv);
+
+	ASSERT(switchv != GRAPH_VERTEX_NONE);
+
+	(void)hwgraph_edge_add(hubv, switchv, EDGE_LBL_IO);
+
+	DBG("tio_init_node: Created symlink 'io' from ../node/io to ../node/coretalk \n");
+
+	/*
+	 * We need to find the widget id and update the basew_id field.
+	 * We do this for the two cps and the ca.
+	 */
+
+	cp0_id = *(volatile int32_t *)(TIO_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + WIDGET_ID);
+	cp0_partnum = hwid[0].part_num = (cp0_id & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;
+	hwid[0].mfg_num = (cp0_id & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT;
+	hwid[0].rev_num = (cp0_id & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT;
+
+	cp1_id = *(volatile int32_t *)(TIO_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 1) + WIDGET_ID);
+	cp1_partnum = hwid[1].part_num = (cp1_id & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;
+	hwid[1].mfg_num = (cp1_id & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT;
+	hwid[1].rev_num = (cp1_id & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT;
+
+	ca_id = *(volatile int32_t *)(TIO_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 2) + WIDGET_ID);
+	ca_partnum = hwid[2].part_num = (ca_id & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;
+	hwid[2].mfg_num = (ca_id & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT;
+	hwid[2].rev_num = (ca_id & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT;
+
+	for (i = 0; i < TIO_WIDGET_ID_MAX; i++) {
+		npdap->tio_basew_id[i] = (((*(volatile int32_t *)(TIO_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), i) + WIDGET_CONTROL))) & CORELET_ID);
+	}
+
+	(void)hwgraph_path_add(switchv, EDGE_LBL_CORELET, &coreletv);
+	ASSERT(coreletv != GRAPH_VERTEX_NONE);
+	for (i = 0; i < TIO_WIDGET_ID_MAX; i++) {
+		char widname[10];
+		sprintf(widname, "%x", i);
+		(void)hwgraph_path_add(coreletv, widname, &widgetv[i]);
+		DBG("tio_init_node: Created '%s' to '../node/coretalk/corelet/' vertex 0x%p\n", widname, widgetv[i]);
+		ASSERT(widgetv[i] != GRAPH_VERTEX_NONE);
+		npdap->tio_basew_xc[i] = widgetv[i];
+	}
+	
+	for (i = 0; i < TIO_WIDGET_ID_MAX; i++) {
+		(void)tio_widget_init(hubv, i, &hwid[i]);
+	}
+
+	DBG("\ntio_init_node: DONE INITIALIZED ALL I/O FOR CNODEID %d\n\n", cnodeid);
+}
+
+
 /*
  * Initialize all I/O on the specified node.
  */
@@ -617,7 +757,6 @@
 	nodepda_t	*npdap;
 	struct semaphore *peer_sema = 0;
 	uint32_t	widget_partnum;
-	cpu_cookie_t	c = 0;
 
 	npdap = NODEPDA(cnodeid);
 
@@ -697,13 +836,7 @@
 
 	widget_partnum = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + WIDGET_ID))) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;
 
-	if (widget_partnum == BRIDGE_WIDGET_PART_NUM ||
-				widget_partnum == XBRIDGE_WIDGET_PART_NUM){
-		npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
-
-		DBG("io_init_node: Found XBRIDGE widget_partnum= 0x%x\n", widget_partnum);
-
-	} else if ((widget_partnum == XBOW_WIDGET_PART_NUM) ||
+	if ((widget_partnum == XBOW_WIDGET_PART_NUM) ||
 			(widget_partnum == XXBOW_WIDGET_PART_NUM) ||
 			(widget_partnum == PXBOW_WIDGET_PART_NUM) ) {
 		/* 
@@ -714,7 +847,9 @@
 		npdap->basew_id = 0;
 
 	} else {
-		npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
+		pci_bridge_t	*bridge;
+		bridge = (pci_bridge_t *)NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0);
+		npdap->basew_id = pcireg_control_get(bridge) & WIDGET_WIDGET_ID;
 
 		panic(" ****io_init_node: Unknown Widget Part Number 0x%x Widget ID 0x%x attached to Hubv 0x%p ****\n", widget_partnum, npdap->basew_id, (void *)hubv);
 
@@ -763,12 +898,15 @@
 		 *	5) Initialize all xwidgets on the xswitch
 		 */
 
+		DBG("call volunteer_for_widgets\n");
+
 		volunteer_for_widgets(switchv, hubv);
 
 		/* If there's someone else on this crossbow, recognize him */
 		if (npdap->xbow_peer != INVALID_NASID) {
 			nodepda_t *peer_npdap = NODEPDA(NASID_TO_COMPACT_NODEID(npdap->xbow_peer));
 			peer_sema = &peer_npdap->xbow_sema;
+			DBG("call volunteer_for_widgets again\n");
 			volunteer_for_widgets(switchv, peer_npdap->node_vertex);
 		}
 
@@ -776,13 +914,13 @@
 
 		/* Signal that we're done */
 		if (peer_sema) {
-			mutex_unlock(peer_sema);
+			up(peer_sema);
 		}
 		
 	}
 	else {
 	    /* Wait 'til master is done assigning widgets. */
-	    mutex_lock(&npdap->xbow_sema);
+	    down(&npdap->xbow_sema);
 	}
 
 #ifdef PROBE_TEST
@@ -800,8 +938,8 @@
 	DBG("\nio_init_node: DONE INITIALIZED ALL I/O FOR CNODEID %d\n\n", cnodeid);
 }
 
+
 #include <asm/sn/ioerror_handling.h>
-/* #endif */
 
 /*
  * Initialize all I/O devices.  Starting closest to nodes, probe and
@@ -810,75 +948,56 @@
 void
 init_all_devices(void)
 {
-	/* Governor on init threads..bump up when safe 
-	 * (beware many devfs races) 
-	 */
 	cnodeid_t cnodeid, active;
 
 	active = 0;
-	for (cnodeid = 0; cnodeid < numnodes; cnodeid++) {
+	for (cnodeid = 0; cnodeid < numionodes; cnodeid++) {
                 DBG("init_all_devices: Calling io_init_node() for cnode %d\n", cnodeid);
-                io_init_node(cnodeid);
+		if (COMPACT_TO_NASID_NODEID(cnodeid) & 0x1) {
+			tio_init_node(cnodeid);
+		} else {
+                	io_init_node(cnodeid);
+		}
 
 		DBG("init_all_devices: Done io_init_node() for cnode %d\n", cnodeid);
 	}
 
-	for (cnodeid = 0; cnodeid < numnodes; cnodeid++)
+	for (cnodeid = 0; cnodeid < numnodes; cnodeid++) {
 		/*
 	 	 * Update information generated by IO init.
 		 */
 		update_node_information(cnodeid);
-
-#if HWG_PRINT
-	hwgraph_print();
-#endif
-
+	}
 }
 
-#define toint(x) ((int)(x) - (int)('0'))
-
 static
 struct io_brick_map_s io_brick_tab[] = {
 
-/* Ibrick widget number to PCI bus number map */
- {      MODULE_IBRICK,                          /* Ibrick type    */ 
+/* PXbrick widget number to PCI bus number map */
+ {      MODULE_PXBRICK,                         /* PXbrick type   */ 
     /*  PCI Bus #                                  Widget #       */
     {   0, 0, 0, 0, 0, 0, 0, 0,                 /* 0x0 - 0x7      */
         0,                                      /* 0x8            */
         0,                                      /* 0x9            */
         0, 0,                                   /* 0xa - 0xb      */
-        0,                                      /* 0xc            */
-        0,                                      /* 0xd            */
-        2,                                      /* 0xe            */
-        1                                       /* 0xf            */
-     }
- },
-
-/* Pbrick widget number to PCI bus number map */
- {      MODULE_PBRICK,                          /* Pbrick type    */ 
-    /*  PCI Bus #                                  Widget #       */
-    {   0, 0, 0, 0, 0, 0, 0, 0,                 /* 0x0 - 0x7      */
-        2,                                      /* 0x8            */
-        1,                                      /* 0x9            */
-        0, 0,                                   /* 0xa - 0xb      */
-        4,                                      /* 0xc            */
-        6,                                      /* 0xd            */
-        3,                                      /* 0xe            */
-        5                                       /* 0xf            */
+        1,                                      /* 0xc            */
+        5,                                      /* 0xd            */
+        0,                                      /* 0xe            */
+        3                                       /* 0xf            */
     }
  },
 
-/* PXbrick widget number to PCI bus number map */
- {      MODULE_PXBRICK,                         /* PXbrick type   */ 
+/* OPUSbrick widget number to PCI bus number map */
+ {      MODULE_OPUSBRICK,                       /* OPUSbrick type */ 
     /*  PCI Bus #                                  Widget #       */
     {   0, 0, 0, 0, 0, 0, 0, 0,                 /* 0x0 - 0x7      */
         0,                                      /* 0x8            */
         0,                                      /* 0x9            */
         0, 0,                                   /* 0xa - 0xb      */
-        1,                                      /* 0xc            */
-        5,                                      /* 0xd            */
+        0,                                      /* 0xc            */
+        0,                                      /* 0xd            */
         0,                                      /* 0xe            */
-        3                                       /* 0xf            */
+        1                                       /* 0xf            */
     }
  },
 
@@ -896,23 +1015,48 @@
     }
  },
 
-/* Xbrick widget to XIO slot map */
- {      MODULE_XBRICK,                          /* Xbrick type    */ 
-    /*  XIO Slot #                                 Widget #       */
+/* CG brick widget number to PCI bus number map */
+ {      MODULE_CGBRICK,				/* CG brick       */
+    /*  PCI Bus #                                  Widget #       */
     {   0, 0, 0, 0, 0, 0, 0, 0,                 /* 0x0 - 0x7      */
-        1,                                      /* 0x8            */
-        3,                                      /* 0x9            */
-        0, 0,                                   /* 0xa - 0xb      */
-        2,                                      /* 0xc            */
-        4,                                      /* 0xd            */
+        0,                                      /* 0x8            */
+        0,                                      /* 0x9            */
+        0, 1,                                   /* 0xa - 0xb      */
+        0,                                      /* 0xc            */
+        0,                                      /* 0xd            */
         0,                                      /* 0xe            */
         0                                       /* 0xf            */
+     }
+ },
+/* IAbrick corelet number to PCI bus number map */
+ {	MODULE_IABRICK,				/* IA brick       */
+    /*  PCI Bus #                                  corelet #	  */
+    {   1, 					/* 0x0		  */
+	2,					/* 0x1		  */
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0
+    }
+ },
+/* PAbrick corelet number to PCI bus number map */
+ {	MODULE_PABRICK,				/* PA brick       */
+    /*  PCI Bus #                                  corelet #	  */
+    {   1, 					/* 0x0		  */
+	2,					/* 0x1		  */
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0
+    }
+ },
+/* TIO BringUp brick corelet number to PCI bus number map */
+ {	MODULE_BUBRICK,				/* BU brick       */
+    /*  PCI Bus #                                  corelet #	  */
+    {   2, 					/* 0x0		  */
+	1,					/* 0x1		  */
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0
     }
  }
 };
 
 /*
- * Use the brick's type to map a widget number to a meaningful int
+ * Use the brick's type to map a widget (or corelet) number to a 
+ * meaningful int
  */
 int
 io_brick_map_widget(int brick_type, int widget_num)
@@ -929,5 +1073,4 @@
         }
 
         return 0;
-
 }
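
io_brick_map_widget() above resolves a widget (or corelet) number to a PCI bus number by scanning io_brick_tab[] for the brick type and indexing its per-widget array. A cut-down user-space sketch; the brick-type constants are placeholders, though the PXbrick bus numbers match the table in this patch.

#include <stdio.h>

#define MAX_PCI_XWIDGET 16

struct io_brick_map {
	int brick_type;
	int widget_to_bus[MAX_PCI_XWIDGET];	/* indexed by widget/corelet number */
};

static struct io_brick_map brick_tab[] = {
	{ 1 /* assumed MODULE_PXBRICK */,
	  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 5, 0, 3 } },
	{ 2 /* assumed MODULE_IABRICK */,
	  { 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
};

/* Return the PCI bus number for a widget on the given brick type, 0 if unknown. */
static int io_brick_map_widget(int brick_type, int widget_num)
{
	int i, num_bricks = sizeof(brick_tab) / sizeof(brick_tab[0]);

	if (widget_num < 0 || widget_num >= MAX_PCI_XWIDGET)
		return 0;
	for (i = 0; i < num_bricks; i++)
		if (brick_tab[i].brick_type == brick_type)
			return brick_tab[i].widget_to_bus[widget_num];
	return 0;
}

int main(void)
{
	printf("%d\n", io_brick_map_widget(1, 0xc));	/* -> 1 */
	return 0;
}
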
diff -Nru a/arch/ia64/sn/io/sn2/module.c b/arch/ia64/sn/io/sn2/module.c
--- a/arch/ia64/sn/io/sn2/module.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/module.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -9,14 +8,14 @@
 
 #include <linux/types.h>
 #include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/string.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/io.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/pci/bridge.h>
 #include <asm/sn/klconfig.h>
 #include <asm/sn/module.h>
 #include <asm/sn/pci/pcibr.h>
@@ -25,7 +24,7 @@
 #include <asm/sn/sn_cpuid.h>
 
 
-/* #define LDEBUG	1  */
+/* #define LDEBUG	1 */
 
 #ifdef LDEBUG
 #define DPRINTF		printk
@@ -40,40 +39,8 @@
 #define SN00_SERIAL_FUDGE	0x3b1af409d513c2
 #define SN0_SERIAL_FUDGE	0x6e
 
-void
-encode_int_serial(uint64_t src,uint64_t *dest)
-{
-    uint64_t val;
-    int i;
-
-    val = src + SN00_SERIAL_FUDGE;
-
-
-    for (i = 0; i < sizeof(long long); i++) {
-	((char*)dest)[i] =
-	    ((char*)&val)[sizeof(long long)/2 +
-			 ((i%2) ? ((i/2 * -1) - 1) : (i/2))];
-    }
-}
-
-
-void
-decode_int_serial(uint64_t src, uint64_t *dest)
-{
-    uint64_t val;
-    int i;
-
-    for (i = 0; i < sizeof(long long); i++) {
-	((char*)&val)[sizeof(long long)/2 +
-		     ((i%2) ? ((i/2 * -1) - 1) : (i/2))] =
-	    ((char*)&src)[i];
-    }
-
-    *dest = val - SN00_SERIAL_FUDGE;
-}
-
 
-void
+static void __init
 encode_str_serial(const char *src, char *dest)
 {
     int i;
@@ -86,20 +53,7 @@
     }
 }
 
-void
-decode_str_serial(const char *src, char *dest)
-{
-    int i;
-
-    for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++) {
-	dest[MAX_SERIAL_NUM_SIZE/2 +
-	    ((i%2) ? ((i/2 * -1) - 1) : (i/2))] = src[i] -
-	    SN0_SERIAL_FUDGE;
-    }
-}
-
-
-module_t *module_lookup(moduleid_t id)
+module_t * __init module_lookup(moduleid_t id)
 {
     int			i;
 
@@ -122,27 +76,33 @@
  *   The node number is added to the list of nodes in the module.
  */
 
-module_t *module_add_node(geoid_t geoid, cnodeid_t cnodeid)
+static module_t * __init
+module_add_node(geoid_t geoid, cnodeid_t cnodeid)
 {
     module_t	       *m;
     int			i;
     char		buffer[16];
     moduleid_t		moduleid;
+    slabid_t		slab_number;
 
     memset(buffer, 0, 16);
     moduleid = geo_module(geoid);
     format_module_id(buffer, moduleid, MODULE_FORMAT_BRIEF);
-    DPRINTF("module_add_node: moduleid=%s node=%d\n", buffer, cnodeid);
+    DPRINTF("module_add_node: moduleid=%s node=%d ", buffer, cnodeid);
 
     if ((m = module_lookup(moduleid)) == 0) {
 	m = kmalloc(sizeof (module_t), GFP_KERNEL);
-	memset(m, 0 , sizeof(module_t));
 	ASSERT_ALWAYS(m);
+	memset(m, 0 , sizeof(module_t));
+
+	for (slab_number = 0; slab_number <= MAX_SLABS; slab_number++) {
+		m->nodes[slab_number] = -1;
+	}
 
 	m->id = moduleid;
 	spin_lock_init(&m->lock);
 
-	mutex_init_locked(&m->thdcnt);
+	init_MUTEX_LOCKED(&m->thdcnt);
 
 	/* Insert in sorted order by module number */
 
@@ -153,16 +113,24 @@
 	nummodules++;
     }
 
-    m->nodes[m->nodecnt] = cnodeid;
-    m->geoid[m->nodecnt] = geoid;
-    m->nodecnt++;
+    /*
+     * Save this information in the correct slab number of the node in the 
+     * module.
+     */
+    slab_number = geo_slab(geoid);
+    DPRINTF("slab number added 0x%x\n", slab_number);
+
+    if (m->nodes[slab_number] != -1)
+	panic("module_add_node .. slab previously found\n");
 
-    DPRINTF("module_add_node: module %s now has %d nodes\n", buffer, m->nodecnt);
+    m->nodes[slab_number] = cnodeid;
+    m->geoid[slab_number] = geoid;
 
     return m;
 }
 
-int module_probe_snum(module_t *m, nasid_t nasid)
+static int __init
+module_probe_snum(module_t *m, nasid_t host_nasid, nasid_t nasid)
 {
     lboard_t	       *board;
     klmod_serial_num_t *comp;
@@ -171,7 +139,7 @@
     /*
      * record brick serial number
      */
-    board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
+    board = find_lboard((lboard_t *) KL_CONFIG_INFO(host_nasid), KLTYPE_SNIA);
 
     if (! board || KL_CONFIG_DUPLICATE_BOARD(board))
     {
@@ -214,10 +182,9 @@
 #endif
 
 	    if (comp->snum.snum_str[0] != '\0') {
-		memcpy(m->sys_snum,
-		       comp->snum.snum_str,
-		       MAX_SERIAL_NUM_SIZE);
-		m->sys_snum_valid = 1;
+		    memcpy(m->sys_snum, comp->snum.snum_str,
+			   MAX_SERIAL_NUM_SIZE);
+		    m->sys_snum_valid = 1;
 	    }
     }
 
@@ -230,7 +197,7 @@
     }
 }
 
-void
+void __init
 io_module_init(void)
 {
     cnodeid_t		node;
@@ -243,21 +210,47 @@
 
     nserial = 0;
 
+    /*
+     * First pass: scan only for compute node boards (KLTYPE_SNIA).
+     * We do not support memoryless compute nodes.
+     */
     for (node = 0; node < numnodes; node++) {
 	nasid = COMPACT_TO_NASID_NODEID(node);
-
 	board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
 	ASSERT(board);
 
-	m = module_add_node(board->brd_geoid, node);
+	HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, NULL, NULL, "Found Shub lboard 0x%lx nasid 0x%x cnode 0x%x \n", (unsigned long)board, (int)nasid, (int)node));
 
-	if (! m->snum_valid && module_probe_snum(m, nasid))
+	m = module_add_node(board->brd_geoid, node);
+	if (! m->snum_valid && module_probe_snum(m, nasid, nasid))
 	    nserial++;
     }
 
-    DPRINTF("********found total of %d serial numbers in the system\n",
-	    nserial);
+    /*
+     * Second pass: look for TIO boards hosted by compute nodes.
+     */
+    for (node = numnodes; node < numionodes; node++) {
+	nasid_t		tio_nasid;
+	cnodeid_t	tio_node;
+	char		serial_number[16];
 
-    if (nserial == 0)
-	DPRINTF(KERN_WARNING  "io_module_init: No serial number found.\n");
+        tio_nasid = COMPACT_TO_NASID_NODEID(node);
+        board = find_lboard((lboard_t *) KL_CONFIG_INFO(tio_nasid), KLTYPE_TIO);
+	ASSERT(board);
+	tio_node = NASID_TO_COMPACT_NODEID(tio_nasid);
+
+	HWGRAPH_DEBUG((__FILE__, __FUNCTION__, __LINE__, NULL, NULL, "Found TIO lboard 0x%lx node %d tio nasid %d tio cnode %d\n", (unsigned long)board, node, (int)tio_nasid, (int)tio_node));
+
+        m = module_add_node(board->brd_geoid, tio_node);
+
+	/*
+	 * Get and initialize the serial number of TIO.
+	 */
+	board_serial_number_get( board, serial_number );
+    	if( serial_number[0] != '\0' ) {
+        	encode_str_serial( serial_number, m->snum.snum_str );
+        	m->snum_valid = 1;
+		nserial++;
+	}
+    }
 }
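The io_module.c hunks above replace the old append-at-nodecnt bookkeeping with slab-indexed storage: every slot in m->nodes[] is preset to -1, each node is filed under geo_slab(geoid), and a duplicate slab is fatal. A minimal standalone sketch of that pattern follows; MAX_SLABS, the typedefs and the function names are simplified stand-ins, not the real SN2 definitions.

    #define MAX_SLABS 15                  /* stand-in; not the SN2 value */

    typedef int slabid_t;
    typedef int cnodeid_t;

    struct module_sketch {
        cnodeid_t nodes[MAX_SLABS + 1];   /* one entry per slab, -1 == unused */
    };

    static void module_sketch_init(struct module_sketch *m)
    {
        slabid_t s;

        for (s = 0; s <= MAX_SLABS; s++)
            m->nodes[s] = -1;             /* mark every slab slot empty */
    }

    static int module_sketch_add(struct module_sketch *m, slabid_t slab,
                                 cnodeid_t node)
    {
        if (m->nodes[slab] != -1)
            return -1;                    /* slab already claimed; caller panics */
        m->nodes[slab] = node;            /* record the node under its slab */
        return 0;
    }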
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/Makefile b/arch/ia64/sn/io/sn2/pcibr/Makefile
--- a/arch/ia64/sn/io/sn2/pcibr/Makefile	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/pcibr/Makefile	Thu Nov  6 13:42:35 2003
@@ -9,7 +9,8 @@
 # Makefile for the sn2 specific pci bridge routines.
 #
 
-EXTRA_CFLAGS    := -DLITTLE_ENDIAN
-
-obj-y += pcibr_ate.o pcibr_config.o pcibr_dvr.o pcibr_hints.o pcibr_intr.o pcibr_rrb.o \
-	 pcibr_slot.o pcibr_error.o
+obj-y += pcibr_ate.o pcibr_config.o \
+	 pcibr_dvr.o pcibr_hints.o  \
+	 pcibr_intr.o pcibr_rrb.o   \
+	 pcibr_slot.o pcibr_error.o \
+	 pcibr_reg.o pcibr_msix_intr.o
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_ate.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_ate.c
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_ate.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_ate.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -8,137 +7,22 @@
  */
 
 #include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/module.h>
 #include <asm/sn/sgi.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
 #include <asm/sn/pci/pciio.h>
 #include <asm/sn/pci/pcibr.h>
 #include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_private.h>
-
-#ifndef LOCAL
-#define LOCAL           static
-#endif
 
 /*
  * functions
  */
-int               pcibr_init_ext_ate_ram(bridge_t *);
-int               pcibr_ate_alloc(pcibr_soft_t, int);
-void              pcibr_ate_free(pcibr_soft_t, int, int);
-bridge_ate_t      pcibr_flags_to_ate(unsigned);
-bridge_ate_p      pcibr_ate_addr(pcibr_soft_t, int);
-unsigned 	  ate_freeze(pcibr_dmamap_t pcibr_dmamap,
-#if PCIBR_FREEZE_TIME
-	   			unsigned *freeze_time_ptr,
-#endif
-	   			unsigned *cmd_regs);
-void 	  ate_write(pcibr_soft_t pcibr_soft, bridge_ate_p ate_ptr, int ate_count, bridge_ate_t ate);
-void ate_thaw(pcibr_dmamap_t pcibr_dmamap,
-	 			int ate_index,
-#if PCIBR_FREEZE_TIME
-	 			bridge_ate_t ate,
-	 			int ate_total,
-	 			unsigned freeze_time_start,
-#endif
-	 			unsigned *cmd_regs,
-	 			unsigned s);
-
+int		pcibr_ate_alloc(pcibr_soft_t, int);
+void		pcibr_ate_free(pcibr_soft_t, int, int);
+bridge_ate_t	pcibr_flags_to_ate(pcibr_soft_t, unsigned);
+bridge_ate_p	pcibr_ate_addr(pcibr_soft_t, int);
+void		ate_write(pcibr_soft_t, pcibr_dmamap_t, int, int, bridge_ate_t);
 
-/* Convert from ssram_bits in control register to number of SSRAM entries */
-#define ATE_NUM_ENTRIES(n) _ate_info[n]
-
-/* Possible choices for number of ATE entries in Bridge's SSRAM */
-LOCAL int               _ate_info[] =
-{
-    0,					/* 0 entries */
-    8 * 1024,				/* 8K entries */
-    16 * 1024,				/* 16K entries */
-    64 * 1024				/* 64K entries */
-};
-
-#define ATE_NUM_SIZES (sizeof(_ate_info) / sizeof(int))
-#define ATE_PROBE_VALUE 0x0123456789abcdefULL
-
-/*
- * Determine the size of this bridge's external mapping SSRAM, and set
- * the control register appropriately to reflect this size, and initialize
- * the external SSRAM.
- */
-int
-pcibr_init_ext_ate_ram(bridge_t *bridge)
-{
-    int                     largest_working_size = 0;
-    int                     num_entries, entry;
-    int                     i, j;
-    bridgereg_t             old_enable, new_enable;
-    int                     s;
-
-    /* Probe SSRAM to determine its size. */
-    old_enable = bridge->b_int_enable;
-    new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
-    bridge->b_int_enable = new_enable;
-
-    for (i = 1; i < ATE_NUM_SIZES; i++) {
-	/* Try writing a value */
-	bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
-
-	/* Guard against wrap */
-	for (j = 1; j < i; j++)
-	    bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(j) - 1] = 0;
-
-	/* See if value was written */
-	if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
-				largest_working_size = i;
-    }
-    bridge->b_int_enable = old_enable;
-    bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
-
-    /*
-     * ensure that we write and read without any interruption.
-     * The read following the write is required for the Bridge war
-     */
-
-    s = splhi();
-    bridge->b_wid_control = (bridge->b_wid_control
-			& ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
-			| BRIDGE_CTRL_SSRAM_SIZE(largest_working_size);
-    bridge->b_wid_control;		/* inval addr bug war */
-    splx(s);
-
-    num_entries = ATE_NUM_ENTRIES(largest_working_size);
-
-    if (pcibr_debug_mask & PCIBR_DEBUG_ATE) {
-	if (num_entries) {
-	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, NULL,
-			"bridge at 0x%x: clearing %d external ATEs\n",
-			bridge, num_entries));
-	} else {
-	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, NULL,
-			"bridge at 0x%x: no external ATE RAM found\n",
-			bridge));
-	}
-    }
-
-    /* Initialize external mapping entries */
-    for (entry = 0; entry < num_entries; entry++)
-	bridge->b_ext_ate_ram[entry] = 0;
-
-    return (num_entries);
-}
 
 /*
  * Allocate "count" contiguous Bridge Address Translation Entries
@@ -154,24 +38,15 @@
     int			    status = 0;
     struct resource	    *new_res;
     struct resource         **allocated_res;
+    unsigned long	    flag;
 
     new_res = (struct resource *) kmalloc( sizeof(struct resource), GFP_ATOMIC);
     memset(new_res, 0, sizeof(*new_res));
+    flag = pcibr_lock(pcibr_soft);
     status = allocate_resource( &pcibr_soft->bs_int_ate_resource, new_res,
 				count, pcibr_soft->bs_int_ate_resource.start, 
 				pcibr_soft->bs_int_ate_resource.end, 1,
 				NULL, NULL);
-
-    if ( status && (pcibr_soft->bs_ext_ate_resource.end != 0) ) {
-	status = allocate_resource( &pcibr_soft->bs_ext_ate_resource, new_res,
-				count, pcibr_soft->bs_ext_ate_resource.start,
-				pcibr_soft->bs_ext_ate_resource.end, 1,
-				NULL, NULL);
-	if (status) {
-		new_res->start = -1;
-	}
-    }
-
     if (status) {
 	/* Failed to allocate */
 	kfree(new_res);
@@ -181,24 +56,30 @@
     /* Save the resource for freeing */
     allocated_res = (struct resource **)(((unsigned long)pcibr_soft->bs_allocated_ate_res) + new_res->start * sizeof( unsigned long));
     *allocated_res = new_res;
+    pcibr_unlock(pcibr_soft, flag);
 
     return new_res->start;
 }
 
 void
 pcibr_ate_free(pcibr_soft_t pcibr_soft, int index, int count)
-/* Who says there's no such thing as a free meal? :-) */
 {
 
     struct resource **allocated_res;
+    struct resource *res;
     int status = 0;
+    unsigned long flags;
 
+    flags = pcibr_lock(pcibr_soft);
     allocated_res = (struct resource **)(((unsigned long)pcibr_soft->bs_allocated_ate_res) + index * sizeof(unsigned long));
+    res = *allocated_res;
+    *allocated_res = NULL;
 
-    status = release_resource(*allocated_res);
+    status = release_resource(res);
+    pcibr_unlock(pcibr_soft, flags);
     if (status)
 	BUG(); /* Ouch .. */
-    kfree(*allocated_res);
+    kfree(res);
 
 }
 
@@ -207,52 +88,59 @@
  * into Bridge-specific Address Translation Entry attribute bits.
  */
 bridge_ate_t
-pcibr_flags_to_ate(unsigned flags)
+pcibr_flags_to_ate(pcibr_soft_t pcibr_soft, unsigned flags)
 {
-    bridge_ate_t            attributes;
+    bridge_ate_t	attributes = PCIBR_ATE_V;
 
     /* default if nothing specified:
      * NOBARRIER
      * NOPREFETCH
      * NOPRECISE
-     * COHERENT
+     * COHERENT		* PIC ONLY *
      * Plus the valid bit
      */
-    attributes = ATE_CO | ATE_V;
+    if (IS_PIC_SOFT(pcibr_soft)) {
+	attributes |= PIC_ATE_CO;
+    }
 
     /* Generic macro flags
      */
     if (flags & PCIIO_DMA_DATA) {	/* standard data channel */
-	attributes &= ~ATE_BAR;		/* no barrier */
-	attributes |= ATE_PREF;		/* prefetch on */
+	attributes &= ~PCIBR_ATE_BAR;	/* no barrier */
+	attributes |= PCIBR_ATE_PREF;	/* prefetch on */
     }
     if (flags & PCIIO_DMA_CMD) {	/* standard command channel */
-	attributes |= ATE_BAR;		/* barrier bit on */
-	attributes &= ~ATE_PREF;	/* disable prefetch */
+	attributes |= PCIBR_ATE_BAR;	/* barrier bit on */
+	attributes &= ~PCIBR_ATE_PREF;	/* disable prefetch */
     }
     /* Generic detail flags
      */
     if (flags & PCIIO_PREFETCH)
-	attributes |= ATE_PREF;
+	attributes |= PCIBR_ATE_PREF;
     if (flags & PCIIO_NOPREFETCH)
-	attributes &= ~ATE_PREF;
+	attributes &= ~PCIBR_ATE_PREF;
 
     /* Provider-specific flags
      */
     if (flags & PCIBR_BARRIER)
-	attributes |= ATE_BAR;
+	attributes |= PCIBR_ATE_BAR;
     if (flags & PCIBR_NOBARRIER)
-	attributes &= ~ATE_BAR;
+	attributes &= ~PCIBR_ATE_BAR;
 
     if (flags & PCIBR_PREFETCH)
-	attributes |= ATE_PREF;
+	attributes |= PCIBR_ATE_PREF;
     if (flags & PCIBR_NOPREFETCH)
-	attributes &= ~ATE_PREF;
+	attributes &= ~PCIBR_ATE_PREF;
 
     if (flags & PCIBR_PRECISE)
-	attributes |= ATE_PREC;
+	attributes |= PCIBR_ATE_PREC;
     if (flags & PCIBR_NOPRECISE)
-	attributes &= ~ATE_PREC;
+	attributes &= ~PCIBR_ATE_PREC;
+
+    /* In PCI-X mode, Prefetch & Precise not supported */
+    if (IS_PCIX(pcibr_soft)) {
+	attributes &= ~(PCIBR_ATE_PREC | PCIBR_ATE_PREF);
+    }
 
     return (attributes);
 }
@@ -262,220 +150,31 @@
  * internal maps or the external map RAM, as appropriate.
  */
 bridge_ate_p
-pcibr_ate_addr(pcibr_soft_t pcibr_soft,
-	       int ate_index)
+pcibr_ate_addr(pcibr_soft_t pcibr_soft, int ate_index)
 {
-    bridge_t *bridge = pcibr_soft->bs_base;
-
-    return (ate_index < pcibr_soft->bs_int_ate_size)
-	? &(bridge->b_int_ate_ram[ate_index].wr)
-	: &(bridge->b_ext_ate_ram[ate_index]);
-}
-
-/* We are starting to get more complexity
- * surrounding writing ATEs, so pull
- * the writing code into this new function.
- */
-
-#if PCIBR_FREEZE_TIME
-#define	ATE_FREEZE()	s = ate_freeze(pcibr_dmamap, &freeze_time, cmd_regs)
-#else
-#define	ATE_FREEZE()	s = ate_freeze(pcibr_dmamap, cmd_regs)
-#endif
-
-unsigned
-ate_freeze(pcibr_dmamap_t pcibr_dmamap,
-#if PCIBR_FREEZE_TIME
-	   unsigned *freeze_time_ptr,
-#endif
-	   unsigned *cmd_regs)
-{
-    pcibr_soft_t            pcibr_soft = pcibr_dmamap->bd_soft;
-#ifdef LATER
-    int                     dma_slot = pcibr_dmamap->bd_slot;
-#endif
-    int                     ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;
-    int                     slot;
-
-    unsigned long           s;
-    unsigned                cmd_reg;
-    volatile unsigned      *cmd_lwa;
-    unsigned                cmd_lwd;
-
-    if (!ext_ates)
-	return 0;
-
-    /* Bridge Hardware Bug WAR #484930:
-     * Bridge can't handle updating External ATEs
-     * while DMA is occurring that uses External ATEs,
-     * even if the particular ATEs involved are disjoint.
-     */
-
-    /* need to prevent anyone else from
-     * unfreezing the grant while we
-     * are working; also need to prevent
-     * this thread from being interrupted
-     * to keep PCI grant freeze time
-     * at an absolute minimum.
-     */
-    s = pcibr_lock(pcibr_soft);
-
-#ifdef LATER
-    /* just in case pcibr_dmamap_done was not called */
-    if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
-	pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
-	if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
-	    atomic_dec(&(pcibr_soft->bs_slot[dma_slot]. bss_ext_ates_active));
-	xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
+    if (ate_index < pcibr_soft->bs_int_ate_size) {
+	return (pcireg_int_ate_addr(pcibr_soft, ate_index));
+    } else {
+	panic("pcibr_ate_addr(): invalid ate_index 0x%x", ate_index);
     }
-#endif	/* LATER */
-#if PCIBR_FREEZE_TIME
-    *freeze_time_ptr = get_timestamp();
-#endif
-
-    cmd_lwa = 0;
-    for (slot = pcibr_soft->bs_min_slot; 
-		slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
-	if (atomic_read(&pcibr_soft->bs_slot[slot].bss_ext_ates_active)) {
-	    cmd_reg = pcibr_soft->
-		bs_slot[slot].
-		bss_cmd_shadow;
-	    if (cmd_reg & PCI_CMD_BUS_MASTER) {
-		cmd_lwa = pcibr_soft->
-		    bs_slot[slot].
-		    bss_cmd_pointer;
-		cmd_lwd = cmd_reg ^ PCI_CMD_BUS_MASTER;
-		cmd_lwa[0] = cmd_lwd;
-	    }
-	    cmd_regs[slot] = cmd_reg;
-	} else
-	    cmd_regs[slot] = 0;
-
-    if (cmd_lwa) {
-	    bridge_t	*bridge = pcibr_soft->bs_base;
-
-	    /* Read the last master bit that has been cleared. This PIO read
-	     * on the PCI bus is to ensure the completion of any DMAs that
-	     * are due to bus requests issued by PCI devices before the
-	     * clearing of master bits.
-	     */
-	    cmd_lwa[0];
-
-	    /* Flush all the write buffers in the bridge */
-	    for (slot = pcibr_soft->bs_min_slot; 
-				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
-		    if (atomic_read(&pcibr_soft->bs_slot[slot].bss_ext_ates_active)) {
-			    /* Flush the write buffer associated with this
-			     * PCI device which might be using dma map RAM.
-			     */
-			bridge->b_wr_req_buf[slot].reg;
-		    }
-	    }
-    }
-    return s;
-}
-
-void
-ate_write(pcibr_soft_t pcibr_soft,
-	  bridge_ate_p ate_ptr,
-	  int ate_count,
-	  bridge_ate_t ate)
-{
-	if (IS_PIC_SOFT(pcibr_soft) ) {
-    		while (ate_count-- > 0) {
-			*ate_ptr++ = ate;
-			ate += IOPGSIZE;
-		}
-	}
-	else {
-		if (io_get_sh_swapper(NASID_GET(ate_ptr))) {
-    			while (ate_count-- > 0) {
-				*ate_ptr++ = __swab64(ate);
-				ate += IOPGSIZE;
-			}
-		}
-		else {
-    			while (ate_count-- > 0) {
-				*ate_ptr++ = ate;
-				ate += IOPGSIZE;
-			}
-		}
-	}
 }
 
-#if PCIBR_FREEZE_TIME
-#define	ATE_THAW()	ate_thaw(pcibr_dmamap, ate_index, ate, ate_total, freeze_time, cmd_regs, s)
-#else
-#define	ATE_THAW()	ate_thaw(pcibr_dmamap, ate_index, cmd_regs, s)
-#endif
-
+/*
+ * Write the ATE.
+ */
 void
-ate_thaw(pcibr_dmamap_t pcibr_dmamap,
-	 int ate_index,
-#if PCIBR_FREEZE_TIME
-	 bridge_ate_t ate,
-	 int ate_total,
-	 unsigned freeze_time_start,
-#endif
-	 unsigned *cmd_regs,
-	 unsigned s)
+ate_write(pcibr_soft_t pcibr_soft, pcibr_dmamap_t pcibr_dmamap, 
+	  int ate_index, int ate_count, bridge_ate_t ate)
 {
-    pcibr_soft_t            pcibr_soft = pcibr_dmamap->bd_soft;
-    int                     dma_slot = pcibr_dmamap->bd_slot;
-    int                     slot;
-    bridge_t               *bridge = pcibr_soft->bs_base;
-    int                     ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;
-
-    unsigned                cmd_reg;
-
-#if PCIBR_FREEZE_TIME
-    unsigned                freeze_time;
-    static unsigned         max_freeze_time = 0;
-    static unsigned         max_ate_total;
-#endif
-
-    if (!ext_ates)
-	return;
-
-    /* restore cmd regs */
-    for (slot = pcibr_soft->bs_min_slot; 
-		slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
-	if ((cmd_reg = cmd_regs[slot]) & PCI_CMD_BUS_MASTER) {
-		if ( IS_PIC_SOFT(pcibr_soft) ) {
-			pcibr_slot_config_set(bridge, slot, PCI_CFG_COMMAND/4, cmd_reg);
-		}
-		else {
-			if (io_get_sh_swapper(NASID_GET(bridge))) {
-				bridge->b_type0_cfg_dev[slot].l[PCI_CFG_COMMAND / 4] = __swab32(cmd_reg);
-			}
-			else {
-//				BUG(); /* Does this really work if called when io_get_sh_swapper = 0? */
-//				bridge->b_type0_cfg_dev[slot].l[PCI_CFG_COMMAND / 4] = cmd_reg;
-				pcibr_slot_config_set(bridge, slot, PCI_CFG_COMMAND/4, cmd_reg);
-			}
-		}
+    while (ate_count-- > 0) {
+	if (ate_index < pcibr_soft->bs_int_ate_size) {
+	    pcireg_int_ate_set(pcibr_soft, ate_index, ate);
+	} else {
+	    panic("ate_write(): invalid ate_index 0x%x", ate_index);
 	}
+	ate_index++;
+	ate += IOPGSIZE;
     }
-    pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_BUSY;
-    atomic_inc(&(pcibr_soft->bs_slot[dma_slot]. bss_ext_ates_active));
-
-#if PCIBR_FREEZE_TIME
-    freeze_time = get_timestamp() - freeze_time_start;
 
-    if ((max_freeze_time < freeze_time) ||
-	(max_ate_total < ate_total)) {
-	if (max_freeze_time < freeze_time)
-	    max_freeze_time = freeze_time;
-	if (max_ate_total < ate_total)
-	    max_ate_total = ate_total;
-	pcibr_unlock(pcibr_soft, s);
-	printk( "%s: pci freeze time %d usec for %d ATEs\n"
-		"\tfirst ate: %R\n",
-		pcibr_soft->bs_name,
-		freeze_time * 1000 / 1250,
-		ate_total,
-		ate, ate_bits);
-    } else
-#endif
-	pcibr_unlock(pcibr_soft, s);
+    pcireg_tflush_get(pcibr_soft);	/* wait until Bridge PIO complete */
 }
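With the external SSRAM probing gone, pcibr_ate_alloc()/pcibr_ate_free() above manage ATE indices purely through the generic resource allocator, under pcibr_lock(). A sketch of that allocation pattern, mirroring the call made in the hunk; ate_sketch_alloc and ate_pool are illustrative names (ate_pool plays the role of bs_int_ate_resource), and error handling is reduced to the essentials.

    #include <linux/ioport.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int ate_sketch_alloc(struct resource *ate_pool, int count)
    {
        struct resource *res;

        res = kmalloc(sizeof(*res), GFP_ATOMIC);
        if (!res)
            return -1;
        memset(res, 0, sizeof(*res));

        /* carve "count" contiguous ATE indices from the pool, alignment 1 */
        if (allocate_resource(ate_pool, res, count,
                              ate_pool->start, ate_pool->end, 1, NULL, NULL)) {
            kfree(res);
            return -1;                  /* no contiguous run available */
        }
        return res->start;              /* first index; caller keeps res for
                                           release_resource()/kfree() later */
    }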
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_config.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_config.c
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_config.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_config.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -8,52 +7,30 @@
  */
 
 #include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/byteorder/swab.h>
 #include <asm/sn/sgi.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
 #include <asm/sn/pci/pciio.h>
 #include <asm/sn/pci/pcibr.h>
 #include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_private.h>
 
 extern pcibr_info_t      pcibr_info_get(vertex_hdl_t);
+extern int pcibr_probe_slot(pci_bridge_t *, cfg_p, unsigned int *);
 
 uint64_t          pcibr_config_get(vertex_hdl_t, unsigned, unsigned);
+uint64_t          pcibr_config_get_safe(vertex_hdl_t, unsigned, unsigned);
 uint64_t          do_pcibr_config_get(cfg_p, unsigned, unsigned);
 void              pcibr_config_set(vertex_hdl_t, unsigned, unsigned, uint64_t);
-void       	  do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
+void              do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
 
 /*
- * on sn-ia we need to twiddle the the addresses going out
- * the pci bus because we use the unswizzled synergy space
- * (the alternative is to use the swizzled synergy space
- * and byte swap the data)
+ * on sn-ia we need to twiddle the addresses going out the pci bus because
+ * we use the unswizzled synergy space (the alternative is to use the
+ * swizzled synergy space and byte swap the data)
  */
-#define CB(b,r) (((volatile uint8_t *) b)[((r)^4)])
-#define CS(b,r) (((volatile uint16_t *) b)[((r^4)/2)])
-#define CW(b,r) (((volatile uint32_t *) b)[((r^4)/4)])
-
-#define	CBP(b,r) (((volatile uint8_t *) b)[(r)])
-#define	CSP(b,r) (((volatile uint16_t *) b)[((r)/2)])
-#define	CWP(b,r) (((volatile uint32_t *) b)[(r)/4])
-
-#define SCB(b,r) (((volatile uint8_t *) b)[((r)^3)])
-#define SCS(b,r) (((volatile uint16_t *) b)[((r^2)/2)])
-#define SCW(b,r) (((volatile uint32_t *) b)[((r)/4)])
+#define CB(b,r) (((volatile uint8_t *) b)[(r)])
+#define CS(b,r) (((volatile uint16_t *) b)[((r)/2)])
+#define CW(b,r) (((volatile uint32_t *) b)[(r)/4])
 
 /*
  * Return a config space address for given slot / func / offset.  Note the
@@ -61,22 +38,21 @@
  * the 32bit word that contains the "offset" byte.
  */
 cfg_p
-pcibr_func_config_addr(bridge_t *bridge, pciio_bus_t bus, pciio_slot_t slot, 
-					pciio_function_t func, int offset)
+pcibr_func_config_addr(pci_bridge_t *bridge, pciio_bus_t bus, 
+		       pciio_slot_t slot, pciio_function_t func, int offset)
 {
-	/*
-	 * Type 1 config space
-	 */
-	if (bus > 0) {
-		bridge->b_pci_cfg = ((bus << 16) | (slot << 11));
-		return &bridge->b_type1_cfg.f[func].l[(offset)];
-	}
+    /*
+     * Type 1 config space
+     */
+    if (bus > 0) {
+	pcireg_type1_cntr_set(bridge, ((bus << 16) | (slot << 11)));
+	return (pcireg_type1_cfg_addr(bridge, func, offset));
+    }
 
-	/*
-	 * Type 0 config space
-	 */
-	slot++;
-	return &bridge->b_type0_cfg_dev[slot].f[func].l[offset];
+    /*
+     * Type 0 config space.
+     */
+    return (pcireg_type0_cfg_addr(bridge, slot, func, offset));
 }
 
 /*
@@ -85,60 +61,60 @@
  * 32bit word that contains the "offset" byte.
  */
 cfg_p
-pcibr_slot_config_addr(bridge_t *bridge, pciio_slot_t slot, int offset)
+pcibr_slot_config_addr(pci_bridge_t *bridge, pciio_slot_t slot, int offset)
 {
-	return pcibr_func_config_addr(bridge, 0, slot, 0, offset);
+    return pcibr_func_config_addr(bridge, 0, slot, 0, offset);
 }
 
 /*
  * Return config space data for given slot / offset
  */
 unsigned
-pcibr_slot_config_get(bridge_t *bridge, pciio_slot_t slot, int offset)
+pcibr_slot_config_get(pci_bridge_t *bridge, pciio_slot_t slot, int offset)
 {
-	cfg_p  cfg_base;
+    cfg_p  cfg_base;
 	
-	cfg_base = pcibr_slot_config_addr(bridge, slot, 0);
-	return (do_pcibr_config_get(cfg_base, offset, sizeof(unsigned)));
+    cfg_base = pcibr_slot_config_addr(bridge, slot, 0);
+    return (do_pcibr_config_get(cfg_base, offset, sizeof(unsigned)));
 }
 
 /*
  * Return config space data for given slot / func / offset
  */
 unsigned
-pcibr_func_config_get(bridge_t *bridge, pciio_slot_t slot, 
+pcibr_func_config_get(pci_bridge_t *bridge, pciio_slot_t slot, 
 					pciio_function_t func, int offset)
 {
-	cfg_p  cfg_base;
+    cfg_p  cfg_base;
 
-	cfg_base = pcibr_func_config_addr(bridge, 0, slot, func, 0);
-	return (do_pcibr_config_get(cfg_base, offset, sizeof(unsigned)));
+    cfg_base = pcibr_func_config_addr(bridge, 0, slot, func, 0);
+    return (do_pcibr_config_get(cfg_base, offset, sizeof(unsigned)));
 }
 
 /*
  * Set config space data for given slot / offset
  */
 void
-pcibr_slot_config_set(bridge_t *bridge, pciio_slot_t slot, 
+pcibr_slot_config_set(pci_bridge_t *bridge, pciio_slot_t slot, 
 					int offset, unsigned val)
 {
-	cfg_p  cfg_base;
+    cfg_p  cfg_base;
 
-	cfg_base = pcibr_slot_config_addr(bridge, slot, 0);
-	do_pcibr_config_set(cfg_base, offset, sizeof(unsigned), val);
+    cfg_base = pcibr_slot_config_addr(bridge, slot, 0);
+    do_pcibr_config_set(cfg_base, offset, sizeof(unsigned), val);
 }
 
 /*
  * Set config space data for given slot / func / offset
  */
 void
-pcibr_func_config_set(bridge_t *bridge, pciio_slot_t slot, 
+pcibr_func_config_set(pci_bridge_t *bridge, pciio_slot_t slot, 
 			pciio_function_t func, int offset, unsigned val)
 {
-	cfg_p  cfg_base;
+    cfg_p  cfg_base;
 
-	cfg_base = pcibr_func_config_addr(bridge, 0, slot, func, 0);
-	do_pcibr_config_set(cfg_base, offset, sizeof(unsigned), val);
+    cfg_base = pcibr_func_config_addr(bridge, 0, slot, func, 0);
+    do_pcibr_config_set(cfg_base, offset, sizeof(unsigned), val);
 }
 
 int pcibr_config_debug = 0;
@@ -152,7 +128,7 @@
     pciio_slot_t            pciio_slot;
     pciio_function_t        pciio_func;
     pcibr_soft_t            pcibr_soft;
-    bridge_t               *bridge;
+    pci_bridge_t	   *bridge;
     cfg_p                   cfgbase = (cfg_p)0;
     pciio_info_t	    pciio_info;
 
@@ -173,6 +149,17 @@
 	pciio_func = PCI_TYPE1_FUNC(reg);
 
 	ASSERT(pciio_bus != 0);
+    } else if (conn != pciio_info_hostdev_get(pciio_info)) {
+	/*
+	 * Conn is on a subordinate bus, so get bus/slot/func directly from
+	 * its pciio_info_t structure.
+	 */
+	pciio_bus = pciio_info->c_bus;
+	pciio_slot = pciio_info->c_slot;
+	pciio_func = pciio_info->c_func;
+	if (pciio_func == PCIIO_FUNC_NONE) {
+		pciio_func = 0;
+	}
     } else {
 	/*
 	 * Conn is directly connected to the host bus.  PCI bus number is
@@ -182,13 +169,13 @@
 	 */
 	pciio_bus = 0;
 
-    pciio_slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
-    if (pciio_slot == PCIIO_SLOT_NONE)
-	pciio_slot = PCI_TYPE1_SLOT(reg);
-
-    pciio_func = pcibr_info->f_func;
-    if (pciio_func == PCIIO_FUNC_NONE)
-	pciio_func = PCI_TYPE1_FUNC(reg);
+	pciio_slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
+	if (pciio_slot == PCIIO_SLOT_NONE)
+    	    pciio_slot = PCI_TYPE1_SLOT(reg);
+
+	pciio_func = pcibr_info->f_func;
+	if (pciio_func == PCIIO_FUNC_NONE)
+    	    pciio_func = PCI_TYPE1_FUNC(reg);
     }
 
     pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
@@ -206,18 +193,51 @@
 		 unsigned reg,
 		 unsigned size)
 {
-	return do_pcibr_config_get(pcibr_config_addr(conn, reg),
-				PCI_TYPE1_REG(reg), size);
+    return do_pcibr_config_get(pcibr_config_addr(conn, reg),
+			       PCI_TYPE1_REG(reg), size);
 }
 
+/*
+ * Read the value of a configuration register.  If the config read
+ * times out we return -1 instead of taking a master-abort, hence 'safe'.
+ */
 uint64_t
-do_pcibr_config_get(cfg_p cfgbase,
+pcibr_config_get_safe(vertex_hdl_t conn,
+                 unsigned reg,
+                 unsigned size)
+{
+    pcibr_info_t            pcibr_info;
+    pcibr_soft_t            pcibr_soft;
+    pci_bridge_t           *bridge;
+    cfg_p                   cfg;
+    int                     rv;
+    unsigned                val;
+
+    pcibr_info = pcibr_info_get(conn);
+
+    pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
+    bridge = pcibr_soft->bs_base;
+    cfg = pcibr_config_addr(conn, reg);
+
+    rv = pcibr_probe_slot(bridge, cfg, &val);
+
+    if (rv == 0) {
+        return (uint64_t)val;
+    }
+
+    return (uint64_t)(-1);
+}
+
+uint64_t
+do_pcibr_config_get(
+		       cfg_p cfgbase,
 		       unsigned reg,
 		       unsigned size)
 {
     unsigned                value;
 
-    value = CWP(cfgbase, reg);
+    value = CW(cfgbase, reg);
+
     if (reg & 3)
 	value >>= 8 * (reg & 3);
     if (size < 4)
@@ -231,7 +251,7 @@
 		 unsigned size,
 		 uint64_t value)
 {
-	do_pcibr_config_set(pcibr_config_addr(conn, reg),
+    do_pcibr_config_set(pcibr_config_addr(conn, reg),
 			PCI_TYPE1_REG(reg), size, value);
 }
 
@@ -241,28 +261,29 @@
 		    unsigned size,
 		    uint64_t value)
 {
-	switch (size) {
-	case 1:
-		CBP(cfgbase, reg) = value;
-		break;
-	case 2:
-		if (reg & 1) {
-			CBP(cfgbase, reg) = value;
-			CBP(cfgbase, reg + 1) = value >> 8;
-		} else
-			CSP(cfgbase, reg) = value;
-		break;
-	case 3:
-		if (reg & 1) {
-			CBP(cfgbase, reg) = value;
-			CSP(cfgbase, (reg + 1)) = value >> 8;
-		} else {
-			CSP(cfgbase, reg) = value;
-			CBP(cfgbase, reg + 2) = value >> 16;
-		}
-		break;
-	case 4:
-		CWP(cfgbase, reg) = value;
-		break;
- 	}
+    switch (size) {
+    case 1:
+	CB(cfgbase, reg) = value;
+	break;
+    case 2:
+	if (reg & 1) {
+	    CB(cfgbase, reg) = value;
+	    CB(cfgbase, (reg + 1)) = value >> 8;
+	} else
+	    CS(cfgbase, reg) = value;
+	break;
+    case 3:
+	if (reg & 1) {
+	    CB(cfgbase, reg) = value;
+	    CS(cfgbase, (reg + 1)) = value >> 8;
+	} else {
+	    CS(cfgbase, reg) = value;
+	    CB(cfgbase, (reg + 2)) = value >> 16;
+	}
+	break;
+
+    case 4:
+	CW(cfgbase, reg) = value;
+	break;
+    }
 }
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -7,31 +6,17 @@
  * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
  */
 
-#include <linux/types.h>
-#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
 #include <asm/sn/pci/pciio.h>
 #include <asm/sn/pci/pcibr.h>
 #include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/io.h>
+
+#include <asm/sn/prio.h> 
 #include <asm/sn/sn_private.h>
 
 /*
@@ -39,76 +24,22 @@
  *   -pcibr_debug_mask is the mask of the different types of debugging
  *    you want to enable.  See sys/PCI/pcibr_private.h 
  *   -pcibr_debug_module is the module you want to trace.  By default
- *    all modules are trace.  For IP35 this value has the format of
- *    something like "001c10".  For IP27 this value is a node number,
- *    i.e. "1", "2"...  For IP30 this is undefined and should be set to
- *    'all'.
- *   -pcibr_debug_widget is the widget you want to trace.  For IP27
- *    the widget isn't exposed in the hwpath so use the xio slot num.
- *    i.e. for 'io2' set pcibr_debug_widget to "2".
+ *    all modules are traced.  The format is something like "001c10".
+ *   -pcibr_debug_widget is the widget you want to trace.  For TIO-based
+ *    bricks use the corelet id.
  *   -pcibr_debug_slot is the pci slot you want to trace.
  */
-uint32_t pcibr_debug_mask = 0x0;	/* 0x00000000 to disable */
-char      *pcibr_debug_module = "all";		/* 'all' for all modules */
-int	   pcibr_debug_widget = -1;		/* '-1' for all widgets  */
-int	   pcibr_debug_slot = -1;		/* '-1' for all slots    */
+uint32_t   	  pcibr_debug_mask = 0x00000000;	/* 0x00000000 to disable */
+static char      *pcibr_debug_module = "all";		/* 'all' for all modules */
+static int	   pcibr_debug_widget = -1;		/* '-1' for all widgets  */
+static int	   pcibr_debug_slot = -1;		/* '-1' for all slots    */
 
-/*
- * Macros related to the Lucent USS 302/312 usb timeout workaround.  It
- * appears that if the lucent part can get into a retry loop if it sees a
- * DAC on the bus during a pio read retry.  The loop is broken after about
- * 1ms, so we need to set up bridges holding this part to allow at least
- * 1ms for pio.
- */
-
-#define USS302_TIMEOUT_WAR
-
-#ifdef USS302_TIMEOUT_WAR
-#define LUCENT_USBHC_VENDOR_ID_NUM	0x11c1
-#define LUCENT_USBHC302_DEVICE_ID_NUM	0x5801
-#define LUCENT_USBHC312_DEVICE_ID_NUM	0x5802
-#define USS302_BRIDGE_TIMEOUT_HLD	4
-#endif
-
-/* kbrick widgetnum-to-bus layout */
-int p_busnum[MAX_PORT_NUM] = {                  /* widget#      */
-        0, 0, 0, 0, 0, 0, 0, 0,                 /* 0x0 - 0x7    */
-        2,                                      /* 0x8          */
-        1,                                      /* 0x9          */
-        0, 0,                                   /* 0xa - 0xb    */
-        5,                                      /* 0xc          */
-        6,                                      /* 0xd          */
-        4,                                      /* 0xe          */
-        3,                                      /* 0xf          */
-};
 
 #if PCIBR_SOFT_LIST
 pcibr_list_p            pcibr_list = 0;
 #endif
 
-extern int              hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf, uint buflen);
-extern long             atoi(register char *p);
-extern cnodeid_t        nodevertex_to_cnodeid(vertex_hdl_t vhdl);
-extern char             *dev_to_name(vertex_hdl_t dev, char *buf, uint buflen);
-extern struct map       *atemapalloc(uint64_t);
-extern void             atefree(struct map *, size_t, uint64_t);
-extern void             atemapfree(struct map *);
-extern pciio_dmamap_t   get_free_pciio_dmamap(vertex_hdl_t);
-extern void		free_pciio_dmamap(pcibr_dmamap_t);
-extern void		xwidget_error_register(vertex_hdl_t, error_handler_f *, error_handler_arg_t);
-
-#define	ATE_WRITE()    ate_write(pcibr_soft, ate_ptr, ate_count, ate)
-#if PCIBR_FREEZE_TIME
-#define	ATE_FREEZE()	s = ate_freeze(pcibr_dmamap, &freeze_time, cmd_regs)
-#else
-#define	ATE_FREEZE()	s = ate_freeze(pcibr_dmamap, cmd_regs)
-#endif /* PCIBR_FREEZE_TIME */
-
-#if PCIBR_FREEZE_TIME
-#define	ATE_THAW()	ate_thaw(pcibr_dmamap, ate_index, ate, ate_total, freeze_time, cmd_regs, s)
-#else
-#define	ATE_THAW()	ate_thaw(pcibr_dmamap, ate_index, cmd_regs, s)
-#endif
+extern char *pci_space[];
 
 /* =====================================================================
  *    Function Table of Contents
@@ -119,48 +50,34 @@
  *      perhaps bust this file into smaller chunks.
  */
 
-extern int		 do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
-extern void              do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
+extern void		 do_pcibr_rrb_free_all(pcibr_soft_t, pci_bridge_t *, pciio_slot_t);
+extern void              pcibr_rrb_alloc_more(pcibr_soft_t, int, int, int);
 
 extern int  		 pcibr_wrb_flush(vertex_hdl_t);
 extern int               pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
 extern void              pcibr_rrb_flush(vertex_hdl_t);
 
-static int                pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, bridgereg_t);
-void                     pcibr_release_device(pcibr_soft_t, pciio_slot_t, bridgereg_t);
-
-extern void              pcibr_setwidint(xtalk_intr_t);
-extern void              pcibr_clearwidint(bridge_t *);
+static int                pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, uint64_t);
+void                     pcibr_release_device(pcibr_soft_t, pciio_slot_t, uint64_t);
 
 extern iopaddr_t         pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
                                               pciio_space_t, int, int, int);
+extern int		 hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf, 
+						 uint buflen);
 
-int                      pcibr_attach(vertex_hdl_t);
-int			 pcibr_attach2(vertex_hdl_t, bridge_t *, vertex_hdl_t,
-				       int, pcibr_soft_t *);
 int			 pcibr_detach(vertex_hdl_t);
+void			 pcibr_directmap_init(pcibr_soft_t);
 int			 pcibr_pcix_rbars_calc(pcibr_soft_t);
-extern int               pcibr_init_ext_ate_ram(bridge_t *);
 extern int               pcibr_ate_alloc(pcibr_soft_t, int);
 extern void              pcibr_ate_free(pcibr_soft_t, int, int);
+extern pciio_dmamap_t	 get_free_pciio_dmamap(vertex_hdl_t);
+extern void		 free_pciio_dmamap(pcibr_dmamap_t);
 extern int 		 pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
 
-extern unsigned ate_freeze(pcibr_dmamap_t pcibr_dmamap,
-#if PCIBR_FREEZE_TIME
-	   		 unsigned *freeze_time_ptr,
-#endif
-	   		 unsigned *cmd_regs);
-extern void ate_write(pcibr_soft_t pcibr_soft, bridge_ate_p ate_ptr, int ate_count, bridge_ate_t ate);
-extern void ate_thaw(pcibr_dmamap_t pcibr_dmamap, int ate_index,
-#if PCIBR_FREEZE_TIME
-	 		bridge_ate_t ate,
-	 		int ate_total,
-	 		unsigned freeze_time_start,
-#endif
-	 		unsigned *cmd_regs,
-	 		unsigned s);
+extern void 		ate_write(pcibr_soft_t, pcibr_dmamap_t, int, int, 
+				  bridge_ate_t);
 
-pcibr_info_t      pcibr_info_get(vertex_hdl_t);
+pcibr_info_t		pcibr_info_get(vertex_hdl_t);
 
 static iopaddr_t         pcibr_addr_pci_to_xio(vertex_hdl_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
 
@@ -173,7 +90,7 @@
 void                    pcibr_piospace_free(vertex_hdl_t, pciio_space_t, iopaddr_t, size_t);
 
 static iopaddr_t         pcibr_flags_to_d64(unsigned, pcibr_soft_t);
-extern bridge_ate_t     pcibr_flags_to_ate(unsigned);
+extern bridge_ate_t     pcibr_flags_to_ate(pcibr_soft_t, unsigned);
 
 pcibr_dmamap_t          pcibr_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
 void                    pcibr_dmamap_free(pcibr_dmamap_t);
@@ -188,74 +105,24 @@
 void                    pcibr_dmalist_drain(vertex_hdl_t, alenlist_t);
 iopaddr_t               pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
 
-extern unsigned		pcibr_intr_bits(pciio_info_t info, 
-					pciio_intr_line_t lines, int nslots);
-extern pcibr_intr_t     pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
-extern void             pcibr_intr_free(pcibr_intr_t);
-extern void             pcibr_setpciint(xtalk_intr_t);
-extern int              pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
-extern void             pcibr_intr_disconnect(pcibr_intr_t);
-
-extern vertex_hdl_t     pcibr_intr_cpu_get(pcibr_intr_t);
-extern void             pcibr_intr_func(intr_arg_t);
-
-extern void             print_bridge_errcmd(uint32_t, char *);
-
-extern void             pcibr_error_dump(pcibr_soft_t);
-extern uint32_t       pcibr_errintr_group(uint32_t);
-extern void	        pcibr_pioerr_check(pcibr_soft_t);
-extern void             pcibr_error_intr_handler(int, void *, struct pt_regs *);
-
-extern int              pcibr_addr_toslot(pcibr_soft_t, iopaddr_t, pciio_space_t *, iopaddr_t *, pciio_function_t *);
-extern void             pcibr_error_cleanup(pcibr_soft_t, int);
-extern void                    pcibr_device_disable(pcibr_soft_t, int);
-extern int              pcibr_pioerror(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
-extern int              pcibr_dmard_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
-extern int              pcibr_dmawr_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
-extern int              pcibr_error_handler(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
-extern int              pcibr_error_handler_wrapper(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
 void                    pcibr_provider_startup(vertex_hdl_t);
 void                    pcibr_provider_shutdown(vertex_hdl_t);
 
 int                     pcibr_reset(vertex_hdl_t);
+
 pciio_endian_t          pcibr_endian_set(vertex_hdl_t, pciio_endian_t, pciio_endian_t);
-int                     pcibr_priority_bits_set(pcibr_soft_t, pciio_slot_t, pciio_priority_t);
-pciio_priority_t        pcibr_priority_set(vertex_hdl_t, pciio_priority_t);
 int                     pcibr_device_flags_set(vertex_hdl_t, pcibr_device_flags_t);
 
-extern cfg_p            pcibr_config_addr(vertex_hdl_t, unsigned);
-extern uint64_t         pcibr_config_get(vertex_hdl_t, unsigned, unsigned);
-extern void             pcibr_config_set(vertex_hdl_t, unsigned, unsigned, uint64_t);
-
-extern pcibr_hints_t    pcibr_hints_get(vertex_hdl_t, int);
-extern void             pcibr_hints_fix_rrbs(vertex_hdl_t);
-extern void             pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
-extern void	 	pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
-extern void             pcibr_set_rrb_callback(vertex_hdl_t, rrb_alloc_funct_t);
-extern void             pcibr_hints_handsoff(vertex_hdl_t);
-extern void             pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, uint64_t);
-
-extern int		pcibr_slot_info_init(vertex_hdl_t,pciio_slot_t);
 extern int		pcibr_slot_info_free(vertex_hdl_t,pciio_slot_t);
-extern int	        pcibr_slot_info_return(pcibr_soft_t, pciio_slot_t,
-                                               pcibr_slot_info_resp_t);
-extern void       	pcibr_slot_func_info_return(pcibr_info_h, int,
-                                                    pcibr_slot_func_info_resp_t);
-extern int		pcibr_slot_addr_space_init(vertex_hdl_t,pciio_slot_t);
-extern int		pcibr_slot_pcix_rbar_init(pcibr_soft_t, pciio_slot_t);
-extern int		pcibr_slot_device_init(vertex_hdl_t, pciio_slot_t);
-extern int		pcibr_slot_guest_info_init(vertex_hdl_t,pciio_slot_t);
-extern int		pcibr_slot_call_device_attach(vertex_hdl_t,
-						      pciio_slot_t, int);
-extern int		pcibr_slot_call_device_detach(vertex_hdl_t,
-						      pciio_slot_t, int);
-extern int              pcibr_slot_attach(vertex_hdl_t, pciio_slot_t, int, 
-                                                      char *, int *);
 extern int              pcibr_slot_detach(vertex_hdl_t, pciio_slot_t, int,
                                                       char *, int *);
+#ifdef PCI_HOTPLUG
+extern int		pcibr_slot_startup(vertex_hdl_t, pcibr_slot_req_t);
+extern int		pcibr_slot_shutdown(vertex_hdl_t, pcibr_slot_req_t);
+extern int		pcibr_slot_query(vertex_hdl_t, pcibr_slot_req_t);
+#endif
 
-extern int		pcibr_slot_initial_rrb_alloc(vertex_hdl_t, pciio_slot_t);
-extern int		pcibr_initial_rrb(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
+pciio_businfo_t		pcibr_businfo_get(vertex_hdl_t);
 
 /* =====================================================================
  *    Device(x) register management
@@ -273,35 +140,30 @@
 pcibr_try_set_device(pcibr_soft_t pcibr_soft,
 		     pciio_slot_t slot,
 		     unsigned flags,
-		     bridgereg_t mask)
+		     uint64_t mask)
 {
-    bridge_t               *bridge;
     pcibr_soft_slot_t       slotp;
-    bridgereg_t             old;
-    bridgereg_t             new;
-    bridgereg_t             chg;
-    bridgereg_t             bad;
-    bridgereg_t             badpmu;
-    bridgereg_t             badd32;
-    bridgereg_t             badd64;
-    bridgereg_t             fix;
-    unsigned long           s;
-    bridgereg_t             xmask;
+    uint64_t		    old;
+    uint64_t		    new;
+    uint64_t		    chg;
+    uint64_t		    bad;
+    uint64_t		    badpmu;
+    uint64_t		    badd32;
+    uint64_t		    badd64;
+    uint64_t		    fix;
+    uint64_t		    xmask;
+    unsigned long	    s;
 
     xmask = mask;
-    if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
-    	if (mask == BRIDGE_DEV_PMU_BITS)
-		xmask = XBRIDGE_DEV_PMU_BITS;
-	if (mask == BRIDGE_DEV_D64_BITS)
-		xmask = XBRIDGE_DEV_D64_BITS;
-    }
+    if (mask == BRIDGE_DEV_PMU_BITS)
+	xmask = XBRIDGE_DEV_PMU_BITS;
+    if (mask == BRIDGE_DEV_D64_BITS)
+	xmask = XBRIDGE_DEV_D64_BITS;
 
     slotp = &pcibr_soft->bs_slot[slot];
 
     s = pcibr_lock(pcibr_soft);
 
-    bridge = pcibr_soft->bs_base;
-
     old = slotp->bss_device;
 
     /* figure out what the desired
@@ -330,94 +192,93 @@
      * channels are created.
      */
 
-#define	BRIDGE_DEV_WRGA_BITS	(BRIDGE_DEV_PMU_WRGA_EN | BRIDGE_DEV_DIR_WRGA_EN)
-#define	BRIDGE_DEV_SWAP_BITS	(BRIDGE_DEV_SWAP_PMU | BRIDGE_DEV_SWAP_DIR)
-
     /* Do not use Barrier, Write Gather,
      * or Prefetch unless asked.
      * Leave everything else as it
      * was from the last time.
      */
     new = new
-	& ~BRIDGE_DEV_BARRIER
-	& ~BRIDGE_DEV_WRGA_BITS
-	& ~BRIDGE_DEV_PREF
+	& ~PCIBR_DEV_BARRIER
+	& ~PCIBR_DEV_DIR_WRGA_EN
+	& ~PCIBR_DEV_PREF
 	;
 
     /* Generic macro flags
      */
     if (flags & PCIIO_DMA_DATA) {
 	new = (new
-            & ~BRIDGE_DEV_BARRIER)      /* barrier off */
-            | BRIDGE_DEV_PREF;          /* prefetch on */
-
+            & ~PCIBR_DEV_BARRIER)	/* barrier off */
+	    | PCIBR_DEV_PREF;		/* prefetch on */
     }
     if (flags & PCIIO_DMA_CMD) {
         new = ((new
-            & ~BRIDGE_DEV_PREF)         /* prefetch off */
-            & ~BRIDGE_DEV_WRGA_BITS)    /* write gather off */
-            | BRIDGE_DEV_BARRIER;       /* barrier on */
+            & ~PCIBR_DEV_PREF)		/* prefetch off */
+            & ~PCIBR_DEV_DIR_WRGA_EN)	/* write gather off */
+	    | PCIBR_DEV_BARRIER;	/* barrier on */
     }
     /* Generic detail flags
      */
     if (flags & PCIIO_WRITE_GATHER)
-	new |= BRIDGE_DEV_WRGA_BITS;
+	new |= PCIBR_DEV_DIR_WRGA_EN;
     if (flags & PCIIO_NOWRITE_GATHER)
-	new &= ~BRIDGE_DEV_WRGA_BITS;
+	new &= ~PCIBR_DEV_DIR_WRGA_EN;
 
     if (flags & PCIIO_PREFETCH)
-	new |= BRIDGE_DEV_PREF;
+	new |= PCIBR_DEV_PREF;
     if (flags & PCIIO_NOPREFETCH)
-	new &= ~BRIDGE_DEV_PREF;
+	new &= ~PCIBR_DEV_PREF;
 
     if (flags & PCIBR_WRITE_GATHER)
-	new |= BRIDGE_DEV_WRGA_BITS;
+	new |= PCIBR_DEV_DIR_WRGA_EN;
     if (flags & PCIBR_NOWRITE_GATHER)
-	new &= ~BRIDGE_DEV_WRGA_BITS;
+	new &= ~PCIBR_DEV_DIR_WRGA_EN;
 
     if (flags & PCIIO_BYTE_STREAM)
-	new |= (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) ? 
-			BRIDGE_DEV_SWAP_DIR : BRIDGE_DEV_SWAP_BITS;
+	new |= PCIBR_DEV_SWAP_DIR;
     if (flags & PCIIO_WORD_VALUES)
-	new &= (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) ? 
-			~BRIDGE_DEV_SWAP_DIR : ~BRIDGE_DEV_SWAP_BITS;
+	new &= ~PCIBR_DEV_SWAP_DIR;
 
     /* Provider-specific flags
      */
     if (flags & PCIBR_PREFETCH)
-	new |= BRIDGE_DEV_PREF;
+	new |= PCIBR_DEV_PREF;
     if (flags & PCIBR_NOPREFETCH)
-	new &= ~BRIDGE_DEV_PREF;
+	new &= ~PCIBR_DEV_PREF;
 
     if (flags & PCIBR_PRECISE)
-	new |= BRIDGE_DEV_PRECISE;
+	new |= PCIBR_DEV_PRECISE;
     if (flags & PCIBR_NOPRECISE)
-	new &= ~BRIDGE_DEV_PRECISE;
+	new &= ~PCIBR_DEV_PRECISE;
 
     if (flags & PCIBR_BARRIER)
-	new |= BRIDGE_DEV_BARRIER;
+	new |= PCIBR_DEV_BARRIER;
     if (flags & PCIBR_NOBARRIER)
-	new &= ~BRIDGE_DEV_BARRIER;
+	new &= ~PCIBR_DEV_BARRIER;
 
     if (flags & PCIBR_64BIT)
-	new |= BRIDGE_DEV_DEV_SIZE;
+	new |= PCIBR_DEV_DEV_SIZE;
     if (flags & PCIBR_NO64BIT)
-	new &= ~BRIDGE_DEV_DEV_SIZE;
+	new &= ~PCIBR_DEV_DEV_SIZE;
 
     /*
      * PIC BRINGUP WAR (PV# 855271):
-     * Allow setting BRIDGE_DEV_VIRTUAL_EN on PIC iff we're a 64-bit
+     * Allow setting PCIBR_DEV_VIRTUAL_EN on PIC iff we're a 64-bit
      * device.  The bit is only intended for 64-bit devices and, on
      * PIC, can cause problems for 32-bit devices.
      */
     if (IS_PIC_SOFT(pcibr_soft) && mask == BRIDGE_DEV_D64_BITS &&
-                                PCIBR_WAR_ENABLED(PV855271, pcibr_soft)) {
-        if (flags & PCIBR_VCHAN1) {
-                new |= BRIDGE_DEV_VIRTUAL_EN;
-                xmask |= BRIDGE_DEV_VIRTUAL_EN;
-        }
+				PCIBR_WAR_ENABLED(PV855271, pcibr_soft)) {
+	if (flags & PCIBR_VCHAN1) {
+		new |= PCIBR_DEV_VIRTUAL_EN;
+		xmask |= PCIBR_DEV_VIRTUAL_EN;
+	}
     }
 
+    /* PIC BRINGUP WAR (PV# 878674):   Don't allow 64bit PIO accesses */
+    if (IS_PIC_SOFT(pcibr_soft) && (flags & PCIBR_64BIT) &&
+				PCIBR_WAR_ENABLED(PV878674, pcibr_soft)) {
+	new &= ~PCIBR_DEV_DEV_SIZE;
+    }
 
     chg = old ^ new;				/* what are we changing, */
     chg &= xmask;				/* of the interesting bits */
@@ -425,13 +286,8 @@
     if (chg) {
 
 	badd32 = slotp->bss_d32_uctr ? (BRIDGE_DEV_D32_BITS & chg) : 0;
-	if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
-		badpmu = slotp->bss_pmu_uctr ? (XBRIDGE_DEV_PMU_BITS & chg) : 0;
-		badd64 = slotp->bss_d64_uctr ? (XBRIDGE_DEV_D64_BITS & chg) : 0;
-	} else {
-		badpmu = slotp->bss_pmu_uctr ? (BRIDGE_DEV_PMU_BITS & chg) : 0;
-		badd64 = slotp->bss_d64_uctr ? (BRIDGE_DEV_D64_BITS & chg) : 0;
-	}
+	badpmu = slotp->bss_pmu_uctr ? (XBRIDGE_DEV_PMU_BITS & chg) : 0;
+	badd64 = slotp->bss_d64_uctr ? (XBRIDGE_DEV_D64_BITS & chg) : 0;
 	bad = badpmu | badd32 | badd64;
 
 	if (bad) {
@@ -443,8 +299,8 @@
 	     * but the alternative is not allowing
 	     * the new stream at all.
 	     */
-            if ( (fix = bad & (BRIDGE_DEV_PRECISE |
-                             BRIDGE_DEV_BARRIER)) ) {
+            if ( (fix = bad & (PCIBR_DEV_PRECISE |
+                             PCIBR_DEV_BARRIER)) ) {
 		bad &= ~fix;
 		/* don't change these bits if
 		 * they are already set in "old"
@@ -458,8 +314,8 @@
 	     * but the alternative is not allowing
 	     * the new stream at all.
 	     */
-	    if ( (fix = bad & (BRIDGE_DEV_WRGA_BITS |
-			     BRIDGE_DEV_PREF)) ) {
+	    if ( (fix = bad & (PCIBR_DEV_DIR_WRGA_EN |
+			     PCIBR_DEV_PREF)) ) {
 		bad &= ~fix;
 		/* don't change these bits if
 		 * we wanted to turn them on.
@@ -473,6 +329,8 @@
 	     */
 	    if (bad) {
 		pcibr_unlock(pcibr_soft, s);
+		PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
+			    "pcibr_try_set_device: mod blocked by 0x%x\n", bad));
 		return bad;
 	    }
 	}
@@ -495,35 +353,24 @@
 	pcibr_unlock(pcibr_soft, s);
 	return 0;
     }
-    if ( IS_PIC_SOFT(pcibr_soft) ) {
-	bridge->b_device[slot].reg = new;
-	slotp->bss_device = new;
-	bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
-    }
-    else {
-	if (io_get_sh_swapper(NASID_GET(bridge))) {
-		BRIDGE_REG_SET32((&bridge->b_device[slot].reg)) = __swab32(new);
-		slotp->bss_device = new;
-		BRIDGE_REG_GET32((&bridge->b_wid_tflush));  /* wait until Bridge PIO complete */
-	} else {
-		bridge->b_device[slot].reg = new;
-		slotp->bss_device = new;
-		bridge->b_wid_tflush;               /* wait until Bridge PIO complete */
-	}
-    }
+    
+    pcireg_device_set(pcibr_soft, slot, new);
+    slotp->bss_device = new;
+    pcireg_tflush_get(pcibr_soft);	/* wait until Bridge PIO complete */
     pcibr_unlock(pcibr_soft, s);
 
-    printk("pcibr_try_set_device: Device(%d): %x\n", slot, new);
+    PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
+		"pcibr_try_set_device: Device(%d): 0x%x\n", slot, new));
     return 0;
 }
 
 void
 pcibr_release_device(pcibr_soft_t pcibr_soft,
 		     pciio_slot_t slot,
-		     bridgereg_t mask)
+		     uint64_t mask)
 {
     pcibr_soft_slot_t       slotp;
-    unsigned long           s;
+    unsigned long	    s;
 
     slotp = &pcibr_soft->bs_slot[slot];
 
@@ -539,31 +386,6 @@
     pcibr_unlock(pcibr_soft, s);
 }
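pcibr_try_set_device() above keeps its XOR-based conflict test, now on 64-bit register values: it computes which bits would flip, restricts them to the mask being negotiated, and refuses flips that collide with streams already counted in bss_pmu_uctr/bss_d32_uctr/bss_d64_uctr. A compact sketch of that test (devreg_conflicts and its parameter names are illustrative only):

    #include <linux/types.h>

    static uint64_t devreg_conflicts(uint64_t old, uint64_t new,
                                     uint64_t xmask, uint64_t in_use)
    {
        uint64_t chg = old ^ new;   /* bits that would actually change */

        chg &= xmask;               /* only the bits being negotiated */
        return chg & in_use;        /* changes that collide with live users */
    }

A non-zero result is only fatal after the caller backs out the benign bits (PRECISE/BARRIER, write-gather/prefetch), exactly as the hunk does before returning "bad".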
 
-/*
- * flush write gather buffer for slot
- */
-static void
-pcibr_device_write_gather_flush(pcibr_soft_t pcibr_soft,
-              pciio_slot_t slot)
-{
-    bridge_t               *bridge;
-    unsigned long          s;
-    volatile uint32_t     wrf;
-    s = pcibr_lock(pcibr_soft);
-    bridge = pcibr_soft->bs_base;
-
-    if ( IS_PIC_SOFT(pcibr_soft) ) {
-	wrf = bridge->b_wr_req_buf[slot].reg;
-    }
-    else {
-	if (io_get_sh_swapper(NASID_GET(bridge))) {
-		wrf = BRIDGE_REG_GET32((&bridge->b_wr_req_buf[slot].reg));
-	} else {
-		wrf = bridge->b_wr_req_buf[slot].reg;
-	}
-    }
-    pcibr_unlock(pcibr_soft, s);
-}
 
 /* =====================================================================
  *    Bridge (pcibr) "Device Driver" entry points
@@ -573,20 +395,21 @@
 static int
 pcibr_mmap(struct file * file, struct vm_area_struct * vma)
 {
-	vertex_hdl_t		pcibr_vhdl = file->f_dentry->d_fsdata;
+	vertex_hdl_t		pcibr_vhdl;
 	pcibr_soft_t            pcibr_soft;
-	bridge_t               *bridge;
+	pci_bridge_t           *bridge;
 	unsigned long		phys_addr;
 	int			error = 0;
 
+	pcibr_vhdl = (vertex_hdl_t) file->f_dentry->d_fsdata;
 	pcibr_soft = pcibr_soft_get(pcibr_vhdl);
 	bridge = pcibr_soft->bs_base;
 	phys_addr = (unsigned long)bridge & ~0xc000000000000000; /* Mask out the Uncache bits */
         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-        vma->vm_flags |= VM_RESERVED | VM_IO;
-        error = io_remap_page_range(vma, phys_addr, vma->vm_start,
-				    vma->vm_end - vma->vm_start,
-				    vma->vm_page_prot);
+        vma->vm_flags |= VM_NONCACHED | VM_RESERVED | VM_IO;
+        error = io_remap_page_range(vma, vma->vm_start, phys_addr,
+                                   vma->vm_end-vma->vm_start,
+                                   vma->vm_page_prot);
 	return(error);
 }
 
@@ -601,7 +424,6 @@
 	.mmap		= pcibr_mmap,
 };
 
-
 /* This is special case code used by grio. There are plans to make
  * this a bit more general in the future, but till then this should
  * be sufficient.
@@ -637,7 +459,7 @@
 pcibr_info_t
 pcibr_info_get(vertex_hdl_t vhdl)
 {
-    return (pcibr_info_t) pciio_info_get(vhdl);
+    return (pcibr_info_t) pciio_hostinfo_get(vhdl);
 }
 
 pcibr_info_t
@@ -669,7 +491,11 @@
     pcibr_info->f_dev = slot;
 
     /* Set PCI bus number */
+#if !(defined(IP30) || defined(SN0))
     pcibr_info->f_bus = pcibr_widget_to_bus(pcibr_soft->bs_vhdl);
+#else
+    pcibr_info->f_bus = 0;
+#endif
 
     if (slot != PCIIO_SLOT_NONE) {
 
@@ -718,13 +544,13 @@
     vertex_hdl_t	 pcibr_vhdl;
     pciio_slot_t	 slot;
     pcibr_soft_t	 pcibr_soft;
-    bridge_t		*bridge;
+    pci_bridge_t	*bridge;
     int                  count_vchan0, count_vchan1;
-    unsigned             s;
+    unsigned long	 s;
     int			 error_call;
     int			 error = 0;
 
-    pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info = pciio_hostinfo_get(pconn_vhdl);
 
     pcibr_vhdl = pciio_info_master_get(pciio_info);
     slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
@@ -746,9 +572,6 @@
 
         s = pcibr_lock(pcibr_soft);
 
-	/* PIC NOTE: If this is a BRIDGE, VCHAN2 & VCHAN3 will be zero so
-	 * no need to conditionalize this (ie. "if (IS_PIC_SOFT())" ).
-	 */
         pcibr_soft->bs_rrb_res[slot] = pcibr_soft->bs_rrb_res[slot] +
                                        pcibr_soft->bs_rrb_valid[slot][VCHAN0] +
                                        pcibr_soft->bs_rrb_valid[slot][VCHAN1] +
@@ -805,14 +628,21 @@
     if ((key1 == -1) || (key2 == -1))
         return;
 
-    pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pcibr_info = pcibr_info_get(pconn_vhdl);
 
     pcibr_vhdl = pciio_info_master_get(pciio_info);
     slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
 
     pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+
+#ifdef PCI_HOTPLUG
+    /* This may be a loadable driver so lock out any pciconfig actions */
+    mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
+#endif
+
     pcibr_info->f_att_det_error = error;
+
     pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
 
     if (error) {
@@ -820,6 +650,12 @@
     } else {
         pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_CMPLT;
     }
+        
+#ifdef PCI_HOTPLUG
+    /* Release the bus lock */
+    mrunlock(pcibr_soft->bs_bus_lock);
+#endif
+
 }
 
 /*
@@ -844,14 +680,21 @@
     if ((key1 == -1) || (key2 == -1))
         return;
 
-    pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pcibr_info = pcibr_info_get(pconn_vhdl);
 
     pcibr_vhdl = pciio_info_master_get(pciio_info);
     slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
 
     pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+
+#ifdef PCI_HOTPLUG
+    /* This may be a loadable driver so lock out any pciconfig actions */
+    mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
+#endif
+
     pcibr_info->f_att_det_error = error;
+
     pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
 
     if (error) {
@@ -859,999 +702,12 @@
     } else {
         pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_CMPLT;
     }
-}
-
-/* 
- * build a convenience link path in the
- * form of ".../<iobrick>/bus/<busnum>"
- * 
- * returns 1 on success, 0 otherwise
- *
- * depends on hwgraph separator == '/'
- */
-int
-pcibr_bus_cnvlink(vertex_hdl_t f_c)
-{
-        char dst[MAXDEVNAME];
-	char *dp = dst;
-        char *cp, *xp;
-        int widgetnum;
-        char pcibus[8];
-	vertex_hdl_t nvtx, svtx;
-	int rv;
-
-	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, f_c, "pcibr_bus_cnvlink\n"));
-
-	if (GRAPH_SUCCESS != hwgraph_vertex_name_get(f_c, dst, MAXDEVNAME))
-		return 0;
-
-	/* dst example == /hw/module/001c02/Pbrick/xtalk/8/pci/direct */
-
-	/* find the widget number */
-	xp = strstr(dst, "/"EDGE_LBL_XTALK"/");
-	if (xp == NULL)
-		return 0;
-	widgetnum = simple_strtoul(xp+7, NULL, 0);
-	if (widgetnum < XBOW_PORT_8 || widgetnum > XBOW_PORT_F)
-		return 0;
-
-	/* remove "/pci/direct" from path */
-	cp = strstr(dst, "/" EDGE_LBL_PCI "/" EDGE_LBL_DIRECT);
-	if (cp == NULL)
-		return 0;
-	*cp = (char)NULL;
-
-	/* get the vertex for the widget */
-	if (GRAPH_SUCCESS != hwgraph_traverse(NULL, dp, &svtx))	
-		return 0;
-
-	*xp = (char)NULL;		/* remove "/xtalk/..." from path */
-
-	/* dst example now == /hw/module/001c02/Pbrick */
-
-	/* get the bus number */
-        strcat(dst, "/");
-        strcat(dst, EDGE_LBL_BUS);
-        sprintf(pcibus, "%d", p_busnum[widgetnum]);
-
-	/* link to bus to widget */
-	rv = hwgraph_path_add(NULL, dp, &nvtx);
-	if (GRAPH_SUCCESS == rv)
-		rv = hwgraph_edge_add(nvtx, svtx, pcibus);
-
-	return (rv == GRAPH_SUCCESS);
-}
-
-
-/*
- *    pcibr_attach: called every time the crosstalk
- *      infrastructure is asked to initialize a widget
- *      that matches the part number we handed to the
- *      registration routine above.
- */
-/*ARGSUSED */
-int
-pcibr_attach(vertex_hdl_t xconn_vhdl)
-{
-    /* REFERENCED */
-    graph_error_t           rc;
-    vertex_hdl_t            pcibr_vhdl;
-    bridge_t               *bridge;
-
-    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, xconn_vhdl, "pcibr_attach\n"));
-
-    bridge = (bridge_t *)
-	xtalk_piotrans_addr(xconn_vhdl, NULL,
-			    0, sizeof(bridge_t), 0);
-    /*
-     * Create the vertex for the PCI bus, which we
-     * will also use to hold the pcibr_soft and
-     * which will be the "master" vertex for all the
-     * pciio connection points we will hang off it.
-     * This needs to happen before we call nic_bridge_vertex_info
-     * as we are some of the *_vmc functions need access to the edges.
-     *
-     * Opening this vertex will provide access to
-     * the Bridge registers themselves.
-     */
-    rc = hwgraph_path_add(xconn_vhdl, EDGE_LBL_PCI, &pcibr_vhdl);
-    ASSERT(rc == GRAPH_SUCCESS);
-
-    pciio_provider_register(pcibr_vhdl, &pcibr_provider);
-    pciio_provider_startup(pcibr_vhdl);
-
-    return pcibr_attach2(xconn_vhdl, bridge, pcibr_vhdl, 0, NULL);
-}
-
-
-/*ARGSUSED */
-int
-pcibr_attach2(vertex_hdl_t xconn_vhdl, bridge_t *bridge, 
-	      vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
-{
-    /* REFERENCED */
-    vertex_hdl_t            ctlr_vhdl;
-    bridgereg_t             id;
-    int                     rev;
-    pcibr_soft_t            pcibr_soft;
-    pcibr_info_t            pcibr_info;
-    xwidget_info_t          info;
-    xtalk_intr_t            xtalk_intr;
-    int                     slot;
-    int                     ibit;
-    vertex_hdl_t            noslot_conn;
-    char                    devnm[MAXDEVNAME], *s;
-    pcibr_hints_t           pcibr_hints;
-    uint64_t              int_enable;
-    bridgereg_t             int_enable_32;
-    picreg_t                int_enable_64;
-    unsigned                rrb_fixed = 0;
-
-    int                     spl_level;
-
-#if PCI_FBBE
-    int                     fast_back_to_back_enable;
+
+#ifdef PCI_HOTPLUG
+    /* Release the bus lock */
+    mrunlock(pcibr_soft->bs_bus_lock);
 #endif
-    nasid_t		    nasid;
-    int	                    iobrick_type_get_nasid(nasid_t nasid);
-    int                     iobrick_module_get_nasid(nasid_t nasid);
-
-    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
-	        "pcibr_attach2: bridge=0x%p, busnum=%d\n", bridge, busnum));
-
-    ctlr_vhdl = NULL;
-    ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0, 
-                0, 0, 0,
-		S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0, 
-		(struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
-    ASSERT(ctlr_vhdl != NULL);
-
-    /*
-     * Get the hint structure; if some NIC callback
-     * marked this vertex as "hands-off" then we
-     * just return here, before doing anything else.
-     */
-    pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);
-
-    if (pcibr_hints && pcibr_hints->ph_hands_off)
-	return -1;			/* generic operations disabled */
-
-    id = bridge->b_wid_id;
-    rev = XWIDGET_PART_REV_NUM(id);
-
-    hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, (arbitrary_info_t) rev);
-
-    /*
-     * allocate soft state structure, fill in some
-     * fields, and hook it up to our vertex.
-     */
-    NEW(pcibr_soft);
-    if (ret_softp)
-	*ret_softp = pcibr_soft;
-    BZERO(pcibr_soft, sizeof *pcibr_soft);
-    pcibr_soft_set(pcibr_vhdl, pcibr_soft);
-    pcibr_soft->bs_conn = xconn_vhdl;
-    pcibr_soft->bs_vhdl = pcibr_vhdl;
-    pcibr_soft->bs_base = bridge;
-    pcibr_soft->bs_rev_num = rev;
-    pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *)pcibr_intr_bits;
-
-    pcibr_soft->bs_min_slot = 0;		/* lowest possible slot# */
-    pcibr_soft->bs_max_slot = 7;		/* highest possible slot# */
-    pcibr_soft->bs_busnum = busnum;
-    pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
-    switch(pcibr_soft->bs_bridge_type) {
-    case PCIBR_BRIDGETYPE_BRIDGE:
-	pcibr_soft->bs_int_ate_size = BRIDGE_INTERNAL_ATES;
-	pcibr_soft->bs_bridge_mode = 0;	/* speed is not available in bridge */
-	break;
-    case PCIBR_BRIDGETYPE_PIC:
-        pcibr_soft->bs_min_slot = 0;
-	pcibr_soft->bs_max_slot = 3;
-	pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
-	pcibr_soft->bs_bridge_mode = 
-	   (((bridge->p_wid_stat_64 & PIC_STAT_PCIX_SPEED) >> 33) |
-	    ((bridge->p_wid_stat_64 & PIC_STAT_PCIX_ACTIVE) >> 33));
-
-	/* We have to clear PIC's write request buffer to avoid parity
-	 * errors.  See PV#854845.
-	 */
-	{
-	int i;
-
-	for (i=0; i < PIC_WR_REQ_BUFSIZE; i++) {
-		bridge->p_wr_req_lower[i] = 0;
-		bridge->p_wr_req_upper[i] = 0;
-		bridge->p_wr_req_parity[i] = 0;
-	}
-	}
-
-	break;
-    case PCIBR_BRIDGETYPE_XBRIDGE:
-	pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
-	pcibr_soft->bs_bridge_mode = 
-	   ((bridge->b_wid_control & BRIDGE_CTRL_PCI_SPEED) >> 3);
-	break;
-    }
-
-    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
-		"pcibr_attach2: pcibr_soft=0x%x, mode=0x%x\n",
-                pcibr_soft, pcibr_soft->bs_bridge_mode));
-    pcibr_soft->bsi_err_intr = 0;
-
-    /* Bridges up through REV C
-     * are unable to set the direct
-     * byteswappers to BYTE_STREAM.
-     */
-    if (pcibr_soft->bs_rev_num <= BRIDGE_PART_REV_C) {
-	pcibr_soft->bs_pio_end_io = PCIIO_WORD_VALUES;
-	pcibr_soft->bs_pio_end_mem = PCIIO_WORD_VALUES;
-    }
-#if PCIBR_SOFT_LIST
-    /*
-     * link all the pcibr_soft structs
-     */
-    {
-	pcibr_list_p            self;
-
-	NEW(self);
-	self->bl_soft = pcibr_soft;
-	self->bl_vhdl = pcibr_vhdl;
-	self->bl_next = pcibr_list;
-	pcibr_list = self;
-    }
-#endif /* PCIBR_SOFT_LIST */
-
-    /*
-     * get the name of this bridge vertex and keep the info. Use this
-     * only where it is really needed now: like error interrupts.
-     */
-    s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
-    pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
-    strcpy(pcibr_soft->bs_name, s);
-
-    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
-		"pcibr_attach2: %s ASIC: rev %s (code=0x%x)\n",
-		IS_XBRIDGE_SOFT(pcibr_soft) ? "XBridge" :
-			IS_PIC_SOFT(pcibr_soft) ? "PIC" : "Bridge", 
-		(rev == BRIDGE_PART_REV_A) ? "A" : 
-                (rev == BRIDGE_PART_REV_B) ? "B" :
-                (rev == BRIDGE_PART_REV_C) ? "C" :
-                (rev == BRIDGE_PART_REV_D) ? "D" :
-                (rev == XBRIDGE_PART_REV_A) ? "A" :
-                (rev == XBRIDGE_PART_REV_B) ? "B" :
-                (IS_PIC_PART_REV_A(rev)) ? "A" : 
-                "unknown", rev, pcibr_soft->bs_name));
-
-    info = xwidget_info_get(xconn_vhdl);
-    pcibr_soft->bs_xid = xwidget_info_id_get(info);
-    pcibr_soft->bs_master = xwidget_info_master_get(info);
-    pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);
 
-    pcibr_soft->bs_first_slot = pcibr_soft->bs_min_slot;
-    pcibr_soft->bs_last_slot = pcibr_soft->bs_max_slot;
-    /*
-     * Bridge can only reset slots 0, 1, 2, and 3.  Ibrick internal
-     * slots 4, 5, 6, and 7 must be reset as a group, so do not
-     * reset them.
-     */
-    pcibr_soft->bs_last_reset = 3;
-
-    nasid = NASID_GET(bridge);
-
-    if ((pcibr_soft->bs_bricktype = iobrick_type_get_nasid(nasid)) < 0)
-	printk(KERN_WARNING "0x%p: Unknown bricktype : 0x%x\n", (void *)xconn_vhdl,
-				(unsigned int)pcibr_soft->bs_bricktype);
-
-    pcibr_soft->bs_moduleid = iobrick_module_get_nasid(nasid);
-
-    if (pcibr_soft->bs_bricktype > 0) {
-	switch (pcibr_soft->bs_bricktype) {
-	case MODULE_PXBRICK:
-	case MODULE_IXBRICK:
-	    pcibr_soft->bs_first_slot = 0;
-	    pcibr_soft->bs_last_slot = 1;
-	    pcibr_soft->bs_last_reset = 1;
-
-	    /* If Bus 1 has IO9 then there are 4 devices in that bus.  Note
-	     * we figure this out from klconfig since the kernel has yet to 
-	     * probe
-	     */
-	    if (pcibr_widget_to_bus(pcibr_vhdl) == 1) {
-		lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
-
-		while (brd) {
-		    if (brd->brd_flags & LOCAL_MASTER_IO6) {
-			pcibr_soft->bs_last_slot = 3;
-			pcibr_soft->bs_last_reset = 3;
-		    }
-		    brd = KLCF_NEXT(brd);
-		}
-	    }
-	    break;
-	case MODULE_PBRICK:
-            pcibr_soft->bs_first_slot = 1;
-            pcibr_soft->bs_last_slot = 2;
-            pcibr_soft->bs_last_reset = 2;
-            break;
-
-        case MODULE_IBRICK:
-	    /*
-	     * Here's the current baseio layout for SN1 style systems:
-	     *
-	     *    0    1    2    3    4    5    6    7		slot#
-	     *
-	     *    x    scsi x    x    ioc3 usb  x    x  	O300 Ibrick
-	     *
-             * x == never occupied
-             * E == external (add-in) slot
-	     *
-	     */
-            pcibr_soft->bs_first_slot = 1;	/* Ibrick first slot == 1 */
-            if (pcibr_soft->bs_xid == 0xe) { 
-                pcibr_soft->bs_last_slot = 2;
-                pcibr_soft->bs_last_reset = 2;
-            } else {
-		pcibr_soft->bs_last_slot = 6;
-	    }
-            break;
-	default:
-	    break;
-        }
-
-	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
-		    "pcibr_attach2: %cbrick, slots %d-%d\n",
-		    MODULE_GET_BTCHAR(pcibr_soft->bs_moduleid),
-		    pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot));
-    }
-
-    /*
-     * Initialize bridge and bus locks
-     */
-    spin_lock_init(&pcibr_soft->bs_lock);
-    /*
-     * If we have one, process the hints structure.
-     */
-    if (pcibr_hints) {
-	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
-                    "pcibr_attach2: pcibr_hints=0x%x\n", pcibr_hints));
-
-	rrb_fixed = pcibr_hints->ph_rrb_fixed;
-
-	pcibr_soft->bs_rrb_fixed = rrb_fixed;
-
-	if (pcibr_hints->ph_intr_bits) {
-	    pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
-	}
-
-	for (slot = pcibr_soft->bs_min_slot; 
-				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
-	    int hslot = pcibr_hints->ph_host_slot[slot] - 1;
-
-	    if (hslot < 0) {
-		pcibr_soft->bs_slot[slot].host_slot = slot;
-	    } else {
-		pcibr_soft->bs_slot[slot].has_host = 1;
-		pcibr_soft->bs_slot[slot].host_slot = hslot;
-	    }
-	}
-    }
-    /*
-     * Set-up initial values for state fields
-     */
-    for (slot = pcibr_soft->bs_min_slot; 
-				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
-	pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
-	pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
-	pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
-	pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
-	pcibr_soft->bs_slot[slot].bss_ext_ates_active = ATOMIC_INIT(0);
-	pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
-    }
-
-    for (ibit = 0; ibit < 8; ++ibit) {
-	pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
-	pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
-	pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
-	pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_stat = 
-							&(bridge->b_int_status);
-	pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
-	pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
-	pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
-	pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
-    }
-
-    /*
-     * connect up our error handler.  PIC has 2 busses (thus resulting in 2
-     * pcibr_soft structs under 1 widget), so only register a xwidget error
-     * handler for PIC's bus0.  NOTE: for PIC pcibr_error_handler_wrapper()
-     * is a wrapper routine we register that will call the real error handler
-     * pcibr_error_handler() with the correct pcibr_soft struct.
-     */
-    if (IS_PIC_SOFT(pcibr_soft)) {
-	if (busnum == 0) {
-	    xwidget_error_register(xconn_vhdl, pcibr_error_handler_wrapper, pcibr_soft);
-	}
-    } else {
-	xwidget_error_register(xconn_vhdl, pcibr_error_handler, pcibr_soft);
-    }
-
-    /*
-     * Initialize various Bridge registers.
-     */
-  
-    /*
-     * On pre-Rev.D bridges, set the PCI_RETRY_CNT
-     * to zero to avoid dropping stores. (#475347)
-     */
-    if (rev < BRIDGE_PART_REV_D)
-	bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_MASK;
-
-    /*
-     * Clear all pending interrupts.
-     */
-    bridge->b_int_rst_stat = (BRIDGE_IRR_ALL_CLR);
-
-    /* Initialize some PIC specific registers. */
-    if (IS_PIC_SOFT(pcibr_soft)) {
-	picreg_t pic_ctrl_reg = bridge->p_wid_control_64;
-
-	/* Bridges Requester ID: bus = busnum, dev = 0, func = 0 */
-	pic_ctrl_reg &= ~PIC_CTRL_BUS_NUM_MASK;
-	pic_ctrl_reg |= PIC_CTRL_BUS_NUM(busnum);
-	pic_ctrl_reg &= ~PIC_CTRL_DEV_NUM_MASK;
-	pic_ctrl_reg &= ~PIC_CTRL_FUN_NUM_MASK;
-
-	pic_ctrl_reg &= ~PIC_CTRL_NO_SNOOP;
-	pic_ctrl_reg &= ~PIC_CTRL_RELAX_ORDER;
-
-	/* enable parity checking on PICs internal RAM */
-	pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
-	pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;
-	/* PIC BRINGUP WAR (PV# 862253): dont enable write request
-	 * parity checking.
-	 */
-	if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
-	    pic_ctrl_reg |= PIC_CTRL_PAR_EN_REQ;
-	}
-
-	bridge->p_wid_control_64 = pic_ctrl_reg;
-    }
-
-    /*
-     * Until otherwise set up,
-     * assume all interrupts are
-     * from slot 7(Bridge/Xbridge) or 3(PIC).
-     * XXX. Not sure why we're doing this, made change for PIC
-     * just to avoid setting reserved bits.
-     */
-    if (IS_PIC_SOFT(pcibr_soft))
-	bridge->b_int_device = (uint32_t) 0x006db6db;
-    else
-	bridge->b_int_device = (uint32_t) 0xffffffff;
-
-    {
-	bridgereg_t             dirmap;
-	paddr_t                 paddr;
-	iopaddr_t               xbase;
-	xwidgetnum_t            xport;
-	iopaddr_t               offset;
-	int                     num_entries = 0;
-	int                     entry;
-	cnodeid_t		cnodeid;
-	nasid_t			nasid;
-
-	/* Set the Bridge's 32-bit PCI to XTalk
-	 * Direct Map register to the most useful
-	 * value we can determine.  Note that we
-	 * must use a single xid for all of:
-	 *      direct-mapped 32-bit DMA accesses
-	 *      direct-mapped 64-bit DMA accesses
-	 *      DMA accesses through the PMU
-	 *      interrupts
-	 * This is the only way to guarantee that
-	 * completion interrupts will reach a CPU
-	 * after all DMA data has reached memory.
-	 * (Of course, there may be a few special
-	 * drivers/controlers that explicitly manage
-	 * this ordering problem.)
-	 */
-
-	cnodeid = 0;  /* default node id */
-	nasid = COMPACT_TO_NASID_NODEID(cnodeid);
-	paddr = NODE_OFFSET(nasid) + 0;
-
-	/* currently, we just assume that if we ask
-	 * for a DMA mapping to "zero" the XIO
-	 * host will transmute this into a request
-	 * for the lowest hunk of memory.
-	 */
-	xbase = xtalk_dmatrans_addr(xconn_vhdl, 0,
-				    paddr, _PAGESZ, 0);
-
-	if (xbase != XIO_NOWHERE) {
-	    if (XIO_PACKED(xbase)) {
-		xport = XIO_PORT(xbase);
-		xbase = XIO_ADDR(xbase);
-	    } else
-		xport = pcibr_soft->bs_mxid;
-
-	    offset = xbase & ((1ull << BRIDGE_DIRMAP_OFF_ADDRSHFT) - 1ull);
-	    xbase >>= BRIDGE_DIRMAP_OFF_ADDRSHFT;
-
-	    dirmap = xport << BRIDGE_DIRMAP_W_ID_SHFT;
-
-	    if (xbase)
-		dirmap |= BRIDGE_DIRMAP_OFF & xbase;
-	    else if (offset >= (512 << 20))
-		dirmap |= BRIDGE_DIRMAP_ADD512;
-
-	    bridge->b_dir_map = dirmap;
-	}
-	/*
-	 * Set bridge's idea of page size according to the system's
-	 * idea of "IO page size".  TBD: The idea of IO page size
-	 * should really go away.
-	 */
-	/*
-	 * ensure that we write and read without any interruption.
-	 * The read following the write is required for the Bridge war
-	 */
-	spl_level = splhi();
-#if IOPGSIZE == 4096
-        if (IS_PIC_SOFT(pcibr_soft)) {
-            bridge->p_wid_control_64 &= ~BRIDGE_CTRL_PAGE_SIZE;
-        } else {
-            bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
-        }
-#elif IOPGSIZE == 16384
-        if (IS_PIC_SOFT(pcibr_soft)) {
-            bridge->p_wid_control_64 |= BRIDGE_CTRL_PAGE_SIZE;
-        } else {
-            bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE;
-        }
-#else
-	<<<Unable to deal with IOPGSIZE >>>;
-#endif
-	bridge->b_wid_control;		/* inval addr bug war */
-	splx(spl_level);
-
-	/* Initialize internal mapping entries */
-	for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++) {
-	    bridge->b_int_ate_ram[entry].wr = 0;
-	}
-
-	/*
-	 * Determine if there's external mapping SSRAM on this
-	 * bridge.  Set up Bridge control register appropriately,
-	 * inititlize SSRAM, and set software up to manage RAM
-	 * entries as an allocatable resource.
-	 *
-	 * Currently, we just use the rm* routines to manage ATE
-	 * allocation.  We should probably replace this with a
-	 * Best Fit allocator.
-	 *
-	 * For now, if we have external SSRAM, avoid using
-	 * the internal ssram: we can't turn PREFETCH on
-	 * when we use the internal SSRAM; and besides,
-	 * this also guarantees that no allocation will
-	 * straddle the internal/external line, so we
-	 * can increment ATE write addresses rather than
-	 * recomparing against BRIDGE_INTERNAL_ATES every
-	 * time.
-	 */
-
-	if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
-		num_entries = 0;
-	else
-		num_entries = pcibr_init_ext_ate_ram(bridge);
-
-	/* we always have 128 ATEs (512 for Xbridge) inside the chip
-	 * even if disabled for debugging.
-	 */
-	pcibr_soft->bs_int_ate_resource.start = 0;
-	pcibr_soft->bs_int_ate_resource.end = pcibr_soft->bs_int_ate_size - 1;
-
-	if (num_entries > pcibr_soft->bs_int_ate_size) {
-#if PCIBR_ATE_NOTBOTH			/* for debug -- forces us to use external ates */
-	    printk("pcibr_attach: disabling internal ATEs.\n");
-	    pcibr_ate_alloc(pcibr_soft, pcibr_soft->bs_int_ate_size);
-#endif
-	   pcibr_soft->bs_ext_ate_resource.start = pcibr_soft->bs_int_ate_size;
-	   pcibr_soft->bs_ext_ate_resource.end = num_entries;
-	}
-
-        pcibr_soft->bs_allocated_ate_res = (void *) kmalloc(pcibr_soft->bs_int_ate_size * sizeof(unsigned long), GFP_KERNEL);
-	memset(pcibr_soft->bs_allocated_ate_res, 0x0, pcibr_soft->bs_int_ate_size * sizeof(unsigned long));
-
-	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, pcibr_vhdl,
-		    "pcibr_attach2: %d ATEs, %d internal & %d external\n",
-		    num_entries ? num_entries : pcibr_soft->bs_int_ate_size,
-		    pcibr_soft->bs_int_ate_size,
-		    num_entries ? num_entries-pcibr_soft->bs_int_ate_size : 0));
-    }
-
-    {
-	bridgereg_t             dirmap;
-	iopaddr_t               xbase;
-
-	/*
-	 * now figure the *real* xtalk base address
-	 * that dirmap sends us to.
-	 */
-	dirmap = bridge->b_dir_map;
-	if (dirmap & BRIDGE_DIRMAP_OFF)
-	    xbase = (iopaddr_t)(dirmap & BRIDGE_DIRMAP_OFF)
-			<< BRIDGE_DIRMAP_OFF_ADDRSHFT;
-	else if (dirmap & BRIDGE_DIRMAP_ADD512)
-	    xbase = 512 << 20;
-	else
-	    xbase = 0;
-
-	pcibr_soft->bs_dir_xbase = xbase;
-
-	/* it is entirely possible that we may, at this
-	 * point, have our dirmap pointing somewhere
-	 * other than our "master" port.
-	 */
-	pcibr_soft->bs_dir_xport =
-	    (dirmap & BRIDGE_DIRMAP_W_ID) >> BRIDGE_DIRMAP_W_ID_SHFT;
-    }
-
-    /* pcibr sources an error interrupt;
-     * figure out where to send it.
-     *
-     * If any interrupts are enabled in bridge,
-     * then the prom set us up and our interrupt
-     * has already been reconnected in mlreset
-     * above.
-     *
-     * Need to set the D_INTR_ISERR flag
-     * in the dev_desc used for allocating the
-     * error interrupt, so our interrupt will
-     * be properly routed and prioritized.
-     *
-     * If our crosstalk provider wants to
-     * fix widget error interrupts to specific
-     * destinations, D_INTR_ISERR is how it
-     * knows to do this.
-     */
-
-    xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);
-	{
-		int irq = ((hub_intr_t)xtalk_intr)->i_bit;
-		int cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;
-
-		intr_unreserve_level(cpu, irq);
-		((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
-	}
-    ASSERT(xtalk_intr != NULL);
-
-    pcibr_soft->bsi_err_intr = xtalk_intr;
-
-    /*
-     * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
-     * in order to work around some addressing limitations.  In order
-     * for that fire wall to work properly, we need to make sure we
-     * start from a known clean state.
-     */
-    pcibr_clearwidint(bridge);
-
-    xtalk_intr_connect(xtalk_intr, (intr_func_t) pcibr_error_intr_handler,
-		(intr_arg_t) pcibr_soft, (xtalk_intr_setfunc_t)pcibr_setwidint, (void *)bridge);
-
-    request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, SA_SHIRQ, "PCIBR error",
-					(intr_arg_t) pcibr_soft);
-
-    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
-		"pcibr_setwidint: b_wid_int_upper=0x%x, b_wid_int_lower=0x%x\n",
-		bridge->b_wid_int_upper, bridge->b_wid_int_lower));
-
-    /*
-     * now we can start handling error interrupts;
-     * enable all of them.
-     * NOTE: some PCI ints may already be enabled.
-     */
-    /* We read the INT_ENABLE register as a 64bit picreg_t for PIC and a
-     * 32bit bridgereg_t for BRIDGE, but always process the result as a
-     * 64bit value so the code can be "common" for both PIC and BRIDGE...
-     */
-    if (IS_PIC_SOFT(pcibr_soft)) {
-	int_enable_64 = bridge->p_int_enable_64 | BRIDGE_ISR_ERRORS;
-        int_enable = (uint64_t)int_enable_64;
-#ifdef PFG_TEST
-	int_enable = (uint64_t)0x7ffffeff7ffffeff;
-#endif
-    } else {
-	int_enable_32 = bridge->b_int_enable | (BRIDGE_ISR_ERRORS & 0xffffffff);
-	int_enable = ((uint64_t)int_enable_32 & 0xffffffff);
-#ifdef PFG_TEST
-	int_enable = (uint64_t)0x7ffffeff;
-#endif
-    }
-
-
-#if BRIDGE_ERROR_INTR_WAR
-    if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
-	/*
-	 * We commonly get master timeouts when talking to ql.
-	 * We also see RESP_XTALK_ERROR and LLP_TX_RETRY interrupts.
-	 * Insure that these are all disabled for now.
-	 */
-	int_enable &= ~(BRIDGE_IMR_PCI_MST_TIMEOUT |
-			BRIDGE_ISR_RESP_XTLK_ERR |
-			BRIDGE_ISR_LLP_TX_RETRY);
-    }
-    if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_C) {
-	int_enable &= ~BRIDGE_ISR_BAD_XRESP_PKT;
-    }
-#endif				/* BRIDGE_ERROR_INTR_WAR */
-
-#ifdef QL_SCSI_CTRL_WAR			/* for IP30 only */
-    /* Really a QL rev A issue, but all newer hearts have newer QLs.
-     * Forces all IO6/MSCSI to be new.
-     */
-    if (heart_rev() == HEART_REV_A)
-	int_enable &= ~BRIDGE_IMR_PCI_MST_TIMEOUT;
-#endif
-
-#ifdef BRIDGE1_TIMEOUT_WAR
-    if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
-	/*
-	 * Turn off these interrupts.  They can't be trusted in bridge 1
-	 */
-	int_enable &= ~(BRIDGE_IMR_XREAD_REQ_TIMEOUT |
-			BRIDGE_IMR_UNEXP_RESP);
-    }
-#endif
-
-    /* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
-     * locked out to be freed up sooner (by timing out) so that the
-     * read tnums are never completely used up.
-     */
-    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
-        int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
-        int_enable &= ~BRIDGE_ISR_XREAD_REQ_TIMEOUT;
-
-        bridge->b_wid_req_timeout = 0x750;
-    }
-
-    /*
-     * PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
-     * RRB0, RRB8, RRB1, and RRB9.  Assign them to DEVICE[2|3]--VCHAN3
-     * so they are not used
-     */
-    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
-        bridge->b_even_resp |= 0x000f000f;
-        bridge->b_odd_resp |= 0x000f000f;
-    }
-
-    if (IS_PIC_SOFT(pcibr_soft)) {
-        bridge->p_int_enable_64 = (picreg_t)int_enable;
-    } else {
-        bridge->b_int_enable = (bridgereg_t)int_enable;
-    }
-    bridge->b_int_mode = 0;		/* do not send "clear interrupt" packets */
-
-    bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
-
-    /*
-     * Depending on the rev of bridge, disable certain features.
-     * Easiest way seems to be to force the PCIBR_NOwhatever
-     * flag to be on for all DMA calls, which overrides any
-     * PCIBR_whatever flag or even the setting of whatever
-     * from the PCIIO_DMA_class flags (or even from the other
-     * PCIBR flags, since NO overrides YES).
-     */
-    pcibr_soft->bs_dma_flags = 0;
-
-    /* PREFETCH:
-     * Always completely disabled for REV.A;
-     * at "pcibr_prefetch_enable_rev", anyone
-     * asking for PCIIO_PREFETCH gets it.
-     * Between these two points, you have to ask
-     * for PCIBR_PREFETCH, which promises that
-     * your driver knows about known Bridge WARs.
-     */
-    if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
-	pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
-    else if (pcibr_soft->bs_rev_num < 
-		(BRIDGE_WIDGET_PART_NUM << 4))
-	pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;
-
-    /* WRITE_GATHER: Disabled */
-    if (pcibr_soft->bs_rev_num < 
-		(BRIDGE_WIDGET_PART_NUM << 4))
-	pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;
-
-    /* PIC only supports 64-bit direct mapping in PCI-X mode.  Since
-     * all PCI-X devices that initiate memory transactions must be
-     * capable of generating 64-bit addressed, we force 64-bit DMAs.
-     */
-    if (IS_PCIX(pcibr_soft)) {
-	pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
-    }
-
-    {
-
-    iopaddr_t               prom_base_addr = pcibr_soft->bs_xid << 24;
-    int                     prom_base_size = 0x1000000;
-    int			    status;
-    struct resource	    *res;
-
-    /* Allocate resource maps based on bus page size; for I/O and memory
-     * space, free all pages except those in the base area and in the
-     * range set by the PROM. 
-     *
-     * PROM creates BAR addresses in this format: 0x0ws00000 where w is
-     * the widget number and s is the device register offset for the slot.
-     */
-
-    /* Setup the Bus's PCI IO Root Resource. */
-    pcibr_soft->bs_io_win_root_resource.start = PCIBR_BUS_IO_BASE;
-    pcibr_soft->bs_io_win_root_resource.end = 0xffffffff;
-    res = (struct resource *) kmalloc( sizeof(struct resource), KM_NOSLEEP);
-    if (!res)
-	panic("PCIBR:Unable to allocate resource structure\n");
-
-    /* Block off the range used by PROM. */
-    res->start = prom_base_addr;
-    res->end = prom_base_addr + (prom_base_size - 1);
-    status = request_resource(&pcibr_soft->bs_io_win_root_resource, res);
-    if (status)
-	panic("PCIBR:Unable to request_resource()\n");
-
-    /* Setup the Small Window Root Resource */
-    pcibr_soft->bs_swin_root_resource.start = _PAGESZ;
-    pcibr_soft->bs_swin_root_resource.end = 0x000FFFFF;
-
-    /* Setup the Bus's PCI Memory Root Resource */
-    pcibr_soft->bs_mem_win_root_resource.start = 0x200000;
-    pcibr_soft->bs_mem_win_root_resource.end = 0xffffffff;
-    res = (struct resource *) kmalloc( sizeof(struct resource), KM_NOSLEEP);
-    if (!res)
-        panic("PCIBR:Unable to allocate resource structure\n");
-
-    /* Block off the range used by PROM. */
-    res->start = prom_base_addr;
-    res->end = prom_base_addr + (prom_base_size - 1);;
-    status = request_resource(&pcibr_soft->bs_mem_win_root_resource, res);
-    if (status)
-        panic("PCIBR:Unable to request_resource()\n");
-
-    }
-
-    /* build "no-slot" connection point
-     */
-    pcibr_info = pcibr_device_info_new
-	(pcibr_soft, PCIIO_SLOT_NONE, PCIIO_FUNC_NONE,
-	 PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
-    noslot_conn = pciio_device_info_register
-	(pcibr_vhdl, &pcibr_info->f_c);
-
-    /* Remember the no slot connection point info for tearing it
-     * down during detach.
-     */
-    pcibr_soft->bs_noslot_conn = noslot_conn;
-    pcibr_soft->bs_noslot_info = pcibr_info;
-#if PCI_FBBE
-    fast_back_to_back_enable = 1;
-#endif
-
-#if PCI_FBBE
-    if (fast_back_to_back_enable) {
-	/*
-	 * All devices on the bus are capable of fast back to back, so
-	 * we need to set the fast back to back bit in all devices on
-	 * the bus that are capable of doing such accesses.
-	 */
-    }
-#endif
-
-    for (slot = pcibr_soft->bs_min_slot; 
-				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
-	/* Find out what is out there */
-	(void)pcibr_slot_info_init(pcibr_vhdl,slot);
-    }
-    for (slot = pcibr_soft->bs_min_slot; 
-				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
-	/* Set up the address space for this slot in the PCI land */
-	(void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);
-
-    for (slot = pcibr_soft->bs_min_slot; 
-				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
-	/* Setup the device register */
-	(void)pcibr_slot_device_init(pcibr_vhdl, slot);
-
-    if (IS_PCIX(pcibr_soft)) {
-        pcibr_soft->bs_pcix_rbar_inuse = 0;
-        pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
-	pcibr_soft->bs_pcix_rbar_percent_allowed = 
-					pcibr_pcix_rbars_calc(pcibr_soft);
-
-	for (slot = pcibr_soft->bs_min_slot; 
-				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
-	    /* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
-	    (void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
-    }
-
-    /* Set up convenience links */
-    if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
-	pcibr_bus_cnvlink(pcibr_soft->bs_vhdl);
-
-    for (slot = pcibr_soft->bs_min_slot; 
-				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
-	/* Setup host/guest relations */
-	(void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);
-
-    /* Handle initial RRB management for Bridge and Xbridge */
-    pcibr_initial_rrb(pcibr_vhdl, 
-                      pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot);
-    
-{  /* Before any drivers get called that may want to re-allocate
-    * RRB's, let's get some special cases pre-allocated. Drivers
-    * may override these pre-allocations, but by doing pre-allocations
-    * now we're assured not to step all over what the driver intended.
-    *
-    * Note: Someday this should probably be moved over to pcibr_rrb.c
-    */
-    /*
-     * Each Pbrick PCI bus only has slots 1 and 2.   Similarly for
-     * widget 0xe on Ibricks.  Allocate RRB's accordingly.
-     */
-    if (pcibr_soft->bs_bricktype > 0) {
-	switch (pcibr_soft->bs_bricktype) {
-	case MODULE_PBRICK:
-		do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
-		do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
-		break;
-	case MODULE_IBRICK:
-	  	/* port 0xe on the Ibrick only has slots 1 and 2 */
-		if (pcibr_soft->bs_xid == 0xe) {
-			do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
-			do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
-		}
-		else {
-		    	/* allocate one RRB for the serial port */
-			do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 1);
-		}
-		break;
-	case MODULE_PXBRICK:
-	case MODULE_IXBRICK:
-		/* 
-		 * If the IO9 is in the PXBrick (bus1, slot1) allocate
-                 * RRBs to all the devices
-		 */
-		if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) &&
-		    (pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9) &&
-		    (pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
-			do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 4);
-			do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 4);
-			do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 4);
-			do_pcibr_rrb_autoalloc(pcibr_soft, 3, VCHAN0, 4);
-		} else {
-			do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 8);
-			do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
-		}
-		break;
-	} /* switch */
-    }
-
-#ifdef LATER
-    if (strstr(nicinfo, XTALK_PCI_PART_NUM)) {
-	do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
-    }
-#endif
-}  /* OK Special RRB allocations are done. */
-
-    for (slot = pcibr_soft->bs_min_slot; 
-				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
-	/* Call the device attach */
-	(void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
-
-    pciio_device_attach(noslot_conn, (int)0);
-
-    return 0;
 }
 
 /*
@@ -1859,15 +715,13 @@
  *	Detach the bridge device from the hwgraph after cleaning out all the 
  *	underlying vertices.
  */
-
 int
 pcibr_detach(vertex_hdl_t xconn)
 {
-    pciio_slot_t	slot;
-    vertex_hdl_t	pcibr_vhdl;
-    pcibr_soft_t	pcibr_soft;
-    bridge_t		*bridge;
-    unsigned             s;
+    pciio_slot_t	 slot;
+    vertex_hdl_t	 pcibr_vhdl;
+    pcibr_soft_t	 pcibr_soft;
+    unsigned long        s;
 
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, xconn, "pcibr_detach\n"));
 
@@ -1876,16 +730,10 @@
 	return(1);
 
     pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-    bridge = pcibr_soft->bs_base;
 
-
-    s = pcibr_lock(pcibr_soft);
     /* Disable the interrupts from the bridge */
-    if (IS_PIC_SOFT(pcibr_soft)) {
-	bridge->p_int_enable_64 = 0;
-    } else {
-	bridge->b_int_enable = 0;
-    }
+    s = pcibr_lock(pcibr_soft);
+    pcireg_intr_enable_set(pcibr_soft, 0);
     pcibr_unlock(pcibr_soft, s);
 
     /* Detach all the PCI devices talking to this bridge */
@@ -1898,7 +746,6 @@
     pciio_device_info_unregister(pcibr_vhdl,
 				 &(pcibr_soft->bs_noslot_info->f_c));
 
-    spin_lock_destroy(&pcibr_soft->bs_lock);
     kfree(pcibr_soft->bs_name);
     
     /* Disconnect the error interrupt and free the xtalk resources 
@@ -1911,28 +758,73 @@
      * bridge.
      */
     DEL(pcibr_soft);
+
     /* Remove the Bridge revision labelled info */
     (void)hwgraph_info_remove_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, NULL);
+
     /* Remove the character device associated with this bridge */
-    (void)hwgraph_edge_remove(pcibr_vhdl, EDGE_LBL_CONTROLLER, NULL);
+    hwgraph_edge_remove(pcibr_vhdl, EDGE_LBL_CONTROLLER, NULL);
+
     /* Remove the PCI bridge vertex */
-    (void)hwgraph_edge_remove(xconn, EDGE_LBL_PCI, NULL);
+    hwgraph_edge_remove(xconn, EDGE_LBL_PCI, NULL);
 
     return(0);
 }
 
+
+/*
+ * Set the Bridge's 32-bit PCI to XTalk Direct Map register to the most useful
+ * value we can determine.  Note that we must use a single xid for all of:
+ *	-direct-mapped 32-bit DMA accesses
+ *	-direct-mapped 64-bit DMA accesses
+ *	-DMA accesses through the PMU
+ *	-interrupts
+ * This is the only way to guarantee that completion interrupts will reach a
+ * CPU after all DMA data has reached memory.
+ */
+void
+pcibr_directmap_init(pcibr_soft_t pcibr_soft)
+{
+    paddr_t		paddr;
+    iopaddr_t		xbase;
+    uint64_t		diroff;
+    cnodeid_t		cnodeid = 0;	/* We need an api to determine diroff */
+    nasid_t		nasid;
+
+    nasid = COMPACT_TO_NASID_NODEID(cnodeid);
+    paddr = NODE_OFFSET(nasid) + 0;
+
+    /* Assume that if we ask for a DMA mapping to zero the XIO host will
+     * transmute this into a request for the lowest hunk of memory.
+     */
+    xbase = xtalk_dmatrans_addr(pcibr_soft->bs_conn, 0, paddr, PAGE_SIZE, 0);
+
+    diroff = xbase >> PCIBR_DIRMAP_OFF_ADDRSHFT;
+    pcireg_dirmap_diroff_set(pcibr_soft, diroff);
+    pcireg_dirmap_wid_set(pcibr_soft, pcibr_soft->bs_mxid);
+    pcibr_soft->bs_dir_xport = pcibr_soft->bs_mxid;
+    if (xbase == (512 << 20)) { /* 512Meg */
+	pcireg_dirmap_add512_set(pcibr_soft);
+	pcibr_soft->bs_dir_xbase = (512 << 20);
+    } else {
+	pcireg_dirmap_add512_clr(pcibr_soft);
+	pcibr_soft->bs_dir_xbase = diroff << PCIBR_DIRMAP_OFF_ADDRSHFT;
+    }
+}
+
+
 int
 pcibr_asic_rev(vertex_hdl_t pconn_vhdl)
 {
-    vertex_hdl_t          pcibr_vhdl;
-    int                     tmp_vhdl;
+    vertex_hdl_t            pcibr_vhdl;
+    int			    rc;
     arbitrary_info_t        ainfo;
 
     if (GRAPH_SUCCESS !=
 	hwgraph_traverse(pconn_vhdl, EDGE_LBL_MASTER, &pcibr_vhdl))
 	return -1;
 
-    tmp_vhdl = hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo);
+    rc = hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo);
 
     /*
      * Any hwgraph function that returns a vertex handle will implicity
@@ -1945,20 +837,10 @@
      */
     hwgraph_vertex_unref(pcibr_vhdl);
 
-    if (tmp_vhdl != GRAPH_SUCCESS) 
+    if (rc != GRAPH_SUCCESS) 
 	return -1;
-    return (int) ainfo;
-}
 
-int
-pcibr_write_gather_flush(vertex_hdl_t pconn_vhdl)
-{
-    pciio_info_t  pciio_info = pciio_info_get(pconn_vhdl);
-    pcibr_soft_t  pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-    pciio_slot_t  slot;
-    slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
-    pcibr_device_write_gather_flush(pcibr_soft, slot);
-    return 0;
+    return (int) ainfo;
 }
 
 /* =====================================================================
@@ -1974,14 +856,12 @@
 		      unsigned flags)
 {
     pcibr_info_t            pcibr_info = pcibr_info_get(pconn_vhdl);
-    pciio_info_t            pciio_info = &pcibr_info->f_c;
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-    bridge_t               *bridge = pcibr_soft->bs_base;
-
     unsigned                bar;	/* which BASE reg on device is decoding */
     iopaddr_t               xio_addr = XIO_NOWHERE;
-    iopaddr_t               base;	/* base of devio(x) mapped area on PCI */
-    iopaddr_t               limit;	/* base of devio(x) mapped area on PCI */
+    iopaddr_t               base = 0;	/* base of devio(x) mapped area on PCI */
+    iopaddr_t               limit = 0;	/* limit of devio(x) mapped area on PCI */
 
     pciio_space_t           wspace;	/* which space device is decoding */
     iopaddr_t               wbase;	/* base of device decode on PCI */
@@ -1996,7 +876,7 @@
     size_t                  mmask;	/* addr bits stored in Device(x) */
     char		    tmp_str[512];
 
-    unsigned long           s;
+    unsigned long	    s;
 
     s = pcibr_lock(pcibr_soft);
 
@@ -2005,8 +885,8 @@
 	pcibr_info = pcibr_soft->bs_slot[slot].bss_infos[0];
 
 	/*
-	 * Special case for dual-slot pci devices such as ioc3 on IP27
-	 * baseio.  In these cases, pconn_vhdl should never be for a pci
+	 * Special case for dual-slot pci devices.
+	 * In these cases, pconn_vhdl should never be for a pci
 	 * function on a subordiate PCI bus, so we can safely reset pciio_info
 	 * to be the info struct embedded in pcibr_info.  Failure to do this
 	 * results in using a bogus pciio_info_t for calculations done later
@@ -2020,6 +900,10 @@
 
     if (space == PCIIO_SPACE_CFG) {
 	/*
+	 * ###maule:  Don't allow pio maps to type-1 config space
+	 */
+
+	/*
 	 * Usually, the first mapping
 	 * established to a PCI device
 	 * is to its config space.
@@ -2032,8 +916,8 @@
 	 * this access ...
 	 */
 	if (((flags & PCIIO_BYTE_STREAM) == 0) &&
-	    ((pci_addr + req_size) <= BRIDGE_TYPE0_CFG_FUNC_OFF))
-	    xio_addr = pci_addr + PCIBR_TYPE0_CFG_DEV(pcibr_soft, slot);
+	    ((pci_addr + req_size) <= PCIBR_CONFIG_FUNC_SIZE))
+	    xio_addr = pci_addr + PCIBR_BUS_TYPE0_CFG_DEV(pcibr_soft, slot);
 
 	goto done;
     }
@@ -2103,7 +987,7 @@
     maxtry = PCIBR_NUM_SLOTS(pcibr_soft) * 2;
     halftry = PCIBR_NUM_SLOTS(pcibr_soft) - 1;
     for (try = 0; try < maxtry; ++try) {
-	bridgereg_t             devreg;
+	uint64_t		devreg;
 	unsigned                offset;
 
 	/* calculate win based on slot, attempt, and max possible
@@ -2154,45 +1038,30 @@
 	    if ((mbase | offset) != pci_addr)
 		continue;
 
-	    devreg &= ~BRIDGE_DEV_OFF_MASK;
+	    devreg &= ~PCIBR_DEV_OFF_MASK;
 	    if (space != PCIIO_SPACE_IO)
-		devreg |= BRIDGE_DEV_DEV_IO_MEM;
+		devreg |= PCIBR_DEV_DEV_IO_MEM;
 	    else
-		devreg &= ~BRIDGE_DEV_DEV_IO_MEM;
-	    devreg |= (mbase >> 20) & BRIDGE_DEV_OFF_MASK;
+		devreg &= ~PCIBR_DEV_DEV_IO_MEM;
+	    devreg |= (mbase >> 20) & PCIBR_DEV_OFF_MASK;
 
 	    /* default is WORD_VALUES.
 	     * if you specify both,
 	     * operation is undefined.
 	     */
 	    if (flags & PCIIO_BYTE_STREAM)
-		devreg |= BRIDGE_DEV_DEV_SWAP;
+		devreg |= PCIBR_DEV_DEV_SWAP;
 	    else
-		devreg &= ~BRIDGE_DEV_DEV_SWAP;
+		devreg &= ~PCIBR_DEV_DEV_SWAP;
 
 	    if (pcibr_soft->bs_slot[win].bss_device != devreg) {
-		if ( IS_PIC_SOFT(pcibr_soft) ) {
-			bridge->b_device[win].reg = devreg;
-			pcibr_soft->bs_slot[win].bss_device = devreg;
-			bridge->b_wid_tflush;   /* wait until Bridge PIO complete */
-		}
-		else {
-			if (io_get_sh_swapper(NASID_GET(bridge))) {
-				BRIDGE_REG_SET32((&bridge->b_device[win].reg)) = __swab32(devreg);
-				pcibr_soft->bs_slot[win].bss_device = devreg;
-				BRIDGE_REG_GET32((&bridge->b_wid_tflush)); /* wait until Bridge PIO complete */
-			} else {
-				bridge->b_device[win].reg = devreg;
-				pcibr_soft->bs_slot[win].bss_device = devreg;
-				bridge->b_wid_tflush;   /* wait until Bridge PIO complete */
-			}
-		}
+		pcireg_device_set(pcibr_soft, win, devreg);
+		pcibr_soft->bs_slot[win].bss_device = devreg;
+		pcireg_tflush_get(pcibr_soft);	/* wait until Bridge PIO complete */
 
-#ifdef PCI_LATER
 		PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pconn_vhdl, 
-			    "pcibr_addr_pci_to_xio: Device(%d): %x\n",
-			    win, devreg, device_bits));
-#endif
+			    "pcibr_addr_pci_to_xio: Device(%d): 0x%x\n",
+			    win, devreg));
 	    }
 	    pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
 	    pcibr_soft->bs_slot[win].bss_devio.bssd_base = mbase;
@@ -2207,14 +1076,15 @@
 
 	    /*
 	     * The kernel only allows functions to have so many variable args,
-	     * attempting to call PCIBR_DEBUG_ALWAYS() with more than 5 printk
+	     * attempting to call PCIBR_DEBUG_ALWAYS() with more than 5 printf
 	     * arguments fails so sprintf() it into a temporary string.
 	     */
 	    if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
-	        sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to [%lx..%lx] for "
-		        "slot %d allocates DevIO(%d) Device(%d) set to %lx\n",
-		        (unsigned long)pci_addr, (unsigned long)(pci_addr + req_size - 1),
-		        (unsigned int)slot, win, win, (unsigned long)devreg);
+	        sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to space %s "
+			"[0x%lx..0x%lx] for slot %d allocates DevIO(%d) "
+		        "Device(%d) set to %lx\n",
+		        pci_space[space], pci_addr, pci_addr + req_size - 1,
+		        slot, win, win, devreg);
 	        PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
 	    }
 	    goto done;
@@ -2226,8 +1096,8 @@
 	if ((mspace != space) ||
 	    (pci_addr < mbase) ||
 	    ((pci_addr + req_size) > (mbase + msize)) ||
-	    ((flags & PCIIO_BYTE_STREAM) && !(devreg & BRIDGE_DEV_DEV_SWAP)) ||
-	    (!(flags & PCIIO_BYTE_STREAM) && (devreg & BRIDGE_DEV_DEV_SWAP)))
+	    ((flags & PCIIO_BYTE_STREAM) && !(devreg & PCIBR_DEV_DEV_SWAP)) ||
+	    (!(flags & PCIIO_BYTE_STREAM) && (devreg & PCIBR_DEV_DEV_SWAP)))
 	    continue;
 
 	/* DevIO(x) window is pointed at PCI space
@@ -2241,10 +1111,15 @@
         pcibr_soft->bs_slot[win].bss_devio.bssd_ref_cnt++;
 
         /* Save the DevIO register index used to access this BAR */
+	/* ###maule:  what to do here if this is a downstream device */
         if (bar != -1)
             pcibr_info->f_window[bar].w_devio_index = win;
 
 	if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
+	    sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to space %s "
+		    "[0x%lx..0x%lx] for slot %d uses DevIO(%d)\n",
+		    pci_space[space], pci_addr, pci_addr + req_size - 1,
+		    slot, win);
 	    PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
 	}
 	goto done;
@@ -2252,16 +1127,12 @@
 
     switch (space) {
 	/*
-	 * Accesses to device decode
-	 * areas that do a not fit
-	 * within the DevIO(x) space are
-	 * modified to be accesses via
-	 * the direct mapping areas.
+	 * Accesses to device decode areas that do not fit within the
+	 * DevIO(x) space are modified to be accesses via the direct
+	 * mapping areas.
 	 *
-	 * If necessary, drivers can
-	 * explicitly ask for mappings
-	 * into these address spaces,
-	 * but this should never be needed.
+	 * If necessary, drivers can explicitly ask for mappings into
+	 * these address spaces, but this should never be needed.
 	 */
     case PCIIO_SPACE_MEM:		/* "mem space" */
     case PCIIO_SPACE_MEM32:		/* "mem, use 32-bit-wide bus" */
@@ -2271,9 +1142,11 @@
 	} else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) {	/* PIC bus 1 */
 		base = PICBRIDGE1_PCI_MEM32_BASE;
 		limit = PICBRIDGE1_PCI_MEM32_LIMIT;
-	} else {					/* Bridge/Xbridge */
-		base = BRIDGE_PCI_MEM32_BASE;
-		limit = BRIDGE_PCI_MEM32_LIMIT;
+	} else if (IS_TIOCP_SOFT(pcibr_soft)) {		/* TIOCP */
+		base = TIOCP_BRIDGE_PCI_MEM32_BASE;
+		limit = TIOCP_BRIDGE_PCI_MEM32_LIMIT;
+	} else {
+		panic("pcibr_addr_pci_to_xio(): unknown bridge type");
 	}
 
 	if ((pci_addr + base + req_size - 1) <= limit)
@@ -2287,9 +1160,11 @@
 	} else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) {	/* PIC bus 1 */
 		base = PICBRIDGE1_PCI_MEM64_BASE;
 		limit = PICBRIDGE1_PCI_MEM64_LIMIT;
-	} else {					/* Bridge/Xbridge */
-		base = BRIDGE_PCI_MEM64_BASE;
-		limit = BRIDGE_PCI_MEM64_LIMIT;
+	} else if (IS_TIOCP_SOFT(pcibr_soft)) {		/* TIOCP */
+		base = TIOCP_BRIDGE_PCI_MEM64_BASE;
+		limit = TIOCP_BRIDGE_PCI_MEM64_LIMIT;
+	} else {
+		panic("pcibr_addr_pci_to_xio(): unknown bridge type");
 	}
 
 	if ((pci_addr + base + req_size - 1) <= limit)
@@ -2298,22 +1173,10 @@
 
     case PCIIO_SPACE_IO:		/* "i/o space" */
 	/*
-	 * PIC bridges do not support big-window aliases into PCI I/O space
+	 * Only BRIDGE & XBRIDGE support big-window aliases into PCI I/O 
+	 * space.  Return XIO_NOWHERE for every other ASIC (PIC, TIOCP).
 	 */
-	if (IS_PIC_SOFT(pcibr_soft)) {
-		xio_addr = XIO_NOWHERE;
-		break;
-	}
-
-	/* Bridge Hardware Bug WAR #482741:
-	 * The 4G area that maps directly from
-	 * XIO space to PCI I/O space is busted
-	 * until Bridge Rev D.
-	 */
-	if ((pcibr_soft->bs_rev_num > BRIDGE_PART_REV_C) &&
-	    ((pci_addr + BRIDGE_PCI_IO_BASE + req_size - 1) <=
-	     BRIDGE_PCI_IO_LIMIT))
-	    xio_addr = pci_addr + BRIDGE_PCI_IO_BASE;
+	xio_addr = XIO_NOWHERE;
 	break;
     }
 
@@ -2323,7 +1186,7 @@
     if (xio_addr != XIO_NOWHERE) {
 	unsigned                bst;	/* nonzero to set bytestream */
 	unsigned               *bfp;	/* addr of record of how swapper is set */
-	unsigned                swb;	/* which control bit to mung */
+	uint64_t		swb;	/* which control bit to mung */
 	unsigned                bfo;	/* current swapper setting */
 	unsigned                bfn;	/* desired swapper setting */
 
@@ -2341,38 +1204,36 @@
 	    ;
 	} else if (bfo != 0) {		/* we have a conflict. */
 	    if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
+	        sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap conflict in %s, "
+		        "was%s%s, want%s%s\n", pci_space[space],
+		        bfo & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
+		        bfo & PCIIO_WORD_VALUES ? " WORD_VALUES" : "",
+		        bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
+		        bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
 	        PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
 	    }
 	    xio_addr = XIO_NOWHERE;
 	} else {			/* OK to make the change. */
-	    swb = (space == PCIIO_SPACE_IO) ? BRIDGE_CTRL_IO_SWAP : BRIDGE_CTRL_MEM_SWAP;
-	    if ( IS_PIC_SOFT(pcibr_soft) ) {
-	    	picreg_t             octl, nctl;
-		octl = bridge->p_wid_control_64;
-		nctl = bst ? octl | (uint64_t)swb : octl & ((uint64_t)~swb);
 
-		if (octl != nctl)		/* make the change if any */
-			bridge->b_wid_control = nctl;
+#if 0	/*XXX habeck: BRIDGE_CTRL_IO_SWAP is not valid on PIC or TIO... Fix this */
+	    swb = (space == PCIIO_SPACE_IO) ? 
+				BRIDGE_CTRL_IO_SWAP : BRIDGE_CTRL_MEM_SWAP;
+#else
+	    swb = (space == PCIIO_SPACE_IO) ? 0 : PCIBR_CTRL_MEM_SWAP;
+#endif
+	    if (bst) {
+		pcireg_control_bit_set(pcibr_soft, swb);
+	    } else {
+		pcireg_control_bit_clr(pcibr_soft, swb);
 	    }
-	    else {
-	    	picreg_t             octl, nctl;
-		if (io_get_sh_swapper(NASID_GET(bridge))) {
-			octl = BRIDGE_REG_GET32((&bridge->b_wid_control));
-			nctl = bst ? octl | swb : octl & ~swb;
-
-			if (octl != nctl)           /* make the change if any */
-				BRIDGE_REG_SET32((&bridge->b_wid_control)) = __swab32(nctl);
-		} else {
-			octl = bridge->b_wid_control;
-			nctl = bst ? octl | swb : octl & ~swb;
 
-			if (octl != nctl)           /* make the change if any */
-				bridge->b_wid_control = nctl;
-		}
-	    }
 	    *bfp = bfn;			/* record the assignment */
 
 	    if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
+	        sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap for %s set "
+			"to%s%s\n", pci_space[space],
+		        bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
+		        bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
 	        PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
 	    }
 	}
@@ -2403,7 +1264,7 @@
     pcibr_piomap_t          pcibr_piomap;
     iopaddr_t               xio_addr;
     xtalk_piomap_t          xtalk_piomap;
-    unsigned long           s;
+    unsigned long	    s;
 
     /* Make sure that the req sizes are non-zero */
     if ((req_size < 1) || (req_size_max < 1)) {
@@ -2453,7 +1314,7 @@
     pcibr_piomap->bp_pciaddr = pci_addr;
     pcibr_piomap->bp_mapsz = req_size;
     pcibr_piomap->bp_soft = pcibr_soft;
-    pcibr_piomap->bp_toc[0] = ATOMIC_INIT(0);
+    pcibr_piomap->bp_toc = ATOMIC_INIT(0);
 
     if (mapptr) {
 	s = pcibr_lock(pcibr_soft);
@@ -2480,7 +1341,7 @@
     }
     
     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
-		"pcibr_piomap_alloc: map=0x%x\n", pcibr_piomap));
+		"pcibr_piomap_alloc: map=0x%lx\n", pcibr_piomap));
 
     return pcibr_piomap;
 }
@@ -2490,7 +1351,7 @@
 pcibr_piomap_free(pcibr_piomap_t pcibr_piomap)
 {
     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
-		"pcibr_piomap_free: map=0x%x\n", pcibr_piomap));
+		"pcibr_piomap_free: map=0x%lx\n", pcibr_piomap));
 
     xtalk_piomap_free(pcibr_piomap->bp_xtalk_pio);
     pcibr_piomap->bp_xtalk_pio = 0;
@@ -2509,7 +1370,7 @@
 			     pci_addr - pcibr_piomap->bp_pciaddr,
 			     req_size);
     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
-                "pcibr_piomap_free: map=0x%x, addr=0x%x\n", 
+                "pcibr_piomap_addr: map=0x%lx, addr=0x%lx\n", 
 		pcibr_piomap, addr));
 
     return(addr);
@@ -2520,7 +1381,7 @@
 pcibr_piomap_done(pcibr_piomap_t pcibr_piomap)
 {
     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
-		"pcibr_piomap_done: map=0x%x\n", pcibr_piomap));
+		"pcibr_piomap_done: map=0x%lx\n", pcibr_piomap));
     xtalk_piomap_done(pcibr_piomap->bp_xtalk_pio);
 }
 
@@ -2533,7 +1394,7 @@
 		    size_t req_size,
 		    unsigned flags)
 {
-    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
     vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
@@ -2551,7 +1412,7 @@
 
     addr = xtalk_piotrans_addr(xconn_vhdl, 0, xio_addr, req_size, flags & PIOMAP_FLAGS);
     PCIBR_DEBUG((PCIBR_DEBUG_PIODIR, pconn_vhdl,
-		"pcibr_piotrans_addr: xio_addr=0x%x, addr=0x%x\n",
+		"pcibr_piotrans_addr: xio_addr=0x%lx, addr=0x%lx\n",
 		xio_addr, addr));
     return(addr);
 }
@@ -2578,7 +1439,7 @@
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
 
     pciio_piospace_t        piosp;
-    unsigned long           s;
+    unsigned long	    s;
 
     iopaddr_t               start_addr;
     size_t                  align_mask;
@@ -2586,7 +1447,7 @@
     /*
      * Check for proper alignment
      */
-    ASSERT(alignment >= NBPP);
+    ASSERT(alignment >= PAGE_SIZE);
     ASSERT((alignment & (alignment - 1)) == 0);
 
     align_mask = alignment - 1;
@@ -2595,7 +1456,8 @@
     /*
      * First look if a previously allocated chunk exists.
      */
-    if ((piosp = pcibr_info->f_piospace)) {
+    piosp = pcibr_info->f_piospace;
+    if (piosp) {
 	/*
 	 * Look through the list for a right sized free chunk.
 	 */
@@ -2647,7 +1509,7 @@
     if (!start_addr) {
 	pcibr_unlock(pcibr_soft, s);
 	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
-		    "pcibr_piospace_alloc: request 0x%x to big\n", req_size));
+		    "pcibr_piospace_alloc: request 0x%lx too big\n", req_size));
 	return 0;
     }
 
@@ -2662,11 +1524,12 @@
     pcibr_unlock(pcibr_soft, s);
 
     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
-		"pcibr_piospace_alloc: piosp=0x%x\n", piosp));
+		"pcibr_piospace_alloc: piosp=0x%lx\n", piosp));
 
     return start_addr;
 }
 
+#define ERR_MSG "!Device %s freeing size (0x%lx) different than allocated (0x%lx)"
 /*ARGSUSED */
 void
 pcibr_piospace_free(vertex_hdl_t pconn_vhdl,
@@ -2675,8 +1538,9 @@
 		    size_t req_size)
 {
     pcibr_info_t            pcibr_info = pcibr_info_get(pconn_vhdl);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
     pciio_piospace_t        piosp;
-    unsigned long           s;
+    unsigned long	    s;
     char                    name[1024];
 
     /*
@@ -2698,18 +1562,18 @@
 	     * Print a message and break;
 	     */
 	    hwgraph_vertex_name_get(pconn_vhdl, name, 1024);
-	    printk(KERN_WARNING  "pcibr_piospace_free: error");
-	    printk(KERN_WARNING  "Device %s freeing size (0x%lx) different than allocated (0x%lx)",
-					name, req_size, piosp->count);
-	    printk(KERN_WARNING  "Freeing 0x%lx instead", piosp->count);
+	    KERN_MSG(K_WARN, "!pcibr_piospace_free: error");
+	    KERN_MSG(K_WARN, ERR_MSG,
+		    name, req_size, piosp->count);
+	    KERN_MSG(K_WARN, "!Freeing 0x%lx instead", piosp->count);
 	    break;
 	}
 	piosp = piosp->next;
     }
 
     if (!piosp) {
-	printk(KERN_WARNING  
-		"pcibr_piospace_free: Address 0x%lx size 0x%lx - No match\n",
+	KERN_MSG(K_WARN,
+		"!pcibr_piospace_free: Address 0x%lx size 0x%lx - No match\n",
 		pciaddr, req_size);
 	pcibr_unlock(pcibr_soft, s);
 	return;
@@ -2718,7 +1582,7 @@
     pcibr_unlock(pcibr_soft, s);
 
     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
-		"pcibr_piospace_free: piosp=0x%x\n", piosp));
+		"pcibr_piospace_free: piosp=0x%lx\n", piosp));
     return;
 }
 
@@ -2755,9 +1619,7 @@
     iopaddr_t               attributes = 0;
 
     /* Sanity check: Bridge only allows use of VCHAN1 via 64-bit addrs */
-#ifdef LATER
     ASSERT_ALWAYS(!(flags & PCIBR_VCHAN1) || (flags & PCIIO_DMA_A64));
-#endif
 
     /* Generic macro flags
      */
@@ -2776,13 +1638,11 @@
     if (flags & PCIIO_NOPREFETCH)
 	attributes &= ~PCI64_ATTR_PREF;
 
-    /* the swap bit is in the address attributes for xbridge */
-    if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
-    	if (flags & PCIIO_BYTE_STREAM)
-        	attributes |= PCI64_ATTR_SWAP;
-    	if (flags & PCIIO_WORD_VALUES)
-        	attributes &= ~PCI64_ATTR_SWAP;
-    }
+    /* the swap bit is in the address attributes */
+    if (flags & PCIIO_BYTE_STREAM)
+	attributes |= PCI64_ATTR_SWAP;
+    if (flags & PCIIO_WORD_VALUES)
+	attributes &= ~PCI64_ATTR_SWAP;
 
     /* Provider-specific flags
      */
@@ -2806,7 +1666,7 @@
     if (flags & PCIBR_VCHAN0)
 	attributes &= ~PCI64_ATTR_VIRTUAL;
 
-    /* PIC in PCI-X mode only supports barrier & swap */
+    /* Bus in PCI-X mode only supports barrier & swap */
     if (IS_PCIX(pcibr_soft)) {
 	attributes &= (PCI64_ATTR_BAR | PCI64_ATTR_SWAP);
     }
@@ -2821,7 +1681,7 @@
 		   size_t req_size_max,
 		   unsigned flags)
 {
-    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
     vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
     pciio_slot_t            slot;
@@ -2832,6 +1692,7 @@
     int                     ate_count;
     int                     ate_index;
     int			    vchan = VCHAN0;
+    unsigned long	    s;
 
     /* merge in forced flags */
     flags |= pcibr_soft->bs_dma_flags;
@@ -2840,7 +1701,9 @@
      * On SNIA64, these maps are pre-allocated because pcibr_dmamap_alloc()
      * can be called within an interrupt thread.
      */
+    s = pcibr_lock(pcibr_soft);
     pcibr_dmamap = (pcibr_dmamap_t)get_free_pciio_dmamap(pcibr_soft->bs_vhdl);
+    pcibr_unlock(pcibr_soft, s);
 
     if (!pcibr_dmamap)
 	return 0;
@@ -2894,25 +1757,36 @@
 		    else
 			min_rrbs = 1;
 		    if (have_rrbs < min_rrbs)
-			do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
+			pcibr_rrb_alloc_more(pcibr_soft, slot, vchan,
 					       min_rrbs - have_rrbs);
 		}
 	    }
 	    PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
-		 	"pcibr_dmamap_alloc: using direct64, map=0x%x\n",
+		 	"pcibr_dmamap_alloc: using direct64, map=0x%lx\n",
 			pcibr_dmamap));
 	    return pcibr_dmamap;
 	}
 	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
 		    "pcibr_dmamap_alloc: unable to use direct64\n"));
 
-	/* PIC only supports 64-bit direct mapping in PCI-X mode. */
-	if (IS_PCIX(pcibr_soft)) {
+	/* PIC in PCI-X mode only supports 64-bit direct mapping so
+	 * don't fall thru and try 32-bit direct mapping or 32-bit
+	 * page mapping
+	 */
+	if (IS_PIC_SOFT(pcibr_soft) && IS_PCIX(pcibr_soft)) {
 	    DEL(pcibr_dmamap);
 	    return 0;
 	}
 
-	flags &= ~PCIIO_DMA_A64;
+	/* TIOCP in PCI-X mode supports 32-bit page mapping, but
+	 * not 32-bit direct mapping.  So fall thru and attempt
+	 * 32-bit page mapping.
+	 */
+	if (IS_TIOCP_SOFT(pcibr_soft) && IS_PCIX(pcibr_soft)) {
+	    flags &= ~(PCIIO_DMA_A64 | PCIIO_FIXED);
+	} else {
+	    flags &= ~PCIIO_DMA_A64;
+	}
     }
     if (flags & PCIIO_FIXED) {
 	/* warning: mappings may fail later,
@@ -2927,7 +1801,7 @@
 	     * is outside the direct32 range.
 	     */
 	    PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
-			"pcibr_dmamap_alloc: using direct32, map=0x%x\n", 
+			"pcibr_dmamap_alloc: using direct32, map=0x%lx\n", 
 			pcibr_dmamap));
 	    pcibr_dmamap->bd_flags = flags;
 	    pcibr_dmamap->bd_xio_addr = pcibr_soft->bs_dir_xbase;
@@ -2971,29 +1845,25 @@
 
 	    PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
 			"pcibr_dmamap_alloc: using PMU, ate_index=%d, "
-			"pcibr_dmamap=0x%x\n", ate_index, pcibr_dmamap));
+			"pcibr_dmamap=0x%lx\n", ate_index, pcibr_dmamap));
 
-	    ate_proto = pcibr_flags_to_ate(flags);
+	    ate_proto = pcibr_flags_to_ate(pcibr_soft, flags);
 
 	    pcibr_dmamap->bd_flags = flags;
 	    pcibr_dmamap->bd_pci_addr =
 		PCI32_MAPPED_BASE + IOPGSIZE * ate_index;
+
+	    if (flags & PCIIO_BYTE_STREAM)
+		ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
 	    /*
-	     * for xbridge the byte-swap bit == bit 29 of PCI address
+	     * If swap was set in bss_device in pcibr_endian_set()
+	     * we need to change the address bit.
 	     */
-	    if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
-		    if (flags & PCIIO_BYTE_STREAM)
-			    ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
-		    /*
-		     * If swap was set in bss_device in pcibr_endian_set()
-		     * we need to change the address bit.
-		     */
-		    if (pcibr_soft->bs_slot[slot].bss_device & 
-							BRIDGE_DEV_SWAP_PMU)
-			    ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
-		    if (flags & PCIIO_WORD_VALUES)
-			    ATE_SWAP_OFF(pcibr_dmamap->bd_pci_addr);
-	    }
+	    if (pcibr_soft->bs_slot[slot].bss_device & PCIBR_DEV_SWAP_DIR)
+		ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
+	    if (flags & PCIIO_WORD_VALUES)
+		ATE_SWAP_OFF(pcibr_dmamap->bd_pci_addr);
+
 	    pcibr_dmamap->bd_xio_addr = 0;
 	    pcibr_dmamap->bd_ate_ptr = pcibr_ate_addr(pcibr_soft, ate_index);
 	    pcibr_dmamap->bd_ate_index = ate_index;
@@ -3005,41 +1875,16 @@
 	    if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
 		have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
 		if (have_rrbs < 2) {
-		    if (ate_proto & ATE_PREF)
+		    if (ate_proto & PCIBR_ATE_PREF)
 			min_rrbs = 2;
 		    else
 			min_rrbs = 1;
 		    if (have_rrbs < min_rrbs)
-			do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
+			pcibr_rrb_alloc_more(pcibr_soft, slot, vchan,
 					       min_rrbs - have_rrbs);
 		}
 	    }
-	    if (ate_index >= pcibr_soft->bs_int_ate_size && 
-				!IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
-		bridge_t               *bridge = pcibr_soft->bs_base;
-		volatile unsigned      *cmd_regp;
-		unsigned                cmd_reg;
-		unsigned long           s;
-
-		pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_SSRAM;
-
-		s = pcibr_lock(pcibr_soft);
-		cmd_regp = pcibr_slot_config_addr(bridge, slot, 
-						PCI_CFG_COMMAND/4);
-		if ( IS_PIC_SOFT(pcibr_soft) ) {
-			cmd_reg = pcibr_slot_config_get(bridge, slot, PCI_CFG_COMMAND/4);
-		}
-		else {
-			if (io_get_sh_swapper(NASID_GET(bridge))) {
-				BRIDGE_REG_SET32((&cmd_reg)) = __swab32(*cmd_regp);
-			} else {
-				cmd_reg = pcibr_slot_config_get(bridge, slot, PCI_CFG_COMMAND/4);
-			}
-		}
-		pcibr_soft->bs_slot[slot].bss_cmd_pointer = cmd_regp;
-		pcibr_soft->bs_slot[slot].bss_cmd_shadow = cmd_reg;
-		pcibr_unlock(pcibr_soft, s);
-	    }
+
 	    return pcibr_dmamap;
 	}
 	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
@@ -3066,16 +1911,6 @@
     pciio_slot_t            slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
 							pcibr_dmamap->bd_slot);
 
-    unsigned                flags = pcibr_dmamap->bd_flags;
-
-    /* Make sure that bss_ext_ates_active
-     * is properly kept up to date.
-     */
-
-    if (PCIBR_DMAMAP_BUSY & flags)
-	if (PCIBR_DMAMAP_SSRAM & flags)
-	    atomic_dec(&(pcibr_soft->bs_slot[slot]. bss_ext_ates_active));
-
     xtalk_dmamap_free(pcibr_dmamap->bd_xtalk);
 
     if (pcibr_dmamap->bd_flags & PCIIO_DMA_A64) {
@@ -3089,7 +1924,7 @@
     }
 
     PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
-		"pcibr_dmamap_free: pcibr_dmamap=0x%x\n", pcibr_dmamap));
+		"pcibr_dmamap_free: pcibr_dmamap=0x%lx\n", pcibr_dmamap));
 
     free_pciio_dmamap(pcibr_dmamap);
 }
@@ -3131,28 +1966,31 @@
 	    pci_addr = xio_addr - PICBRIDGE1_PCI_MEM64_BASE;
 	    return pci_addr;
     	}
+    } else if (IS_TIOCP_SOFT(soft)) {
+	if ((xio_addr >= TIOCP_BRIDGE_PCI_MEM32_BASE) &&
+	    (xio_lim <= TIOCP_BRIDGE_PCI_MEM32_LIMIT)) {
+	    pci_addr = xio_addr - TIOCP_BRIDGE_PCI_MEM32_BASE;
+	    return pci_addr;
+	}
+	if ((xio_addr >= TIOCP_BRIDGE_PCI_MEM64_BASE) &&
+	    (xio_lim <= TIOCP_BRIDGE_PCI_MEM64_LIMIT)) {
+	    pci_addr = xio_addr - TIOCP_BRIDGE_PCI_MEM64_BASE;
+	    return pci_addr;
+    	}
     } else {
-    if ((xio_addr >= BRIDGE_PCI_MEM32_BASE) &&
-	(xio_lim <= BRIDGE_PCI_MEM32_LIMIT)) {
-	pci_addr = xio_addr - BRIDGE_PCI_MEM32_BASE;
-	return pci_addr;
-    }
-    if ((xio_addr >= BRIDGE_PCI_MEM64_BASE) &&
-	(xio_lim <= BRIDGE_PCI_MEM64_LIMIT)) {
-	pci_addr = xio_addr - BRIDGE_PCI_MEM64_BASE;
-	return pci_addr;
-    }
+	panic("pcibr_addr_xio_to_pci(): unknown bridge type");
     }
+
     for (slot = soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(soft); ++slot)
 	if ((xio_addr >= PCIBR_BRIDGE_DEVIO(soft, slot)) &&
 	    (xio_lim < PCIBR_BRIDGE_DEVIO(soft, slot + 1))) {
-	    bridgereg_t             dev;
+	    uint64_t		dev;
 
 	    dev = soft->bs_slot[slot].bss_device;
-	    pci_addr = dev & BRIDGE_DEV_OFF_MASK;
-	    pci_addr <<= BRIDGE_DEV_OFF_ADDR_SHFT;
+	    pci_addr = dev & PCIBR_DEV_OFF_MASK;
+	    pci_addr <<= PCIBR_DEV_OFF_ADDR_SHFT;
 	    pci_addr += xio_addr - PCIBR_BRIDGE_DEVIO(soft, slot);
-	    return (dev & BRIDGE_DEV_DEV_IO_MEM) ? pci_addr : PCI_NOWHERE;
+	    return (dev & PCIBR_DEV_DEV_IO_MEM) ? pci_addr : PCI_NOWHERE;
 	}
     return 0;
 }
@@ -3215,8 +2053,8 @@
 
 	PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, 
 		    pcibr_dmamap->bd_dev,
-		    "pcibr_dmamap_addr: (direct64): wanted paddr [0x%x..0x%x] "
-		    "XIO port 0x%x offset 0x%x, returning PCI 0x%x\n",
+		    "pcibr_dmamap_addr: (direct64): wanted paddr [0x%lx..0x%lx] "
+		    "XIO port 0x%x offset 0x%lx, returning PCI 0x%lx\n",
 		    paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));
 
     } else if (flags & PCIIO_FIXED) {
@@ -3232,7 +2070,7 @@
 	else if (xio_addr < pcibr_dmamap->bd_xio_addr)
 	    pci_addr = 0;		/* out of range */
 	else if ((xio_addr + req_size) >
-		 (pcibr_dmamap->bd_xio_addr + BRIDGE_DMA_DIRECT_SIZE))
+		 (pcibr_dmamap->bd_xio_addr + PCIBR_DMA_DIRECT_SIZE))
 	    pci_addr = 0;		/* out of range */
 	else
 	    pci_addr = pcibr_dmamap->bd_pci_addr +
@@ -3240,38 +2078,23 @@
 
 	PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, 
 		    pcibr_dmamap->bd_dev,
-		    "pcibr_dmamap_addr (direct32): wanted paddr [0x%x..0x%x] "
-		    "XIO port 0x%x offset 0x%x, returning PCI 0x%x\n",
+		    "pcibr_dmamap_addr (direct32): wanted paddr [0x%lx..0x%lx] "
+		    "XIO port 0x%x offset 0x%lx, returning PCI 0x%lx\n",
 		    paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));
 
     } else {
-	bridge_t               *bridge = pcibr_soft->bs_base;
 	iopaddr_t               offset = IOPGOFF(xio_addr);
 	bridge_ate_t            ate_proto = pcibr_dmamap->bd_ate_proto;
 	int                     ate_count = IOPG(offset + req_size - 1) + 1;
-
 	int                     ate_index = pcibr_dmamap->bd_ate_index;
-	unsigned                cmd_regs[8];
-	unsigned                s;
-
-#if PCIBR_FREEZE_TIME
-	int                     ate_total = ate_count;
-	unsigned                freeze_time;
-#endif
-	bridge_ate_p            ate_ptr = pcibr_dmamap->bd_ate_ptr;
 	bridge_ate_t            ate;
 
-	/* Bridge Hardware WAR #482836:
-	 * If the transfer is not cache aligned
-	 * and the Bridge Rev is <= B, force
-	 * prefetch to be off.
-	 */
-	if (flags & PCIBR_NOPREFETCH)
-	    ate_proto &= ~ATE_PREF;
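+	/* build the ATE: prototype attributes plus the page-aligned xtalk address */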
+	ate = ate_proto | (xio_addr - offset);
 
-	ate = ate_proto
-	    | (xio_port << ATE_TIDSHIFT)
-	    | (xio_addr - offset);
+	/* If PIC, put the targetid in the ATE */
+	if (IS_PIC_SOFT(pcibr_soft)) {
+	    ate |= (xio_port << PIC_ATE_TARGETID_SHFT);
+	}
 
 	pci_addr = pcibr_dmamap->bd_pci_addr + offset;
 
@@ -3282,22 +2105,11 @@
 
 	ASSERT(ate_count > 0);
 	if (ate_count <= pcibr_dmamap->bd_ate_count) {
-		ATE_FREEZE();
-		ATE_WRITE();
-		ATE_THAW();
-		if ( IS_PIC_SOFT(pcibr_soft) ) {
-			bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
-		}
-		else {
-			if (io_get_sh_swapper(NASID_GET(bridge))) {
-				BRIDGE_REG_GET32((&bridge->b_wid_tflush));
-			} else {
-				bridge->b_wid_tflush;
-			}
-		}
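+		/* program ate_count translation entries starting at ate_index */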
+		ate_write(pcibr_soft, pcibr_dmamap, ate_index, ate_count, ate);
+
 		PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
 			    "pcibr_dmamap_addr (PMU) : wanted paddr "
-			    "[0x%x..0x%x] returning PCI 0x%x\n", 
+			    "[0x%lx..0x%lx] returning PCI 0x%lx\n", 
 			    paddr, paddr + req_size - 1, pci_addr));
 
 	} else {
@@ -3311,7 +2123,7 @@
 		 */
 		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev, 
 		            "pcibr_dmamap_addr (PMU) : wanted paddr "
-			    "[0x%x..0x%x] ate_count 0x%x bd_ate_count 0x%x "
+			    "[0x%lx..0x%lx] ate_count 0x%x bd_ate_count 0x%x "
 			    "ATE's required > number allocated\n",
 			     paddr, paddr + req_size - 1,
 			     ate_count, pcibr_dmamap->bd_ate_count));
@@ -3326,23 +2138,10 @@
 void
 pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
 {
-    /*
-     * We could go through and invalidate ATEs here;
-     * for performance reasons, we don't.
-     * We also don't enforce the strict alternation
-     * between _addr/_list and _done, but Hub does.
-     */
-
-    if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
-	pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
-
-	if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
-	    atomic_dec(&(pcibr_dmamap->bd_soft->bs_slot[pcibr_dmamap->bd_slot]. bss_ext_ates_active));
-    }
     xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
 
     PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
-		"pcibr_dmamap_done: pcibr_dmamap=0x%x\n", pcibr_dmamap));
+		"pcibr_dmamap_done: pcibr_dmamap=0x%lx\n", pcibr_dmamap));
 }
 
 
@@ -3358,7 +2157,7 @@
 pcibr_get_dmatrans_node(vertex_hdl_t pconn_vhdl)
 {
 
-	pciio_info_t	pciio_info = pciio_info_get(pconn_vhdl);
+	pciio_info_t	pciio_info = pciio_hostinfo_get(pconn_vhdl);
 	pcibr_soft_t	pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
 
 	return(NASID_TO_COMPACT_NODEID(NASID_GET(pcibr_soft->bs_dir_xbase)));
@@ -3372,7 +2171,7 @@
 		    size_t req_size,
 		    unsigned flags)
 {
-    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
     vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
@@ -3391,10 +2190,11 @@
 
     xio_addr = xtalk_dmatrans_addr(xconn_vhdl, 0, paddr, req_size,
 				   flags & DMAMAP_FLAGS);
+
     if (!xio_addr) {
 	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
-		    "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
-		    "xtalk_dmatrans_addr failed with 0x%x\n",
+		    "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
+		    "xtalk_dmatrans_addr failed with 0x%lx\n",
 		    paddr, paddr + req_size - 1, xio_addr));
 	return 0;
     }
@@ -3404,7 +2204,7 @@
     if (XIO_PACKED(xio_addr)) {
 	if (xio_addr == XIO_NOWHERE) {
 	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
-		        "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
+		        "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
 		        "xtalk_dmatrans_addr failed with XIO_NOWHERE\n",
 		        paddr, paddr + req_size - 1));
 	    return 0;
@@ -3425,8 +2225,8 @@
     if (xio_port == pcibr_soft->bs_xid) {
 	pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
         PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
-		    "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
-		    "xio_port=0x%x, pci_addr=0x%x\n",
+		    "pcibr_dmatrans_addr:  wanted paddr [0x%lx..0x%lx], "
+		    "xio_port=0x%x, pci_addr=0x%lx\n",
 		    paddr, paddr + req_size - 1, xio_port, pci_addr));
 	return pci_addr;
     }
@@ -3444,23 +2244,20 @@
 	if ((pci_addr != PCIBR_D64_BASE_UNSET) &&
 	    (flags == slotp->bss_d64_flags)) {
 
-	    pci_addr |=  xio_addr
+	    pci_addr |= xio_addr
 		| ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
 
-#if HWG_PERF_CHECK
-	    if (xio_addr != 0x20000000)
-#endif
-		PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
-			    "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
-			    "xio_port=0x%x, direct64: pci_addr=0x%x\n",
-			    paddr, paddr + req_size - 1, xio_addr, pci_addr));
+	    PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
+			"pcibr_dmatrans_addr:  wanted paddr [0x%lx..0x%lx], "
+			"xio_port=0x%x, direct64: pci_addr=0x%lx\n",
+			paddr, paddr + req_size - 1, xio_addr, pci_addr));
 	    return (pci_addr);
 	}
 	if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS)) {
 	    pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
 	    slotp->bss_d64_flags = flags;
 	    slotp->bss_d64_base = pci_addr;
-            pci_addr |= xio_addr
+	    pci_addr |= xio_addr
 		| ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
 
 	    /* If in PCI mode, make sure we have an RRB (or two).
@@ -3476,34 +2273,39 @@
 		    else
 			min_rrbs = 1;
 		    if (have_rrbs < min_rrbs)
-			do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, vchan,
+			pcibr_rrb_alloc_more(pcibr_soft, pciio_slot, vchan,
 					       min_rrbs - have_rrbs);
 		}
 	    }
-#if HWG_PERF_CHECK
-	    if (xio_addr != 0x20000000)
-#endif
-		PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
-			    "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
-			    "xio_port=0x%x, direct64: pci_addr=0x%x, "
-			    "new flags: 0x%x\n", paddr, paddr + req_size - 1,
-			    xio_addr, pci_addr, (uint64_t) flags));
+	    PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
+			"pcibr_dmatrans_addr:  wanted paddr [0x%lx..0x%lx], "
+			"xio_port=0x%x, direct64: pci_addr=0x%lx, "
+			"new flags: 0x%x\n", paddr, paddr + req_size - 1,
+			xio_addr, pci_addr, (uint64_t) flags));
 	    return (pci_addr);
 	}
 
 	PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
-		    "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
+		    "pcibr_dmatrans_addr:  wanted paddr [0x%lx..0x%lx], "
 		    "xio_port=0x%x, Unable to set direct64 Device(x) bits\n",
 		    paddr, paddr + req_size - 1, xio_addr));
 
-	/* PIC only supports 64-bit direct mapping in PCI-X mode */
+	/* Bus in PCI-X mode only supports 64-bit direct mapping so
+	 * don't fall thru and try 32-bit direct mapping
+	 */
 	if (IS_PCIX(pcibr_soft)) {
 	    return 0;
 	}
 
 	/* our flags conflict with Device(x). try direct32*/
 	flags = flags & ~(PCIIO_DMA_A64 | PCIBR_VCHAN0);
+    } else {
+	/* Bus in PCI-X mode only supports 64-bit direct mapping */
+	if (IS_PCIX(pcibr_soft)) {
+	    return 0;
+	}
     }
+
     /* Try to satisfy the request with the 32-bit direct
      * map. This can fail if the configuration bits in
      * Device(x) conflict with our flags, or if the
@@ -3521,7 +2323,7 @@
 	    (endoff > map_size)) {
 
 	    PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
-			"pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
+			"pcibr_dmatrans_addr:  wanted paddr [0x%lx..0x%lx], "
 			"xio_port=0x%x, xio region outside direct32 target\n",
 			paddr, paddr + req_size - 1, xio_addr));
 	} else {
@@ -3532,8 +2334,8 @@
 		pci_addr |= offset;
 
 		PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
-                            "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
-                            "xio_port=0x%x, direct32: pci_addr=0x%x\n",
+                            "pcibr_dmatrans_addr:  wanted paddr [0x%lx..0x%lx],"
+                            " xio_port=0x%x, direct32: pci_addr=0x%lx\n",
                             paddr, paddr + req_size - 1, xio_addr, pci_addr));
 
 		return (pci_addr);
@@ -3550,21 +2352,18 @@
 		if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
 		    have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot][vchan];
 		    if (have_rrbs < 2) {
-			if (slotp->bss_device & BRIDGE_DEV_PREF)
+			if (slotp->bss_device & PCIBR_DEV_PREF)
 			    min_rrbs = 2;
 			else
 			    min_rrbs = 1;
 			if (have_rrbs < min_rrbs)
-			    do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, 
+			    pcibr_rrb_alloc_more(pcibr_soft, pciio_slot, 
 						   vchan, min_rrbs - have_rrbs);
 		    }
 		}
-#if HWG_PERF_CHECK
-		if (xio_addr != 0x20000000)
-#endif
-                    PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
-                            "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
-                            "xio_port=0x%x, direct32: pci_addr=0x%x, "
+		PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
+                            "pcibr_dmatrans_addr:  wanted paddr [0x%lx..0x%lx],"
+                            " xio_port=0x%x, direct32: pci_addr=0x%lx, "
 			    "new flags: 0x%x\n", paddr, paddr + req_size - 1,
 			    xio_addr, pci_addr, (uint64_t) flags));
 
@@ -3573,14 +2372,14 @@
 	    /* our flags conflict with Device(x).
 	     */
 	    PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
-                    "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
+                    "pcibr_dmatrans_addr:  wanted paddr [0x%lx..0x%lx], "
                     "xio_port=0x%x, Unable to set direct32 Device(x) bits\n",
                     paddr, paddr + req_size - 1, xio_port));
 	}
     }
 
     PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
-		"pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
+		"pcibr_dmatrans_addr:  wanted paddr [0x%lx..0x%lx], "
 		"xio_port=0x%x, No acceptable PCI address found\n",
 		paddr, paddr + req_size - 1, xio_port));
 
@@ -3598,7 +2397,7 @@
 		    paddr_t paddr,
 		    size_t bytes)
 {
-    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
     vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
 
@@ -3609,7 +2408,7 @@
 pcibr_dmalist_drain(vertex_hdl_t pconn_vhdl,
 		    alenlist_t list)
 {
-    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
     vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
 
@@ -3643,23 +2442,102 @@
 {
 }
 
+#ifdef PCI_HOTPLUG
+int
+pcibr_reset(vertex_hdl_t conn)
+{
+    pciio_info_t            pciio_info = pciio_hostinfo_get(conn);
+    pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    pci_bridge_t	   *bridge = pcibr_soft->bs_base;
+#ifndef SN1
+    timespec_t              ts;
+#endif
+    unsigned                cfgctl[8];
+    unsigned long	    s;
+    int                     f, nf;
+    pcibr_info_h            pcibr_infoh;
+    pcibr_info_t            pcibr_info;
+    int                     win;
+    int                     error = 0;
+
+    if (pcibr_soft->bs_slot[pciio_slot].has_host) {
+	pciio_slot = pcibr_soft->bs_slot[pciio_slot].host_slot;
+	pcibr_info = pcibr_soft->bs_slot[pciio_slot].bss_infos[0];
+    }
+
+    if ((pciio_slot >= pcibr_soft->bs_first_slot) &&
+        (pciio_slot <= pcibr_soft->bs_last_reset)) {
+	s = pcibr_lock(pcibr_soft);
+	nf = pcibr_soft->bs_slot[pciio_slot].bss_ninfo;
+	pcibr_infoh = pcibr_soft->bs_slot[pciio_slot].bss_infos;
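+	/* save each function's PCI command register so it can be restored after reset */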
+	for (f = 0; f < nf; ++f)
+	    if (pcibr_infoh[f])
+		cfgctl[f] = pcibr_func_config_get(bridge, pciio_slot, f,
+							PCI_CFG_COMMAND);
+
+#ifdef SN1 
+        error = iobrick_pci_slot_rst(pcibr_soft->bs_l1sc,
+                             pcibr_widget_to_bus(pcibr_soft->bs_vhdl),
+                             PCIBR_DEVICE_TO_SLOT(pcibr_soft,pciio_slot),
+                             NULL,
+			     pcibr_soft->bs_xid);
+#else /* SN1 */
+        ts.tv_sec = 0;                    /* 0 secs */
+        ts.tv_nsec = 1 * (1000 * 1000);   /* 1 msecs */
+
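+	/* pulse the slot's reset pin, waiting ~1 ms after each transition */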
+	pcireg_control_bit_clr(pcibr_soft, PCIBR_CTRL_RST_PIN(pciio_slot));
+	nano_delay(&ts);
+	pcireg_control_bit_set(pcibr_soft, PCIBR_CTRL_RST_PIN(pciio_slot));	
+        nano_delay(&ts);
+#endif /* SN1 */
+
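+	/* restore each function's saved BAR windows, then its command register */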
+	for (f = 0; f < nf; ++f)
+	    if ((pcibr_info = pcibr_infoh[f]) != NULL)
+		for (win = 0; win < 6; ++win)
+		    if (pcibr_info->f_window[win].w_base != 0)
+			pcibr_func_config_set(bridge, pciio_slot, f,
+					PCI_CFG_BASE_ADDR(win), 
+					pcibr_info->f_window[win].w_base);
+	for (f = 0; f < nf; ++f)
+	    if (pcibr_infoh[f])
+		pcibr_func_config_set(bridge, pciio_slot, f,
+					PCI_CFG_COMMAND,
+					cfgctl[f]);
+	pcibr_unlock(pcibr_soft, s);
+
+        if (error)
+            return(-1);
+
+	return 0;
+    }
+
+    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, conn,
+    		"pcibr_reset unimplemented for slot %d\n", conn, pciio_slot));
+    return -1;
+}
+
+#else /* PCI_HOTPLUG */
+
 int
 pcibr_reset(vertex_hdl_t conn)
 {
 	BUG();
-	return -1;
+	return(-1);
 }
 
+#endif /* PCI_HOTPLUG */
+
 pciio_endian_t
 pcibr_endian_set(vertex_hdl_t pconn_vhdl,
 		 pciio_endian_t device_end,
 		 pciio_endian_t desired_end)
 {
-    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-    bridgereg_t             devreg;
-    unsigned long           s;
+    uint64_t		    devreg;
+    unsigned long	    s;
 
     /*
      * Bridge supports hardware swapping; so we can always
@@ -3669,137 +2547,26 @@
     s = pcibr_lock(pcibr_soft);
     devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
     if (device_end != desired_end)
-	devreg |= BRIDGE_DEV_SWAP_BITS;
+	devreg |= PCIBR_DEV_SWAP_DIR;
     else
-	devreg &= ~BRIDGE_DEV_SWAP_BITS;
+	devreg &= ~PCIBR_DEV_SWAP_DIR;
 
     /* NOTE- if we ever put SWAP bits
      * onto the disabled list, we will
      * have to change the logic here.
      */
     if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
-	bridge_t               *bridge = pcibr_soft->bs_base;
-
-	if ( IS_PIC_SOFT(pcibr_soft) ) {
-		bridge->b_device[pciio_slot].reg = devreg;
-		pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
-		bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
-	}
-	else {
-		if (io_get_sh_swapper(NASID_GET(bridge))) {
-			BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
-			pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
-			BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
-		} else {
-			bridge->b_device[pciio_slot].reg = devreg;
-			pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
-			bridge->b_wid_tflush;           /* wait until Bridge PIO complete */
-		}
-	}
-    }
-    pcibr_unlock(pcibr_soft, s);
-
-    printk("pcibr_endian_set: Device(%d): %x\n", pciio_slot, devreg);
-    return desired_end;
-}
-
-/* This (re)sets the GBR and REALTIME bits and also keeps track of how
- * many sets are outstanding. Reset succeeds only if the number of outstanding
- * sets == 1.
- */
-int
-pcibr_priority_bits_set(pcibr_soft_t pcibr_soft,
-			pciio_slot_t pciio_slot,
-			pciio_priority_t device_prio)
-{
-    unsigned long           s;
-    int                    *counter;
-    bridgereg_t             rtbits = 0;
-    bridgereg_t             devreg;
-    int                     rc = PRIO_SUCCESS;
-
-    /* in dual-slot configurations, the host and the
-     * guest have separate DMA resources, so they
-     * have separate requirements for priority bits.
-     */
-
-    counter = &(pcibr_soft->bs_slot[pciio_slot].bss_pri_uctr);
-
-    /*
-     * Bridge supports PCI notions of LOW and HIGH priority
-     * arbitration rings via a "REAL_TIME" bit in the per-device
-     * Bridge register. The "GBR" bit controls access to the GBR
-     * ring on the xbow. These two bits are (re)set together.
-     *
-     * XXX- Bug in Rev B Bridge Si:
-     * Symptom: Prefetcher starts operating incorrectly. This happens
-     * due to corruption of the address storage ram in the prefetcher
-     * when a non-real time PCI request is pulled and a real-time one is
-     * put in it's place. Workaround: Use only a single arbitration ring
-     * on PCI bus. GBR and RR can still be uniquely used per
-     * device. NETLIST MERGE DONE, WILL BE FIXED IN REV C.
-     */
-
-    if (pcibr_soft->bs_rev_num != BRIDGE_PART_REV_B)
-	rtbits |= BRIDGE_DEV_RT;
-
-    /* NOTE- if we ever put DEV_RT or DEV_GBR on
-     * the disabled list, we will have to take
-     * it into account here.
-     */
-
-    s = pcibr_lock(pcibr_soft);
-    devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
-    if (device_prio == PCI_PRIO_HIGH) {
-	if ((++*counter == 1)) {
-	    if (rtbits)
-		devreg |= rtbits;
-	    else
-		rc = PRIO_FAIL;
-	}
-    } else if (device_prio == PCI_PRIO_LOW) {
-	if (*counter <= 0)
-	    rc = PRIO_FAIL;
-	else if (--*counter == 0)
-	    if (rtbits)
-		devreg &= ~rtbits;
-    }
-    if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
-	bridge_t               *bridge = pcibr_soft->bs_base;
-
-	if ( IS_PIC_SOFT(pcibr_soft) ) {
-		bridge->b_device[pciio_slot].reg = devreg;
-		pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
-		bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
-	}
-	else {
-		if (io_get_sh_swapper(NASID_GET(bridge))) {
-			BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
-			pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
-			BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
-		} else {
-			bridge->b_device[pciio_slot].reg = devreg;
-			pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
-			bridge->b_wid_tflush;           /* wait until Bridge PIO complete */
-		}
-	}
+	pcireg_device_set(pcibr_soft, pciio_slot, devreg);
+	pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
+	pcireg_tflush_get(pcibr_soft);
     }
     pcibr_unlock(pcibr_soft, s);
 
-    return rc;
-}
-
-pciio_priority_t
-pcibr_priority_set(vertex_hdl_t pconn_vhdl,
-		   pciio_priority_t device_prio)
-{
-    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
-    pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
-    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
-    (void) pcibr_priority_bits_set(pcibr_soft, pciio_slot, device_prio);
+    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
+    		"pcibr_endian_set: Device(%d): 0x%x\n",
+		pciio_slot, devreg));
 
-    return device_prio;
+    return desired_end;
 }
 
 /*
@@ -3817,73 +2584,62 @@
 pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
 		       pcibr_device_flags_t flags)
 {
-    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-    bridgereg_t             set = 0;
-    bridgereg_t             clr = 0;
+    uint64_t		    set = 0;
+    uint64_t		    clr = 0;
 
     ASSERT((flags & PCIBR_DEVICE_FLAGS) == flags);
 
     if (flags & PCIBR_WRITE_GATHER)
-	set |= BRIDGE_DEV_PMU_WRGA_EN;
+	set |= PCIBR_DEV_DIR_WRGA_EN;
     if (flags & PCIBR_NOWRITE_GATHER)
-	clr |= BRIDGE_DEV_PMU_WRGA_EN;
-
-    if (flags & PCIBR_WRITE_GATHER)
-	set |= BRIDGE_DEV_DIR_WRGA_EN;
-    if (flags & PCIBR_NOWRITE_GATHER)
-	clr |= BRIDGE_DEV_DIR_WRGA_EN;
+	clr |= PCIBR_DEV_DIR_WRGA_EN;
 
     if (flags & PCIBR_PREFETCH)
-	set |= BRIDGE_DEV_PREF;
+	set |= PCIBR_DEV_PREF;
     if (flags & PCIBR_NOPREFETCH)
-	clr |= BRIDGE_DEV_PREF;
+	clr |= PCIBR_DEV_PREF;
 
     if (flags & PCIBR_PRECISE)
-	set |= BRIDGE_DEV_PRECISE;
+	set |= PCIBR_DEV_PRECISE;
     if (flags & PCIBR_NOPRECISE)
-	clr |= BRIDGE_DEV_PRECISE;
+	clr |= PCIBR_DEV_PRECISE;
 
     if (flags & PCIBR_BARRIER)
-	set |= BRIDGE_DEV_BARRIER;
+	set |= PCIBR_DEV_BARRIER;
     if (flags & PCIBR_NOBARRIER)
-	clr |= BRIDGE_DEV_BARRIER;
+	clr |= PCIBR_DEV_BARRIER;
 
     if (flags & PCIBR_64BIT)
-	set |= BRIDGE_DEV_DEV_SIZE;
+	set |= PCIBR_DEV_DEV_SIZE;
     if (flags & PCIBR_NO64BIT)
-	clr |= BRIDGE_DEV_DEV_SIZE;
+	clr |= PCIBR_DEV_DEV_SIZE;
+
+    /* PIC BRINGUP WAR (PV# 878674):   Don't allow 64bit PIO accesses */
+    if (IS_PIC_SOFT(pcibr_soft) && (flags & PCIBR_64BIT) &&
+				PCIBR_WAR_ENABLED(PV878674, pcibr_soft)) {
+	set &= ~PCIBR_DEV_DEV_SIZE;
+    }
 
     if (set || clr) {
-	bridgereg_t             devreg;
-	unsigned long           s;
+	uint64_t		devreg;
+	unsigned long		s;
 
 	s = pcibr_lock(pcibr_soft);
 	devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
 	devreg = (devreg & ~clr) | set;
 	if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
-	    bridge_t               *bridge = pcibr_soft->bs_base;
-
-	    if ( IS_PIC_SOFT(pcibr_soft) ) {
-		bridge->b_device[pciio_slot].reg = devreg;
-		pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
-		bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
-	    }
-	    else {
-		if (io_get_sh_swapper(NASID_GET(bridge))) {
-			BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
-			pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
-			BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
-		} else {
-			bridge->b_device[pciio_slot].reg = devreg;
-			pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
-			bridge->b_wid_tflush;       /* wait until Bridge PIO complete */
-		}
-	    }
+	    pcireg_device_set(pcibr_soft, pciio_slot, devreg);
+	    pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
+	    pcireg_tflush_get(pcibr_soft);
 	}
 	pcibr_unlock(pcibr_soft, s);
-	printk("pcibr_device_flags_set: Device(%d): %x\n", pciio_slot, devreg);
+
+	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
+		    "pcibr_device_flags_set: Device(%d): 0x%x\n",
+		    pciio_slot, devreg));
     }
     return (1);
 }
@@ -3908,10 +2664,10 @@
 
     if (pcibr_soft->bs_pcix_num_funcs) {
 	if (pcibr_soft->bs_pcix_num_funcs > NUM_RBAR) {
-	    printk(KERN_WARNING
-		"%lx: Must oversubscribe Read Buffer Attribute Registers"
+	    KERN_MSG(K_WARN,
+		"%s: Must oversubscribe Read Buffer Attribute Registers"
 		"(RBAR).  Bus has %d RBARs but %d funcs need them.\n",
-		(unsigned long)pcibr_soft->bs_vhdl, NUM_RBAR, pcibr_soft->bs_pcix_num_funcs);
+		pcibr_soft->bs_name, NUM_RBAR, pcibr_soft->bs_pcix_num_funcs);
 	    percent_allowed = 0;
 	} else {
 	    percent_allowed = (((NUM_RBAR-pcibr_soft->bs_pcix_num_funcs)*100) /
@@ -3934,58 +2690,6 @@
     return(percent_allowed);
 }
 
-pciio_provider_t        pcibr_provider =
-{
-    (pciio_piomap_alloc_f *) pcibr_piomap_alloc,
-    (pciio_piomap_free_f *) pcibr_piomap_free,
-    (pciio_piomap_addr_f *) pcibr_piomap_addr,
-    (pciio_piomap_done_f *) pcibr_piomap_done,
-    (pciio_piotrans_addr_f *) pcibr_piotrans_addr,
-    (pciio_piospace_alloc_f *) pcibr_piospace_alloc,
-    (pciio_piospace_free_f *) pcibr_piospace_free,
-
-    (pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
-    (pciio_dmamap_free_f *) pcibr_dmamap_free,
-    (pciio_dmamap_addr_f *) pcibr_dmamap_addr,
-    (pciio_dmamap_done_f *) pcibr_dmamap_done,
-    (pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
-    (pciio_dmamap_drain_f *) pcibr_dmamap_drain,
-    (pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
-    (pciio_dmalist_drain_f *) pcibr_dmalist_drain,
-
-    (pciio_intr_alloc_f *) pcibr_intr_alloc,
-    (pciio_intr_free_f *) pcibr_intr_free,
-    (pciio_intr_connect_f *) pcibr_intr_connect,
-    (pciio_intr_disconnect_f *) pcibr_intr_disconnect,
-    (pciio_intr_cpu_get_f *) pcibr_intr_cpu_get,
-
-    (pciio_provider_startup_f *) pcibr_provider_startup,
-    (pciio_provider_shutdown_f *) pcibr_provider_shutdown,
-    (pciio_reset_f *) pcibr_reset,
-    (pciio_write_gather_flush_f *) pcibr_write_gather_flush,
-    (pciio_endian_set_f *) pcibr_endian_set,
-    (pciio_priority_set_f *) pcibr_priority_set,
-    (pciio_config_get_f *) pcibr_config_get,
-    (pciio_config_set_f *) pcibr_config_set,
-    (pciio_error_devenable_f *) 0,
-    (pciio_error_extract_f *) 0,
-    (pciio_driver_reg_callback_f *) 0,
-    (pciio_driver_unreg_callback_f *) 0,
-    (pciio_device_unregister_f 	*) pcibr_device_unregister,
-    (pciio_dma_enabled_f		*) pcibr_dma_enabled,
-};
-
-int
-pcibr_dma_enabled(vertex_hdl_t pconn_vhdl)
-{
-    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
-    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-	
-
-    return xtalk_dma_enabled(pcibr_soft->bs_conn);
-}
-
-
 /*
  * pcibr_debug() is used to print pcibr debug messages to the console.  A
  * user enables tracing by setting the following global variables:
@@ -4007,9 +2711,12 @@
 {
     char hwpath[MAXDEVNAME] = "\0";
     char copy_of_hwpath[MAXDEVNAME];
+    char buffer[1024];
     char *module = "all";
-    short widget = -1;
+    short widget = -1;
+    short corelet = -1;
     short slot = -1;
+    short xtalk = 0;	/* xtalk or coretalk? */
     va_list ap;
 
     if (pcibr_debug_mask & type) {
@@ -4018,56 +2725,124 @@
                 char *cp;
 
                 if (strcmp(module, pcibr_debug_module)) {
-		    /* use a copy */
+		    /* strsep() modifies the string, so use a copy */
                     (void)strcpy(copy_of_hwpath, hwpath);
-                    cp = strstr(copy_of_hwpath, "/module/");
+		    cp = strstr(copy_of_hwpath, "/" EDGE_LBL_MODULE "/");
                     if (cp) {
-                        cp += strlen("/module");
-                        module = strsep(&cp, "/");
+                        cp += strlen("/" EDGE_LBL_MODULE "/");
+			module = strsep(&cp, "/");
                     }
                 }
                 if (pcibr_debug_widget != -1) {
-                    cp = strstr(hwpath, "/xtalk/");
+		    cp = strstr(hwpath, "/" EDGE_LBL_XTALK "/");
                     if (cp) {
-                        cp += strlen("/xtalk/");
+			xtalk++;
+			cp += strlen("/" EDGE_LBL_XTALK "/");
+
                         widget = simple_strtoul(cp, NULL, 0);
-                    }
+                    } else {
+			cp = strstr(hwpath, "/" EDGE_LBL_CORETALK "/");
+			if (cp) {
+			    cp += strlen("/" EDGE_LBL_CORETALK "/");
+			    corelet = simple_strtoul(cp, NULL, 0);
+			}
+		    }
                 }
                 if (pcibr_debug_slot != -1) {
-                    cp = strstr(hwpath, "/pci/");
+		    cp = strstr(hwpath, "/" EDGE_LBL_PCIX_0 "/");
+		    if (!cp) {
+			cp = strstr(hwpath, "/" EDGE_LBL_PCIX_1 "/");
+		    }
                     if (cp) {
-                        cp += strlen("/pci/");
+                        cp += strlen("/" EDGE_LBL_PCIX_0 "/");
                         slot = simple_strtoul(cp, NULL, 0);
                     }
                 }
             }
         }
+
         if ((vhdl == NULL) ||
             (!strcmp(module, pcibr_debug_module) &&
-             (widget == pcibr_debug_widget) &&
+             ((xtalk ? widget : corelet) == pcibr_debug_widget) &&
              (slot == pcibr_debug_slot))) {
-#ifdef LATER
-            printk("PCIBR_DEBUG<%d>\t: %s :", cpuid(), hwpath);
-#else
-            printk("PCIBR_DEBUG\t: %s :", hwpath);
-#endif
+            printk("PCIBR_DEBUG<%d>\t: %s :", smp_processor_id(), hwpath);
 	    /*
-	     * Kernel printk translates to this 3 line sequence.
-	     * Since we have a variable length argument list, we
-	     * need to call printk this way rather than directly
+	     * KERN_MSG translates to this 3 line sequence. Since
+	     * we have a variable length argument list, we need to
+	     * call KERN_MSG this way rather than directly
 	     */
-	    {
-		char buffer[500];
-
-		va_start(ap, format);
-		vsnprintf(buffer, 500, format, ap);
-		va_end(ap);
-		buffer[499] = (char)0;	/* just to be safe */
-		printk("%s", buffer);
-	    }
+	    va_start(ap, format);
+	    vsnprintf(buffer, sizeof(buffer), format, ap);
+	    va_end(ap);
+	    KERN_MSG("", "%s", buffer);
         }
     }
 }
+
+/*
+ * Return generic bus info.  Note that some/all bs_businfo information is
+ * not kept up to date in real time, so all fields should be filled in here.
+ */
+
+pciio_businfo_t
+pcibr_businfo_get(vertex_hdl_t conn)
+{
+	pciio_info_t	 info = pciio_info_get(conn);
+	pcibr_soft_t	 soft = (pcibr_soft_t)pciio_info_mfast_get(info);
+	pciio_businfo_t  businfo = &soft->bs_businfo;
+
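+	/* report which bridge ASIC backs this bus */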
+	if (IS_PIC_SOFT(soft)) {
+	    businfo->bi_asic_type = PCIIO_ASIC_TYPE_PIC;
+	} else if (IS_TIOCP_SOFT(soft)) {
+	    businfo->bi_asic_type = PCIIO_ASIC_TYPE_TIOCP;
+	} else {
+	    businfo->bi_asic_type = PCIIO_ASIC_TYPE_UNKNOWN;
+	}
+
+	if (IS_PCI(soft)) {
+	    businfo->bi_multi_master = 0;/* Bridge & its derivatives do not */
+					 /* generally support multiple DMA */
+					 /* masters behind a single slot due */
+					 /* to RRB thrashing issues. */
+	    businfo->bi_bus_type = PCIIO_BUS_TYPE_PCI;
+	} else {
+	    businfo->bi_multi_master = 1; /* no RRB issues for PCI-X */
+	    businfo->bi_bus_type = PCIIO_BUS_TYPE_PCIX;
+	}
+	if (IS_33MHZ(soft))
+	    businfo->bi_bus_speed = PCIIO_BUS_SPEED_33;
+	if (IS_66MHZ(soft))
+	    businfo->bi_bus_speed = PCIIO_BUS_SPEED_66;
+	if (IS_100MHZ(soft))
+	    businfo->bi_bus_speed = PCIIO_BUS_SPEED_100;
+	if (IS_133MHZ(soft))
+	    businfo->bi_bus_speed = PCIIO_BUS_SPEED_133;
+
+	return businfo;
+}
+
+/*
+ * Given an xconn_vhdl and a bus number under that widget, return a
+ * pci_bridge_t pointer.
+ */
+pci_bridge_t *
+pcibr_bridge_ptr_get(vertex_hdl_t widget_vhdl, int bus_num)
+{
+    pci_bridge_t       *bridge;
+
+    bridge = (pci_bridge_t *)xtalk_piotrans_addr(widget_vhdl, 0, 0,
+							sizeof(*bridge), 0);
+
+    /* PIC ASIC has two bridges (i.e. two busses) under a single widget */
+    if (is_pic(bridge)) {
+	if (bus_num == 1) {
+	    bridge = (pci_bridge_t *)((char *)bridge + PIC_BUS1_OFFSET);
+	}
+    }
+
+    return (bridge);
+}
+
 
 int
 isIO9(nasid_t nasid) {
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_error.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_error.c
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_error.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_error.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -8,28 +7,19 @@
  */
 
 #include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/interrupt.h>
 #include <asm/sn/sgi.h>
-#include <asm/sn/sn_cpuid.h>
 #include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
 #include <asm/sn/pci/pciio.h>
 #include <asm/sn/pci/pcibr.h>
 #include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_private.h>
 
+
+#if !defined(IP30) && !defined(SN0)
 extern int	hubii_check_widget_disabled(nasid_t, int);
+#endif
 
 
 /* =====================================================================
@@ -43,26 +33,24 @@
 #define BRIDGE_PIOERR_TIMEOUT	40	/* Timeout in debug mode  */
 #endif
 #else
-#define BRIDGE_PIOERR_TIMEOUT	1	/* Timeout in non-debug mode                            */
+#define BRIDGE_PIOERR_TIMEOUT	1	/* Timeout in non-debug mode */
 #endif
 
-/* PIC has 64bit interrupt error registers, but BRIDGE has 32bit registers.
- * Thus 'bridge_errors_to_dump needs' to default to the larger of the two.
- */
 #ifdef  DEBUG
 #ifdef ERROR_DEBUG
-uint64_t bridge_errors_to_dump = ~BRIDGE_ISR_INT_MSK;
+uint64_t bridge_errors_to_dump = ~PCIBR_ISR_INT_MSK;
 #else
-uint64_t bridge_errors_to_dump = BRIDGE_ISR_ERROR_DUMP;
+uint64_t bridge_errors_to_dump = PCIBR_ISR_ERROR_DUMP;
 #endif
 #else
-uint64_t bridge_errors_to_dump = BRIDGE_ISR_ERROR_FATAL |
-                                   BRIDGE_ISR_PCIBUS_PIOERR;
+uint64_t bridge_errors_to_dump = PCIBR_ISR_ERROR_FATAL |
+                                 PCIBR_ISR_PCIBUS_PIOERR;
 #endif
 
-int                     pcibr_llp_control_war_cnt; /* PCIBR_LLP_CONTROL_WAR */
+int pcibr_pioerr_dump = 1;	/* always dump pio errors */
 
-static struct reg_values xio_cmd_pactyp[] =
+/* Crosstalk Packet Types */
+static struct reg_values xtalk_cmd_pactyp[] =
 {
     {0x0, "RdReq"},
     {0x1, "RdResp"},
@@ -83,11 +71,34 @@
     {0}
 };
 
-static struct reg_desc   xio_cmd_bits[] =
+/* Coretalk Packet Types */
+static struct reg_values ctalk_cmd_pactyp[] =
+{
+    {0x0, "RdReq"},
+    {0x1, "RdResp"},
+    {0x2, "WrReqWithResp"},
+    {0x3, "WrResp"},
+    {0x4, "WrReqNoResp"},
+    {0x5, "Reserved(5)"},
+    {0x6, "FetchAndOpReq"},
+    {0x7, "FetchAndOpResp"},
+    {0x8, "StoreAndOpReq"},
+    {0x9, "StoreAndOpResp"},
+    {0xa, "GfxWrReq"},
+    {0xb, "GfxWrCrdOrErr"},
+    {0xc, "Reserved(c)"},
+    {0xd, "Invalidate"},
+    {0xe, "Reserved(e)"},
+    {0xf, "Reserved(f)"},
+    {0}
+};
+
+/* Command Word of a Crosstalk Packet */
+static struct reg_desc   xtalk_cmd_bits[] =
 {
     {WIDGET_DIDN, -28, "DIDN", "%x"},
     {WIDGET_SIDN, -24, "SIDN", "%x"},
-    {WIDGET_PACTYP, -20, "PACTYP", 0, xio_cmd_pactyp},
+    {WIDGET_PACTYP, -20, "PACTYP", 0, xtalk_cmd_pactyp},
     {WIDGET_TNUM, -15, "TNUM", "%x"},
     {WIDGET_COHERENT, 0, "COHERENT"},
     {WIDGET_DS, 0, "DS"},
@@ -98,9 +109,98 @@
     {0}
 };
 
+/* Command Word of a Coretalk Packet */
+static struct reg_desc   ctalk_cmd_bits[] =
+{
+    {CTALK_VALID, 0, "VALID"},
+    {CTALK_CW_ERR, 0, "CW_ERR"},
+    {CTALK_PIO_OP, 0, "PIO_OP"},
+    {CTALK_RD_PARM, -32, "RD_PARM", "%x"},
+    {CTALK_DW_DATA_EN, -16, "DW_DATA_EN", "%x"},
+    {CTALK_TNUM, -8, "TNUM", "%x"},
+    {CTALK_DATA_SIZE, -6, "DATASIZE", "%x"},
+    {CTALK_PACTYP, 0, "PACTYP", 0, ctalk_cmd_pactyp},
+    {0}
+};
+
 #define F(s,n)          { 1l<<(s),-(s), n }
 
-static struct reg_values       space_v[] =
+#if defined(FORCE_ERRORS)
+#if 0	/* currently not used */
+static struct reg_desc         bridge_int_status_desc[] =
+{
+    F(47, "CWRT_REQ_TOUT"),	/* TIOCP ONLY */
+    F(46, "CTALK_PROTO_ERROR"),	/* TIOCP ONLY */
+    F(45, "PCI_X_SPLIT_MES_PE"),
+    F(44, "PCI_X_SPLIT_EMES"),
+    F(43, "PCI_X_SPLIT_TO"),
+    F(42, "PCI_X_UNEX_COMP"),
+    F(41, "INT_RAM_PERR"),
+    F(40, "PCI_X_ARB_ERR"),
+    F(39, "PCI_X_REQ_TOUT"),
+    F(38, "PCI_X_TABORT"),
+    F(37, "PCI_X_PERR"),
+    F(36, "PCI_X_SERR"),
+    F(35, "PCI_X_MRETRY"),
+    F(34, "PCI_X_MTOUT"),
+    F(33, "PCI_X_DA_PARITY"),
+    F(32, "PCI_X_AD_PARITY"),
+    F(31, "RESERVED"),
+    F(30, "PMU_ESIZE_EFAULT"),
+    F(29, "UNEXPECTED_RESP"),
+    F(28, "BAD_XRESP_PACKET"),
+    F(27, "BAD_XREQ_PACKET"),
+    F(26, "RESP_XTALK_ERROR"),
+    F(25, "REQ_XTALK_ERROR"),
+    F(24, "INVALID_ADDRESS"),
+    F(23, "UNSUPPORTED_XOP"),
+    F(22, "XREQ_FIFO_OFLOW"),
+    F(21, "LLP_REC_SNERROR"),	/* PIC ONLY */
+    F(20, "LLP_REC_CBERROR"),	/* PIC ONLY */
+    F(19, "LLP_RCTY"),		/* PIC ONLY */
+    F(18, "LLP_TX_RETRY"),	/* PIC ONLY */
+    F(17, "LLP_TCTY"),		/* PIC ONLY */
+    F(16, "RESERVED"),
+    F(15, "PCI_ABORT"),
+    F(14, "PCI_PARITY"),
+    F(13, "PCI_SERR"),
+    F(12, "PCI_PERR"),
+    F(11, "PCI_MASTER_TOUT"),
+    F(10, "PCI_RETRY_CNT"),
+    F(9, "XREAD_REQ_TOUT"),
+    F(8, "RESERVED"),
+    F(7, "INT7"),
+    F(6, "INT6"),
+    F(5, "INT5"),
+    F(4, "INT4"),
+    F(3, "INT3"),
+    F(2, "INT2"),
+    F(1, "INT1"),
+    F(0, "INT0"),
+    {0}
+};
+#endif	/* 0 */
+#endif	/* FORCE_ERRORS */
+
+char *pci_space[] = {"NONE",
+                     "ROM",
+                     "IO",
+                     "",
+                     "MEM",
+                     "MEM32",
+                     "MEM64",
+                     "CFG",
+                     "WIN0",
+                     "WIN1",
+                     "WIN2",
+                     "WIN3",
+                     "WIN4",
+                     "WIN5",
+                     "",
+                     "BAD"};
+
+#if defined(FORCE_ERRORS)
+struct reg_values       space_v[] =
 {
     {PCIIO_SPACE_NONE, "none"},
     {PCIIO_SPACE_ROM, "ROM"},
@@ -118,46 +218,38 @@
     {PCIIO_SPACE_BAD, "BAD"},
     {0}
 };
-static struct reg_desc         space_desc[] =
+struct reg_desc         space_desc[] =
 {
     {0xFF, 0, "space", 0, space_v},
     {0}
 };
-#define	device_desc	device_bits
-static struct reg_desc   device_bits[] =
+#define	device_reg_desc	device_bits
+struct reg_desc   device_bits[] =
 {
-    {BRIDGE_DEV_ERR_LOCK_EN, 0, "ERR_LOCK_EN"},
-    {BRIDGE_DEV_PAGE_CHK_DIS, 0, "PAGE_CHK_DIS"},
-    {BRIDGE_DEV_FORCE_PCI_PAR, 0, "FORCE_PCI_PAR"},
-    {BRIDGE_DEV_VIRTUAL_EN, 0, "VIRTUAL_EN"},
-    {BRIDGE_DEV_PMU_WRGA_EN, 0, "PMU_WRGA_EN"},
-    {BRIDGE_DEV_DIR_WRGA_EN, 0, "DIR_WRGA_EN"},
-    {BRIDGE_DEV_DEV_SIZE, 0, "DEV_SIZE"},
-    {BRIDGE_DEV_RT, 0, "RT"},
-    {BRIDGE_DEV_SWAP_PMU, 0, "SWAP_PMU"},
-    {BRIDGE_DEV_SWAP_DIR, 0, "SWAP_DIR"},
-    {BRIDGE_DEV_PREF, 0, "PREF"},
-    {BRIDGE_DEV_PRECISE, 0, "PRECISE"},
-    {BRIDGE_DEV_COH, 0, "COH"},
-    {BRIDGE_DEV_BARRIER, 0, "BARRIER"},
-    {BRIDGE_DEV_GBR, 0, "GBR"},
-    {BRIDGE_DEV_DEV_SWAP, 0, "DEV_SWAP"},
-    {BRIDGE_DEV_DEV_IO_MEM, 0, "DEV_IO_MEM"},
-    {BRIDGE_DEV_OFF_MASK, BRIDGE_DEV_OFF_ADDR_SHFT, "DEV_OFF", "%x"},
+    {PCIBR_DEV_ERR_LOCK_EN, 0, "ERR_LOCK_EN"},
+    {PCIBR_DEV_PAGE_CHK_DIS, 0, "PAGE_CHK_DIS"},
+    {PCIBR_DEV_FORCE_PCI_PAR, 0, "FORCE_PCI_PAR"},
+    {PCIBR_DEV_VIRTUAL_EN, 0, "VIRTUAL_EN"},
+    {PCIBR_DEV_DIR_WRGA_EN, 0, "DIR_WRGA_EN"},
+    {PCIBR_DEV_DEV_SIZE, 0, "DEV_SIZE"},
+    {PCIBR_DEV_RT, 0, "RT"},
+    {PCIBR_DEV_SWAP_DIR, 0, "SWAP_DIR"},
+    {PCIBR_DEV_PREF, 0, "PREF"},
+    {PCIBR_DEV_PRECISE, 0, "PRECISE"},
+    {PCIBR_DEV_COH, 0, "COH"},
+    {PCIBR_DEV_BARRIER, 0, "BARRIER"},
+    {PCIBR_DEV_GBR, 0, "GBR"},
+    {PCIBR_DEV_DEV_SWAP, 0, "DEV_SWAP"},
+    {PCIBR_DEV_DEV_IO_MEM, 0, "DEV_IO_MEM"},
+    {PCIBR_DEV_OFF_MASK, PCIBR_DEV_OFF_ADDR_SHFT, "DEV_OFF", "%x"},
     {0}
 };
-
-static void
-print_bridge_errcmd(uint32_t cmdword, char *errtype)
-{
-    printk("\t    Bridge %s Error Command Word Register ", errtype);
-    print_register(cmdword, xio_cmd_bits);
-}
+#endif /* FORCE_ERRORS */
 
 static char             *pcibr_isr_errs[] =
 {
     "", "", "", "", "", "", "", "",
-    "08: GIO non-contiguous byte enable in crosstalk packet", /* BRIDGE ONLY */
+    "08: Reserved Bit 08",
     "09: PCI to Crosstalk read request timeout",
     "10: PCI retry operation count exhausted.",
     "11: PCI bus device select timeout",
@@ -165,12 +257,12 @@
     "13: PCI Address/Cmd parity error ",
     "14: PCI Bridge detected parity error",
     "15: PCI abort condition",
-    "16: SSRAM parity error", /* BRIDGE ONLY */
-    "17: LLP Transmitter Retry count wrapped",
-    "18: LLP Transmitter side required Retry",
-    "19: LLP Receiver retry count wrapped",
-    "20: LLP Receiver check bit error",
-    "21: LLP Receiver sequence number error",
+    "16: Reserved Bit 16",
+    "17: LLP Transmitter Retry count wrapped",	/* PIC ONLY */
+    "18: LLP Transmitter side required Retry",	/* PIC ONLY */
+    "19: LLP Receiver retry count wrapped",	/* PIC ONLY */
+    "20: LLP Receiver check bit error",		/* PIC ONLY */
+    "21: LLP Receiver sequence number error",	/* PIC ONLY */
     "22: Request packet overflow",
     "23: Request operation not supported by bridge",
     "24: Request packet has invalid address for bridge widget",
@@ -180,9 +272,7 @@
     "28: Framing error, response cmd data size does not match actual",
     "29: Unexpected response arrived",
     "30: PMU Access Fault",
-    "31: Multiple errors occurred", /* BRIDGE ONLY */
-    
-    /* bits 32-45 are PIC ONLY */
+    "31: Reserved Bit 31",
     "32: PCI-X address or attribute cycle parity error",
     "33: PCI-X data cycle parity error",
     "34: PCI-X master timeout (ie. master abort)",
@@ -197,21 +287,17 @@
     "43: PCI-X split completion timeout",
     "44: PCI-X split completion error message",
     "45: PCI-X split completion message parity error",
+    "46: CoreTalk protocol error was detected",	/* TIOCP only */
+    "47: PCI[X] to Coretalk write req timeout",	/* TIOCP only */
 };
 
-#define BEM_ADD_STR(s)  printk("%s", (s))
-#define BEM_ADD_VAR(v)  printk("\t%20s: 0x%llx\n", #v, ((unsigned long long)v))
-#define BEM_ADD_REG(r)  printk("\t%20s: ", #r); print_register((r), r ## _desc)
-#define BEM_ADD_NSPC(n,s)       printk("\t%20s: ", n); print_register(s, space_desc)
-#define BEM_ADD_SPC(s)          BEM_ADD_NSPC(#s, s)
-
 /*
  * display memory directory state
  */
-static void
+void
 pcibr_show_dir_state(paddr_t paddr, char *prefix)
 {
-#ifdef LATER
+#ifdef PCIBR_LATER
 	int state;
 	uint64_t vec_ptr;
 	hubreg_t elo;
@@ -220,9 +306,23 @@
 
 	get_dir_ent(paddr, &state, &vec_ptr, &elo);
 
-	printk("%saddr 0x%lx: state 0x%x owner 0x%lx (%s)\n", 
-		prefix, paddr, state, vec_ptr, dir_state_str[state]);
-#endif
+	printf("%saddr 0x%lx: state 0x%x owner 0x%lx (%s)\n", 
+		prefix, (uint64_t)paddr, state, (uint64_t)vec_ptr, 
+		dir_state_str[state]);
+#endif /* PCIBR_LATER */
+}
+
+
+void
+print_bridge_errcmd(pcibr_soft_t pcibr_soft, uint32_t cmdword, char *errtype)
+{
+    KERN_MSG(K_CONT,
+	    "\t    Bridge %sError Command Word Register ", errtype);
+    if (IS_PIC_SOFT(pcibr_soft)) {
+	print_register(cmdword, xtalk_cmd_bits);
+    } else if (IS_TIOCP_SOFT(pcibr_soft)) {
+	print_register(cmdword, ctalk_cmd_bits);
+    }
 }
 
 
@@ -233,36 +333,15 @@
 void
 pcibr_error_dump(pcibr_soft_t pcibr_soft)
 {
-    bridge_t               *bridge = pcibr_soft->bs_base;
     uint64_t		    int_status;
-    bridgereg_t             int_status_32;
-    picreg_t		    int_status_64;
     uint64_t		    mult_int;
-    bridgereg_t             mult_int_32;
-    picreg_t		    mult_int_64;
     uint64_t		    bit;
-    int			    number_bits;
     int                     i;
-    char		    *reg_desc;
-    paddr_t		    addr = (paddr_t)0;
 
-    /* We read the INT_STATUS register as a 64bit picreg_t for PIC and a
-     * 32bit bridgereg_t for BRIDGE, but always process the result as a
-     * 64bit value so the code can be "common" for both PIC and BRIDGE...
-     */
-    if (IS_PIC_SOFT(pcibr_soft)) {
-	int_status_64 = (bridge->p_int_status_64 & ~BRIDGE_ISR_INT_MSK);
-	int_status = (uint64_t)int_status_64;
-	number_bits = PCIBR_ISR_MAX_ERRS_PIC;
-    } else {
-	int_status_32 = (bridge->b_int_status & ~BRIDGE_ISR_INT_MSK);
-	int_status = ((uint64_t)int_status_32) & 0xffffffff;
-	number_bits = PCIBR_ISR_MAX_ERRS_BRIDGE;
-    }
+    int_status = (pcireg_intr_status_get(pcibr_soft) & ~PCIBR_ISR_INT_MSK);
 
     if (!int_status) {
-	/* No error bits set */
-	return;
+	return;		/* No error bits set */
     }
 
     /* Check if dumping the same error information multiple times */
@@ -270,280 +349,208 @@
 	return;
     pcibr_soft->bs_errinfo.bserr_intstat = int_status;
 
-    printk(KERN_ALERT "PCI BRIDGE ERROR: int_status is 0x%lx for %s\n"
+    KERN_MSG(K_ALERT, "PCI BRIDGE ERROR: int_status is 0x%lx for %s\n"
 	"    Dumping relevant %s registers for each bit set...\n",
-	    int_status, pcibr_soft->bs_name,
-	    (IS_PIC_SOFT(pcibr_soft) ? "PIC" : 
-	        (IS_BRIDGE_SOFT(pcibr_soft) ? "BRIDGE" : "XBRIDGE")));
+	    int_status, pcibr_soft->bs_name, pcibr_soft->bs_asic_name);
 
-    for (i = PCIBR_ISR_ERR_START; i < number_bits; i++) {
+    for (i = PCIBR_ISR_ERR_START; i < 64; i++) {
 	bit = 1ull << i;
 
-	/*
-	 * A number of int_status bits are only defined for Bridge.
-	 * Ignore them in the case of an XBridge or PIC.
-	 */
-	if ((IS_XBRIDGE_SOFT(pcibr_soft) || IS_PIC_SOFT(pcibr_soft)) &&
-	    ((bit == BRIDGE_ISR_MULTI_ERR) ||
-	     (bit == BRIDGE_ISR_SSRAM_PERR) ||
-	     (bit == BRIDGE_ISR_GIO_B_ENBL_ERR))) {
-	    continue;
-	}
-
 	/* A number of int_status bits are only valid for PIC's bus0 */
 	if ((IS_PIC_SOFT(pcibr_soft) && (pcibr_soft->bs_busnum != 0)) && 
-	    ((bit == BRIDGE_ISR_UNSUPPORTED_XOP) ||
-	     (bit == BRIDGE_ISR_LLP_REC_SNERR) ||
-	     (bit == BRIDGE_ISR_LLP_REC_CBERR) ||
-	     (bit == BRIDGE_ISR_LLP_RCTY) ||
-	     (bit == BRIDGE_ISR_LLP_TX_RETRY) ||
-	     (bit == BRIDGE_ISR_LLP_TCTY))) {
+	    ((bit == PCIBR_ISR_UNSUPPORTED_XOP) ||
+	     (bit == PCIBR_ISR_LLP_REC_SNERR) ||
+	     (bit == PCIBR_ISR_LLP_REC_CBERR) ||
+	     (bit == PCIBR_ISR_LLP_RCTY) ||
+	     (bit == PCIBR_ISR_LLP_TX_RETRY) ||
+	     (bit == PCIBR_ISR_LLP_TCTY))) {
 	    continue;
 	}
 
 	if (int_status & bit) {
-	    printk("\t%s\n", pcibr_isr_errs[i]);
+	    KERN_MSG(K_CONT, "\t%s\n", pcibr_isr_errs[i]);
 
 	    switch (bit) {
 
-	    case PIC_ISR_INT_RAM_PERR:	    /* bit41	INT_RAM_PERR */
+	    case TIOCP_ISR_CWRT_REQ_TOUT:	/* bit47    CWRT_REQ_TOUT */
+		/* XXX habeck: need to go to ICE to get status registers
+		 * that have info about this error interrupt.
+		 */
+		break;
+
+	    case TIOCP_ISR_CTALK_PROT_ERR:	/* bit46    CTALK_PROT_ERR */
+		/* XXX habeck: need to go to ICE to get status registers
+		 * that have info about this error interrupt.
+		 */
+		break;
+
+	    case PCIBR_ISR_INT_RAM_PERR:	/* bit41    INT_RAM_PERR */
 		/* XXX: should breakdown meaning of bits in reg */
-		printk( "\t	Internal RAM Parity Error: 0x%lx\n",
-		    bridge->p_ate_parity_err_64);
+		KERN_MSG(K_CONT, "\t	Internal RAM Parity Error: 0x%lx\n",
+		    pcireg_parity_err_get(pcibr_soft));
 		break;
 
-	    case PIC_ISR_PCIX_ARB_ERR:	    /* bit40	PCI_X_ARB_ERR */
+	    case PCIBR_ISR_PCIX_ARB_ERR:	/* bit40    PCI_X_ARB_ERR */
 		/* XXX: should breakdown meaning of bits in reg */
-		printk( "\t	Arbitration Reg: 0x%x\n",
-		    bridge->b_arb);
+		KERN_MSG(K_CONT, "\t	Arbitration Reg: 0x%lx\n",
+		    pcireg_arbitration_get(pcibr_soft));
 		break;
 
-	    case PIC_ISR_PCIX_REQ_TOUT:	    /* bit39	PCI_X_REQ_TOUT */
+	    case PCIBR_ISR_PCIX_REQ_TOUT:	/* bit39    PCI_X_REQ_TOUT */
 		/* XXX: should breakdown meaning of attribute bit */
-		printk(
+		KERN_MSG(K_CONT,
 		    "\t	   PCI-X DMA Request Error Address Reg: 0x%lx\n"
 		    "\t	   PCI-X DMA Request Error Attribute Reg: 0x%lx\n",
-		    bridge->p_pcix_dma_req_err_addr_64,
-		    bridge->p_pcix_dma_req_err_attr_64);
+		    pcireg_pcix_req_err_addr_get(pcibr_soft),
+		    pcireg_pcix_req_err_attr_get(pcibr_soft));
 		break;
 
-	    case PIC_ISR_PCIX_SPLIT_MSG_PE: /* bit45	PCI_X_SPLIT_MES_PE */
-	    case PIC_ISR_PCIX_SPLIT_EMSG:   /* bit44	PCI_X_SPLIT_EMESS */
-	    case PIC_ISR_PCIX_SPLIT_TO:	    /* bit43	PCI_X_SPLIT_TO */
+	    case PCIBR_ISR_PCIX_SPLIT_MSG_PE:	/* bit45    PCI_X_SPLIT_MES_PE */
+	    case PCIBR_ISR_PCIX_SPLIT_EMSG:	/* bit44    PCI_X_SPLIT_EMESS */
+	    case PCIBR_ISR_PCIX_SPLIT_TO:	/* bit43    PCI_X_SPLIT_TO */
 		/* XXX: should breakdown meaning of attribute bit */
-		printk(
+		KERN_MSG(K_CONT,
 		    "\t	   PCI-X Split Request Address Reg: 0x%lx\n"
 		    "\t	   PCI-X Split Request Attribute Reg: 0x%lx\n",
-		    bridge->p_pcix_pio_split_addr_64,
-		    bridge->p_pcix_pio_split_attr_64);
+		    pcireg_pcix_pio_split_addr_get(pcibr_soft),
+		    pcireg_pcix_pio_split_attr_get(pcibr_soft));
 		/* FALL THRU */
 
-	    case PIC_ISR_PCIX_UNEX_COMP:    /* bit42	PCI_X_UNEX_COMP */
-	    case PIC_ISR_PCIX_TABORT:	    /* bit38	PCI_X_TABORT */
-	    case PIC_ISR_PCIX_PERR:	    /* bit37	PCI_X_PERR */
-	    case PIC_ISR_PCIX_SERR:	    /* bit36	PCI_X_SERR */
-	    case PIC_ISR_PCIX_MRETRY:	    /* bit35	PCI_X_MRETRY */
-	    case PIC_ISR_PCIX_MTOUT:	    /* bit34	PCI_X_MTOUT */
-	    case PIC_ISR_PCIX_DA_PARITY:    /* bit33	PCI_X_DA_PARITY */
-	    case PIC_ISR_PCIX_AD_PARITY:    /* bit32	PCI_X_AD_PARITY */
+	    case PCIBR_ISR_PCIX_UNEX_COMP:	/* bit42    PCI_X_UNEX_COMP */
+	    case PCIBR_ISR_PCIX_TABORT:		/* bit38    PCI_X_TABORT */
+	    case PCIBR_ISR_PCIX_PERR:		/* bit37    PCI_X_PERR */
+	    case PCIBR_ISR_PCIX_SERR:		/* bit36    PCI_X_SERR */
+	    case PCIBR_ISR_PCIX_MRETRY:		/* bit35    PCI_X_MRETRY */
+	    case PCIBR_ISR_PCIX_MTOUT:		/* bit34    PCI_X_MTOUT */
+	    case PCIBR_ISR_PCIX_DA_PARITY:	/* bit33    PCI_X_DA_PARITY */
+	    case PCIBR_ISR_PCIX_AD_PARITY:	/* bit32    PCI_X_AD_PARITY */
 		/* XXX: should breakdown meaning of attribute bit */
-		printk(
+		KERN_MSG(K_CONT,
 		    "\t	   PCI-X Bus Error Address Reg: 0x%lx\n"
 		    "\t	   PCI-X Bus Error Attribute Reg: 0x%lx\n"
 		    "\t	   PCI-X Bus Error Data Reg: 0x%lx\n",
-		    bridge->p_pcix_bus_err_addr_64,
-		    bridge->p_pcix_bus_err_attr_64,
-		    bridge->p_pcix_bus_err_data_64);
+		    pcireg_pcix_bus_err_addr_get(pcibr_soft),
+		    pcireg_pcix_bus_err_attr_get(pcibr_soft),
+		    pcireg_pcix_bus_err_data_get(pcibr_soft));
 		break;
 
-	    case BRIDGE_ISR_PAGE_FAULT:	    /* bit30	PMU_PAGE_FAULT */
-		if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
-		    reg_desc = "Map Fault Address";
-		else
-		    reg_desc = "SSRAM Parity Error";
-
-		printk( "\t    %s Register: 0x%x\n", reg_desc,
-		    bridge->b_ram_perr_or_map_fault);
+	    case PCIBR_ISR_PMU_PAGE_FAULT:	/* bit30    PMU_PAGE_FAULT */
+		KERN_MSG(K_CONT, "\t    Map Fault Address Reg: 0x%lx\n",
+		    pcireg_map_fault_get(pcibr_soft));
 		break;
 
-	    case BRIDGE_ISR_UNEXP_RESP:    /* bit29	UNEXPECTED_RESP */
-		print_bridge_errcmd(bridge->b_wid_aux_err, "Aux ");
+	    case PCIBR_ISR_UNEXP_RESP:		/* bit29    UNEXPECTED_RESP */
+		print_bridge_errcmd(pcibr_soft,
+			    pcireg_linkside_err_get(pcibr_soft), "Aux ");
 
-		/* PIC in PCI-X mode, dump the PCIX DMA Request registers */
-		if (IS_PIC_SOFT(pcibr_soft) && IS_PCIX(pcibr_soft)) {
+		/* PCI-X mode, dump the PCIX DMA Request registers */
+		if (IS_PCIX(pcibr_soft)) {
 		    /* XXX: should breakdown meaning of attr bit */
-		    printk( 
+		    KERN_MSG(K_CONT, 
 			"\t    PCI-X DMA Request Error Addr Reg: 0x%lx\n"
 			"\t    PCI-X DMA Request Error Attr Reg: 0x%lx\n",
-			bridge->p_pcix_dma_req_err_addr_64,
-			bridge->p_pcix_dma_req_err_attr_64);
+			pcireg_pcix_req_err_addr_get(pcibr_soft),
+			pcireg_pcix_req_err_attr_get(pcibr_soft));
 		}
 		break;
 
-	    case BRIDGE_ISR_BAD_XRESP_PKT:  /* bit28	BAD_RESP_PACKET */
-	    case BRIDGE_ISR_RESP_XTLK_ERR:  /* bit26	RESP_XTALK_ERROR */
-		if (IS_PIC_SOFT(pcibr_soft)) {
-		    print_bridge_errcmd(bridge->b_wid_aux_err, "Aux ");
-		}
+	    case PCIBR_ISR_BAD_XRESP_PKT:	/* bit28    BAD_RESP_PACKET */
+	    case PCIBR_ISR_RESP_XTLK_ERR:	/* bit26    RESP_XTALK_ERROR */
+		print_bridge_errcmd(pcibr_soft,
+				pcireg_linkside_err_get(pcibr_soft), "Aux ");
 		 
-		/* If PIC in PCI-X mode, DMA Request Error registers are
-		 * valid.  But PIC in PCI mode, Response Buffer Address 
-		 * register are valid.
+		/* In PCI-X mode, the DMA Request Error registers are valid.
+		 * In PCI mode, the Response Buffer Address register is valid.
 		 */
 		if (IS_PCIX(pcibr_soft)) {
 		    /* XXX: should breakdown meaning of attribute bit */
-		    printk( 
+		    KERN_MSG(K_CONT, 
 			"\t    PCI-X DMA Request Error Addr Reg: 0x%lx\n"
 		        "\t    PCI-X DMA Request Error Attribute Reg: 0x%lx\n",
-		        bridge->p_pcix_dma_req_err_addr_64,
-		        bridge->p_pcix_dma_req_err_attr_64);
+			pcireg_pcix_req_err_addr_get(pcibr_soft),
+			pcireg_pcix_req_err_attr_get(pcibr_soft));
 		} else {
-		    addr= (((uint64_t)(bridge->b_wid_resp_upper & 0xFFFF)<<32)
-			   | bridge->b_wid_resp_lower);
-		    printk("\t    Bridge Response Buf Error Upper Addr Reg: 0x%x\n"
-		        "\t    Bridge Response Buf Error Lower Addr Reg: 0x%x\n"
+		    KERN_MSG(K_CONT,
+		        "\t    Bridge Response Buf Error Addr Reg: 0x%lx\n"
 		        "\t    dev-num %d buff-num %d addr 0x%lx\n",
-		        bridge->b_wid_resp_upper, bridge->b_wid_resp_lower,
-		        ((bridge->b_wid_resp_upper >> 20) & 0x3),
-		        ((bridge->b_wid_resp_upper >> 16) & 0xF),
-		        addr);
-		}
-		if (bit == BRIDGE_ISR_RESP_XTLK_ERR) {
+			pcireg_resp_err_get(pcibr_soft),
+			(int)pcireg_resp_err_dev_get(pcibr_soft),
+			(int)pcireg_resp_err_buf_get(pcibr_soft),
+			pcireg_resp_err_addr_get(pcibr_soft));
+		    if (bit == PCIBR_ISR_RESP_XTLK_ERR) {
 			/* display memory directory associated with cacheline */
-			pcibr_show_dir_state(addr, "\t    ");
+			pcibr_show_dir_state(
+				    pcireg_resp_err_get(pcibr_soft), "\t    ");
+		    }
 		}
 		break;
 
-	    case BRIDGE_ISR_BAD_XREQ_PKT:   /* bit27	BAD_XREQ_PACKET */
-	    case BRIDGE_ISR_REQ_XTLK_ERR:   /* bit25	REQ_XTALK_ERROR */
-	    case BRIDGE_ISR_INVLD_ADDR:	    /* bit24	INVALID_ADDRESS */
-		print_bridge_errcmd(bridge->b_wid_err_cmdword, "");
-		printk( 
-		    "\t    Bridge Error Upper Address Register: 0x%lx\n"
-		    "\t    Bridge Error Lower Address Register: 0x%lx\n"
+	    case PCIBR_ISR_BAD_XREQ_PKT:	/* bit27    BAD_XREQ_PACKET */
+	    case PCIBR_ISR_REQ_XTLK_ERR:	/* bit25    REQ_XTALK_ERROR */
+	    case PCIBR_ISR_INVLD_ADDR:		/* bit24    INVALID_ADDRESS */
+		print_bridge_errcmd(pcibr_soft,
+				pcireg_cmdword_err_get(pcibr_soft), "");
+		KERN_MSG(K_CONT, 
+		    "\t    Bridge Error Address Register: 0x%lx\n"
 		    "\t    Bridge Error Address: 0x%lx\n",
-		    (uint64_t) bridge->b_wid_err_upper,
-		    (uint64_t) bridge->b_wid_err_lower,
-		    (((uint64_t) bridge->b_wid_err_upper << 32) |
-		    bridge->b_wid_err_lower));
+		    pcireg_bus_err_get(pcibr_soft),
+		    pcireg_bus_err_get(pcibr_soft));
 		break;
 
-	    case BRIDGE_ISR_UNSUPPORTED_XOP:/* bit23	UNSUPPORTED_XOP */
-		if (IS_PIC_SOFT(pcibr_soft)) {
-		    print_bridge_errcmd(bridge->b_wid_aux_err, "Aux ");
-		    printk( 
+	    case PCIBR_ISR_UNSUPPORTED_XOP:	/* bit23    UNSUPPORTED_XOP */
+		print_bridge_errcmd(pcibr_soft,
+				pcireg_linkside_err_get(pcibr_soft), "Aux ");
+		KERN_MSG(K_CONT, 
 			"\t    Address Holding Link Side Error Reg: 0x%lx\n",
-			bridge->p_addr_lkerr_64);
-		} else {
-		    print_bridge_errcmd(bridge->b_wid_err_cmdword, "");
-		    printk( 
-			"\t    Bridge Error Upper Address Register: 0x%lx\n"
-		        "\t    Bridge Error Lower Address Register: 0x%lx\n"
-		        "\t    Bridge Error Address: 0x%lx\n",
-		        (uint64_t) bridge->b_wid_err_upper,
-		        (uint64_t) bridge->b_wid_err_lower,
-		        (((uint64_t) bridge->b_wid_err_upper << 32) |
-		        bridge->b_wid_err_lower));
-		}
+			pcireg_linkside_err_addr_get(pcibr_soft));
 		break;
 
-	    case BRIDGE_ISR_XREQ_FIFO_OFLOW:/* bit22	XREQ_FIFO_OFLOW */
-		/* Link side error registers are only valid for PIC */
-		if (IS_PIC_SOFT(pcibr_soft)) {
-		    print_bridge_errcmd(bridge->b_wid_aux_err, "Aux ");
-		    printk(
+	    case PCIBR_ISR_XREQ_FIFO_OFLOW:	/* bit22    XREQ_FIFO_OFLOW */
+		print_bridge_errcmd(pcibr_soft,
+				pcireg_linkside_err_get(pcibr_soft), "Aux ");
+		KERN_MSG(K_CONT,
 			"\t    Address Holding Link Side Error Reg: 0x%lx\n",
-			bridge->p_addr_lkerr_64);
-		}
-		break;
-
-	    case BRIDGE_ISR_SSRAM_PERR:	    /* bit16	SSRAM_PERR */
-		if (IS_BRIDGE_SOFT(pcibr_soft)) {
-		    printk(
-			"\t    Bridge SSRAM Parity Error Register: 0x%x\n",
-			bridge->b_ram_perr);
-		}
+			pcireg_linkside_err_addr_get(pcibr_soft));
 		break;
 
-	    case BRIDGE_ISR_PCI_ABORT:	    /* bit15	PCI_ABORT */
-	    case BRIDGE_ISR_PCI_PARITY:	    /* bit14	PCI_PARITY */
-	    case BRIDGE_ISR_PCI_SERR:	    /* bit13	PCI_SERR */
-	    case BRIDGE_ISR_PCI_PERR:	    /* bit12	PCI_PERR */
-	    case BRIDGE_ISR_PCI_MST_TIMEOUT:/* bit11	PCI_MASTER_TOUT */
-	    case BRIDGE_ISR_PCI_RETRY_CNT:  /* bit10	PCI_RETRY_CNT */
-	    case BRIDGE_ISR_GIO_B_ENBL_ERR: /* bit08	GIO BENABLE_ERR */
-		printk( 
-		    "\t    PCI Error Upper Address Register: 0x%lx\n"
-		    "\t    PCI Error Lower Address Register: 0x%lx\n"
+	    case PCIBR_ISR_PCI_ABORT:		/* bit15    PCI_ABORT */
+	    case PCIBR_ISR_PCI_PARITY:		/* bit14    PCI_PARITY */
+	    case PCIBR_ISR_PCI_SERR:		/* bit13    PCI_SERR */
+	    case PCIBR_ISR_PCI_PERR:		/* bit12    PCI_PERR */
+	    case PCIBR_ISR_PCI_MST_TIMEOUT:	/* bit11    PCI_MASTER_TOUT */
+	    case PCIBR_ISR_PCI_RETRY_CNT:	/* bit10    PCI_RETRY_CNT */
+		KERN_MSG(K_CONT, 
+		    "\t    PCI Error Address Register: 0x%lx\n"
 		    "\t    PCI Error Address: 0x%lx\n",
-		    (uint64_t) bridge->b_pci_err_upper,
-		    (uint64_t) bridge->b_pci_err_lower,
-		    (((uint64_t) bridge->b_pci_err_upper << 32) |
-		    bridge->b_pci_err_lower));
+		    pcireg_pci_bus_addr_get(pcibr_soft),
+		    pcireg_pci_bus_addr_addr_get(pcibr_soft));
 		break;
 
-	    case BRIDGE_ISR_XREAD_REQ_TIMEOUT: /* bit09	XREAD_REQ_TOUT */
-		addr = (((uint64_t)(bridge->b_wid_resp_upper & 0xFFFF) << 32)
-		    | bridge->b_wid_resp_lower);
-		printk(
-		    "\t    Bridge Response Buf Error Upper Addr Reg: 0x%x\n"
-		    "\t    Bridge Response Buf Error Lower Addr Reg: 0x%x\n"
+	    case PCIBR_ISR_XREAD_REQ_TIMEOUT:	/* bit09    XREAD_REQ_TOUT */
+		KERN_MSG(K_CONT,
+		    "\t    Bridge Response Buf Error Addr Reg: 0x%lx\n"
 		    "\t    dev-num %d buff-num %d addr 0x%lx\n",
-		    bridge->b_wid_resp_upper, bridge->b_wid_resp_lower,
-		    ((bridge->b_wid_resp_upper >> 20) & 0x3),
-		    ((bridge->b_wid_resp_upper >> 16) & 0xF),
-		    addr);
+		    pcireg_resp_err_get(pcibr_soft),
+		    (int)pcireg_resp_err_dev_get(pcibr_soft),
+		    (int)pcireg_resp_err_buf_get(pcibr_soft),
+		    pcireg_resp_err_get(pcibr_soft));
 		break;
 	    }
 	}
     }
 
-    /* We read the INT_MULT register as a 64bit picreg_t for PIC and a
-     * 32bit bridgereg_t for BRIDGE, but always process the result as a
-     * 64bit value so the code can be "common" for both PIC and BRIDGE...
-     */
-    if (IS_PIC_SOFT(pcibr_soft)) {
-	mult_int_64 = (bridge->p_mult_int_64 & ~BRIDGE_ISR_INT_MSK);
-	mult_int = (uint64_t)mult_int_64;
-	number_bits = PCIBR_ISR_MAX_ERRS_PIC;
-    } else {
-	mult_int_32 = (bridge->b_mult_int & ~BRIDGE_ISR_INT_MSK);
-	mult_int = ((uint64_t)mult_int_32) & 0xffffffff;
-	number_bits = PCIBR_ISR_MAX_ERRS_BRIDGE;
-    }
+    mult_int = pcireg_intr_multiple_get(pcibr_soft);
 
-    if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)&&(mult_int & ~BRIDGE_ISR_INT_MSK)) {
-	printk( "    %s Multiple Interrupt Register is 0x%lx\n",
-		IS_PIC_SOFT(pcibr_soft) ? "PIC" : "XBridge", mult_int);
-	for (i = PCIBR_ISR_ERR_START; i < number_bits; i++) {
+    if (mult_int & ~PCIBR_ISR_INT_MSK) {
+	KERN_MSG(K_CONT, "    %s Multiple Interrupt Register is 0x%lx\n",
+		pcibr_soft->bs_asic_name, mult_int);
+	for (i = PCIBR_ISR_ERR_START; i < 64; i++) {
 	    if (mult_int & (1ull << i))
-		printk( "\t%s\n", pcibr_isr_errs[i]);
+		KERN_MSG(K_CONT, "\t%s\n", pcibr_isr_errs[i]);
 	}
     }
 }
 
-static uint32_t
-pcibr_errintr_group(uint32_t error)
-{
-    uint32_t              group = BRIDGE_IRR_MULTI_CLR;
-
-    if (error & BRIDGE_IRR_PCI_GRP)
-	group |= BRIDGE_IRR_PCI_GRP_CLR;
-    if (error & BRIDGE_IRR_SSRAM_GRP)
-	group |= BRIDGE_IRR_SSRAM_GRP_CLR;
-    if (error & BRIDGE_IRR_LLP_GRP)
-	group |= BRIDGE_IRR_LLP_GRP_CLR;
-    if (error & BRIDGE_IRR_REQ_DSP_GRP)
-	group |= BRIDGE_IRR_REQ_DSP_GRP_CLR;
-    if (error & BRIDGE_IRR_RESP_BUF_GRP)
-	group |= BRIDGE_IRR_RESP_BUF_GRP_CLR;
-    if (error & BRIDGE_IRR_CRP_GRP)
-	group |= BRIDGE_IRR_CRP_GRP_CLR;
-
-    return group;
-
-}
-
 
 /* pcibr_pioerr_check():
  *	Check to see if this pcibr has a PCI PIO
@@ -553,12 +560,7 @@
 static void
 pcibr_pioerr_check(pcibr_soft_t soft)
 {
-    bridge_t		   *bridge;
-    uint64_t              int_status;
-    bridgereg_t             int_status_32;
-    picreg_t                int_status_64;
-    bridgereg_t		    pci_err_lower;
-    bridgereg_t		    pci_err_upper;
+    uint64_t		    int_status;
     iopaddr_t		    pci_addr;
     pciio_slot_t	    slot;
     pcibr_piomap_t	    map;
@@ -567,26 +569,10 @@
     unsigned		    win;
     int			    func;
 
-    bridge = soft->bs_base;
+    int_status = pcireg_intr_status_get(soft);
 
-    /* We read the INT_STATUS register as a 64bit picreg_t for PIC and a
-     * 32bit bridgereg_t for BRIDGE, but always process the result as a
-     * 64bit value so the code can be "common" for both PIC and BRIDGE...
-     */
-    if (IS_PIC_SOFT(soft)) {
-        int_status_64 = (bridge->p_int_status_64 & ~BRIDGE_ISR_INT_MSK);
-        int_status = (uint64_t)int_status_64;
-    } else {
-        int_status_32 = (bridge->b_int_status & ~BRIDGE_ISR_INT_MSK);
-        int_status = ((uint64_t)int_status_32) & 0xffffffff;
-    }
-
-    if (int_status & BRIDGE_ISR_PCIBUS_PIOERR) {
-	pci_err_lower = bridge->b_pci_err_lower;
-	pci_err_upper = bridge->b_pci_err_upper;
-
-	pci_addr = pci_err_upper & BRIDGE_ERRUPPR_ADDRMASK;
-	pci_addr = (pci_addr << 32) | pci_err_lower;
+    if (int_status & PCIBR_ISR_PCIBUS_PIOERR) {
+	pci_addr = pcireg_pci_bus_addr_get(soft);
 
 	slot = PCIBR_NUM_SLOTS(soft);
 	while (slot-- > 0) {
@@ -609,13 +595,14 @@
 		    else if (map->bp_space == PCIIO_SPACE_ROM)
 			base += pcibr_info->f_rbase;
 		    if ((pci_addr >= base) && (pci_addr < (base + size)))
-			atomic_inc(&map->bp_toc[0]);
+			ATOMIC_INC(&map->bp_toc);
 		}
 	    }
 	}
     }
 }
 
+
 /*
  * PCI Bridge Error interrupt handler.
  *      This gets invoked, whenever a PCI bridge sends an error interrupt.
@@ -636,19 +623,19 @@
  *                due to read or write error!.
  */
 
-void
+irqreturn_t
 pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
 {
     pcibr_soft_t            pcibr_soft;
-    bridge_t               *bridge;
-    uint64_t              int_status;
-    uint64_t              err_status;
-    bridgereg_t             int_status_32;
-    picreg_t                int_status_64;
-    int			    number_bits;
+    uint64_t		    int_status;
+    uint64_t		    err_status;
     int                     i;
+
+    /* REFERENCED */
     uint64_t		    disable_errintr_mask = 0;
+#if !defined(IP30) && !defined(SN0)
     nasid_t		    nasid;
+#endif
 
 
 #if PCIBR_SOFT_LIST
@@ -662,7 +649,8 @@
 	entry = pcibr_list;
 	while (1) {
 	    if (entry == NULL) {
-		PRINT_PANIC("pcibr_error_intr_handler:\tmy parameter (0x%p) is not a pcibr_soft!", arg);
+		panic("pcibr_error_intr_handler: (0x%lx) is not a pcibr_soft!",
+	 	      (uint64_t)arg);
 	    }
 	    if ((intr_arg_t) entry->bl_soft == arg)
 		break;
@@ -670,8 +658,8 @@
 	}
     }
 #endif /* PCIBR_SOFT_LIST */
+
     pcibr_soft = (pcibr_soft_t) arg;
-    bridge = pcibr_soft->bs_base;
 
     /*
      * pcibr_error_intr_handler gets invoked whenever bridge encounters
@@ -697,6 +685,8 @@
      * later.
      */
 
+  {
+    pci_bridge_t *bridge = pcibr_soft->bs_base;
     nasid = NASID_GET(bridge);
     if (hubii_check_widget_disabled(nasid, pcibr_soft->bs_xid)) {
 	DECLARE_WAIT_QUEUE_HEAD(wq);
@@ -705,50 +695,37 @@
 	/* Let's go recursive */
 	return(pcibr_error_intr_handler(irq, arg, ep));
     }
+  }
 
-    /* We read the INT_STATUS register as a 64bit picreg_t for PIC and a
-     * 32bit bridgereg_t for BRIDGE, but always process the result as a
-     * 64bit value so the code can be "common" for both PIC and BRIDGE...
-     */
-    if (IS_PIC_SOFT(pcibr_soft)) {
-        int_status_64 = (bridge->p_int_status_64 & ~BRIDGE_ISR_INT_MSK);
-        int_status = (uint64_t)int_status_64;
-        number_bits = PCIBR_ISR_MAX_ERRS_PIC;
-    } else {
-        int_status_32 = (bridge->b_int_status & ~BRIDGE_ISR_INT_MSK);
-        int_status = ((uint64_t)int_status_32) & 0xffffffff;
-        number_bits = PCIBR_ISR_MAX_ERRS_BRIDGE;
-    }
+    int_status = pcireg_intr_status_get(pcibr_soft);
 
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ERROR, pcibr_soft->bs_conn,
-		"pcibr_error_intr_handler: int_status=0x%x\n", int_status));
+		"pcibr_error_intr_handler: int_status=0x%lx\n", int_status));
 
     /* int_status is which bits we have to clear;
      * err_status is the bits we haven't handled yet.
      */
-    err_status = int_status & ~BRIDGE_ISR_MULTI_ERR;
+    err_status = int_status;
 
-    if (!(int_status & ~BRIDGE_ISR_INT_MSK)) {
-	/*
-	 * No error bit set!!.
-	 */
-	return;
+    if (!(int_status & ~PCIBR_ISR_INT_MSK)) {
+	return IRQ_HANDLED;	    /* Return, no error bit set. */
     }
     /*
      * If we have a PCIBUS_PIOERR, hand it to the logger.
      */
-    if (int_status & BRIDGE_ISR_PCIBUS_PIOERR) {
+    if (int_status & PCIBR_ISR_PCIBUS_PIOERR) {
 	pcibr_pioerr_check(pcibr_soft);
     }
 
     if (err_status) {
-	struct bs_errintr_stat_s *bs_estat = pcibr_soft->bs_errintr_stat;
+	struct bs_errintr_stat_s *bs_estat;
+	bs_estat = &pcibr_soft->bs_errintr_stat[PCIBR_ISR_ERR_START];
 
-	for (i = PCIBR_ISR_ERR_START; i < number_bits; i++, bs_estat++) {
+	for (i = PCIBR_ISR_ERR_START; i < 64; i++, bs_estat++) {
 	    if (err_status & (1ull << i)) {
-		uint32_t              errrate = 0;
-		uint32_t              errcount = 0;
-		uint32_t              errinterval = 0, current_tick = 0;
+		uint32_t		errrate = 0;
+		uint32_t		errcount = 0;
+		uint32_t		errinterval = 0, current_tick = 0;
 		int                     llp_tx_retry_errors = 0;
 		int                     is_llp_tx_retry_intr = 0;
 
@@ -761,7 +738,7 @@
 
 		/* LLP interrrupt errors are only valid on BUS0 of the PIC */
 		if (pcibr_soft->bs_busnum == 0)
-		    is_llp_tx_retry_intr = (BRIDGE_ISR_LLP_TX_RETRY==(1ull << i));
+		    is_llp_tx_retry_intr = (PCIBR_ISR_LLP_TX_RETRY==(1ull << i));
 
 		/* Check for the divide by zero condition while
 		 * calculating the error rates.
@@ -815,7 +792,7 @@
 			 * for the transmitter retry interrupt
 			 * exceeded the previously printed rate.
 			 */
-			printk(KERN_WARNING
+			KERN_MSG(K_WARN,
 				"%s: %s, Excessive error interrupts : %d/tick\n",
 				pcibr_soft->bs_name,
 				pcibr_isr_errs[i],
@@ -836,7 +813,7 @@
 		if (errinterval > PCIBR_ERRTIME_THRESHOLD) {
 
 		    if (errrate > PCIBR_ERRRATE_THRESHOLD) {
-			printk(KERN_NOTICE "%s: %s, Error rate %d/tick",
+			KERN_MSG(K_NOTE, "%s: %s, Error rate %d/tick",
 				pcibr_soft->bs_name,
 				pcibr_isr_errs[i],
 				errrate);
@@ -850,20 +827,19 @@
 		}
 		/* PIC BRINGUP WAR (PV# 856155):
 		 * Dont disable PCI_X_ARB_ERR interrupts, we need the
-		 * interrupt inorder to clear the DEV_BROKE bits in
+		 * interrupt in order to clear the DEV_BROKE bits in
 		 * b_arb register to re-enable the device.
 		 */
-		if (IS_PIC_SOFT(pcibr_soft) &&
+		if (IS_PIC_SOFT(pcibr_soft) && 
 				!(err_status & PIC_ISR_PCIX_ARB_ERR) &&
 				PCIBR_WAR_ENABLED(PV856155, pcibr_soft)) {
-
 		if (bs_estat->bs_errcount_total > PCIBR_ERRINTR_DISABLE_LEVEL) {
 		    /*
 		     * We have seen a fairly large number of errors of
 		     * this type. Let's disable the interrupt. But flash
 		     * a message about the interrupt being disabled.
 		     */
-		    printk(KERN_NOTICE
+		    KERN_MSG(K_NOTE,
 			    "%s Disabling error interrupt type %s. Error count %d",
 			    pcibr_soft->bs_name,
 			    pcibr_isr_errs[i],
@@ -876,119 +852,276 @@
     }
 
     if (disable_errintr_mask) {
-	unsigned s;
+	unsigned long	s;
 	/*
 	 * Disable some high frequency errors as they
 	 * could eat up too much cpu time.
 	 */
 	s = pcibr_lock(pcibr_soft);
-	if (IS_PIC_SOFT(pcibr_soft)) {
-	    bridge->p_int_enable_64 &= (picreg_t)(~disable_errintr_mask);
-	} else {
-	    bridge->b_int_enable &= (bridgereg_t)(~disable_errintr_mask);
-	}
+	pcireg_intr_enable_bit_clr(pcibr_soft, disable_errintr_mask);
 	pcibr_unlock(pcibr_soft, s);
     }
     /*
      * If we leave the PROM cacheable, T5 might
      * try to do a cache line sized writeback to it,
-     * which will cause a BRIDGE_ISR_INVLD_ADDR.
+     * which will cause a PCIBR_ISR_INVLD_ADDR.
      */
-    if ((err_status & BRIDGE_ISR_INVLD_ADDR) &&
-	(0x00000000 == bridge->b_wid_err_upper) &&
-	(0x00C00000 == (0xFFC00000 & bridge->b_wid_err_lower)) &&
-	(0x00402000 == (0x00F07F00 & bridge->b_wid_err_cmdword))) {
-	err_status &= ~BRIDGE_ISR_INVLD_ADDR;
+    if ((err_status & PCIBR_ISR_INVLD_ADDR) &&
+	(0x00C00000 == (pcireg_bus_err_get(pcibr_soft) & 0xFFFFFFFFFFC00000)) &&
+	(0x00402000 == (0x00F07F00 & pcireg_cmdword_err_get(pcibr_soft)))) {
+	err_status &= ~PCIBR_ISR_INVLD_ADDR;
     }
+
     /*
-     * The bridge bug (PCIBR_LLP_CONTROL_WAR), where the llp_config or control registers
-     * need to be read back after being written, affects an MP
-     * system since there could be small windows between writing
-     * the register and reading it back on one cpu while another
-     * cpu is fielding an interrupt. If we run into this scenario,
-     * workaround the problem by ignoring the error. (bug 454474)
-     * pcibr_llp_control_war_cnt keeps an approximate number of
-     * times we saw this problem on a system.
-     */
-
-    if ((err_status & BRIDGE_ISR_INVLD_ADDR) &&
-	((((uint64_t) bridge->b_wid_err_upper << 32) | (bridge->b_wid_err_lower))
-	 == (BRIDGE_INT_RST_STAT & 0xff0))) {
-	pcibr_llp_control_war_cnt++;
-	err_status &= ~BRIDGE_ISR_INVLD_ADDR;
+     * pcibr_pioerr_dump is a systune that may be used to not
+     * print bridge registers for interrupts generated by pio-errors.
+     * Some customers do early probes and expect a lot of failed
+     * pios.
+     */
+    if (!pcibr_pioerr_dump) {
+	bridge_errors_to_dump &= ~PCIBR_ISR_PCIBUS_PIOERR;
+    } else {
+	bridge_errors_to_dump |= PCIBR_ISR_PCIBUS_PIOERR;
     }
 
-    bridge_errors_to_dump |= BRIDGE_ISR_PCIBUS_PIOERR;
-
     /* Dump/Log Bridge error interrupt info */
     if (err_status & bridge_errors_to_dump) {
-	printk("BRIDGE ERR_STATUS 0x%lx\n", err_status);
+	KERN_MSG(K_CONT, "BRIDGE ERR_STATUS 0x%lx\n", err_status);
 	pcibr_error_dump(pcibr_soft);
     }
 
     /* PIC BRINGUP WAR (PV# 867308):
-     * Make BRIDGE_ISR_LLP_REC_SNERR & BRIDGE_ISR_LLP_REC_CBERR fatal errors
+     * Make PCIBR_ISR_LLP_REC_SNERR and PCIBR_ISR_LLP_REC_CBERR fatal errors
      * so we know we've hit the problem defined in PV 867308 that we believe
      * has only been seen in simulation
      */
     if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV867308, pcibr_soft) &&
-        (err_status & (BRIDGE_ISR_LLP_REC_SNERR | BRIDGE_ISR_LLP_REC_CBERR))) {
-        printk("BRIDGE ERR_STATUS 0x%lx\n", err_status);
-        pcibr_error_dump(pcibr_soft);
-        PRINT_PANIC("PCI Bridge Error interrupt killed the system");
+	(err_status & (PCIBR_ISR_LLP_REC_SNERR | PCIBR_ISR_LLP_REC_CBERR))) {
+	KERN_MSG(K_CONT, "BRIDGE ERR_STATUS 0x%lx\n", err_status);
+	pcibr_error_dump(pcibr_soft);
+	/* machine_error_dump(""); */
+	panic("PCI Bridge Error interrupt killed the system");
     }
 
-    if (err_status & BRIDGE_ISR_ERROR_FATAL) {
-	PRINT_PANIC("PCI Bridge Error interrupt killed the system");
+    if (err_status & PCIBR_ISR_ERROR_FATAL) {
+	/* machine_error_dump(""); */
+	panic("PCI Bridge Error interrupt killed the system");
 	    /*NOTREACHED */
     }
 
-
     /*
      * We can't return without re-enabling the interrupt, since
      * it would cause problems for devices like IOC3 (Lost
      * interrupts ?.). So, just cleanup the interrupt, and
      * use saved values later..
-     * 
-     * PIC doesn't require groups of interrupts to be cleared...
      */
-    if (IS_PIC_SOFT(pcibr_soft)) {
-	bridge->p_int_rst_stat_64 = (picreg_t)(int_status | BRIDGE_IRR_MULTI_CLR);
-    } else {
-	bridge->b_int_rst_stat = (bridgereg_t)pcibr_errintr_group(int_status);
-    }
+    pcireg_intr_reset_set(pcibr_soft, (int_status | PCIBR_IRR_MULTI_CLR));
 
     /* PIC BRINGUP WAR (PV# 856155):
      * On a PCI_X_ARB_ERR error interrupt clear the DEV_BROKE bits from
      * the b_arb register to re-enable the device.
      */
-    if (IS_PIC_SOFT(pcibr_soft) &&
+    if (IS_PIC_SOFT(pcibr_soft) && 
 		(err_status & PIC_ISR_PCIX_ARB_ERR) &&
 		PCIBR_WAR_ENABLED(PV856155, pcibr_soft)) {
-	bridge->b_arb |= (0xf << 20);
+	pcireg_arbitration_bit_set(pcibr_soft, (0xf << 20));
     }
 
     /* Zero out bserr_intstat field */
     pcibr_soft->bs_errinfo.bserr_intstat = 0;
+    return IRQ_HANDLED;
+}
+
+/*
+ * pcibr_addr_toslot
+ *      Given the 'pciaddr' find out which slot this address is
+ *      allocated to, and return the slot number.
+ *      While we have the info handy, construct the
+ *      function number, space code and offset as well.
+ *
+ * NOTE: if this routine is called, we don't know whether
+ * the address is in CFG, MEM, or I/O space. We have to guess.
+ * This will be the case on PIO stores, where the only way
+ * we have of getting the address is to check the Bridge, which
+ * stores the PCI address but not the space and not the xtalk
+ * address (from which we could get it).
+ */
+int
+pcibr_addr_toslot(pcibr_soft_t pcibr_soft,
+		  iopaddr_t pciaddr,
+		  pciio_space_t *spacep,
+		  iopaddr_t *offsetp,
+		  pciio_function_t *funcp)
+{
+    int                     s, f = 0, w;
+    iopaddr_t               base;
+    size_t                  size;
+    pciio_piospace_t        piosp;
+
+    /*
+     * Check if the address is in config space
+     */
+
+    if ((pciaddr >= PCIBR_CONFIG_BASE) && (pciaddr < PCIBR_CONFIG_END)) {
+
+	if (pciaddr >= PCIBR_CONFIG_TYPE1_BASE)
+	    pciaddr -= PCIBR_CONFIG_TYPE1_BASE;
+	else
+	    pciaddr -= PCIBR_CONFIG_BASE;
+
+	s = pciaddr / PCIBR_CONFIG_SLOT_SIZE;
+	pciaddr %= PCIBR_CONFIG_SLOT_SIZE;
+
+	if (funcp) {
+	    f = pciaddr / 0x100;
+	    pciaddr %= 0x100;
+	}
+	if (spacep)
+	    *spacep = PCIIO_SPACE_CFG;
+	if (offsetp)
+	    *offsetp = pciaddr;
+	if (funcp)
+	    *funcp = f;
+
+	return s;
+    }
+    for (s = pcibr_soft->bs_min_slot; s < PCIBR_NUM_SLOTS(pcibr_soft); ++s) {
+	int                     nf = pcibr_soft->bs_slot[s].bss_ninfo;
+	pcibr_info_h            pcibr_infoh = pcibr_soft->bs_slot[s].bss_infos;
+
+	for (f = 0; f < nf; f++) {
+	    pcibr_info_t            pcibr_info = pcibr_infoh[f];
+
+	    if (!pcibr_info)
+		continue;
+	    for (w = 0; w < 6; w++) {
+		if (pcibr_info->f_window[w].w_space
+		    == PCIIO_SPACE_NONE) {
+		    continue;
+		}
+		base = pcibr_info->f_window[w].w_base;
+		size = pcibr_info->f_window[w].w_size;
+
+		if ((pciaddr >= base) && (pciaddr < (base + size))) {
+		    if (spacep)
+			*spacep = PCIIO_SPACE_WIN(w);
+		    if (offsetp)
+			*offsetp = pciaddr - base;
+		    if (funcp)
+			*funcp = f;
+		    return s;
+		}			/* endif match */
+	    }				/* next window */
+	}				/* next func */
+    }					/* next slot */
+
+    /*
+     * Check if the address was allocated as part of the
+     * pcibr_piospace_alloc calls.
+     */
+    for (s = pcibr_soft->bs_min_slot; s < PCIBR_NUM_SLOTS(pcibr_soft); ++s) {
+	int                     nf = pcibr_soft->bs_slot[s].bss_ninfo;
+	pcibr_info_h            pcibr_infoh = pcibr_soft->bs_slot[s].bss_infos;
+
+	for (f = 0; f < nf; f++) {
+	    pcibr_info_t            pcibr_info = pcibr_infoh[f];
+
+	    if (!pcibr_info)
+		continue;
+	    piosp = pcibr_info->f_piospace;
+	    while (piosp) {
+		if ((piosp->start <= pciaddr) &&
+		    ((piosp->count + piosp->start) > pciaddr)) {
+		    if (spacep)
+			*spacep = piosp->space;
+		    if (offsetp)
+			*offsetp = pciaddr - piosp->start;
+		    return s;
+		}			/* endif match */
+		piosp = piosp->next;
+	    }				/* next piosp */
+	}				/* next func */
+    }					/* next slot */
+
+    /*
+     * Some other random address on the PCI bus ...
+     * we have no way of knowing whether this was
+     * a MEM or I/O access; so, for now, we just
+     * assume that the low 1G is MEM, the next
+     * 3G is I/O, and anything above the 4G limit
+     * is obviously MEM.
+     */
+
+    if (spacep)
+	*spacep = ((pciaddr < (1ul << 30)) ? PCIIO_SPACE_MEM :
+		   (pciaddr < (4ul << 30)) ? PCIIO_SPACE_IO :
+		   PCIIO_SPACE_MEM);
+    if (offsetp)
+	*offsetp = pciaddr;
+
+    return PCIIO_SLOT_NONE;
+
 }
 
 void
 pcibr_error_cleanup(pcibr_soft_t pcibr_soft, int error_code)
 {
-    bridge_t               *bridge = pcibr_soft->bs_base;
+    uint64_t	clr_bits = PCIBR_IRR_ALL_CLR;
 
     ASSERT(error_code & IOECODE_PIO);
     error_code = error_code;
 
-    if (IS_PIC_SOFT(pcibr_soft)) {
-        bridge->p_int_rst_stat_64 = BRIDGE_IRR_PCI_GRP_CLR |
-				    PIC_PCIX_GRP_CLR |
-				    BRIDGE_IRR_MULTI_CLR;
-    } else {
-        bridge->b_int_rst_stat = BRIDGE_IRR_PCI_GRP_CLR | BRIDGE_IRR_MULTI_CLR;
-    }
+    pcireg_intr_reset_set(pcibr_soft, clr_bits);
 
-    (void) bridge->b_wid_tflush;	/* flushbus */
+    pcireg_tflush_get(pcibr_soft);	/* flushbus */
+}
+
+
+/*
+ * pcibr_error_extract
+ *      Given the 'pcibr vertex handle' find out which slot
+ *      the bridge status error address (from pcibr_soft info
+ *      hanging off the vertex)
+ *      allocated to, and return the slot number.
+ *      While we have the info handy, construct the
+ *      space code and offset as well.
+ *
+ * NOTE: if this routine is called, we don't know whether
+ * the address is in CFG, MEM, or I/O space. We have to guess.
+ * This will be the case on PIO stores, where the only way
+ * we have of getting the address is to check the Bridge, which
+ * stores the PCI address but not the space and not the xtalk
+ * address (from which we could get it).
+ *
+ * XXX- this interface has no way to return the function
+ * number on a multifunction card, even though that data
+ * is available.
+ */
+
+pciio_slot_t
+pcibr_error_extract(vertex_hdl_t pcibr_vhdl,
+		    pciio_space_t *spacep,
+		    iopaddr_t *offsetp)
+{
+    pcibr_soft_t            pcibr_soft = 0;
+    iopaddr_t               bserr_addr;
+    pciio_slot_t            slot = PCIIO_SLOT_NONE;
+    arbitrary_info_t	    rev;
+
+    /* Do a sanity check as to whether we really got a 
+     * bridge vertex handle.
+     */
+    if (hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &rev) !=
+	GRAPH_SUCCESS) 
+	return(slot);
+
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    if (pcibr_soft) {
+	bserr_addr = pcireg_pci_bus_addr_get(pcibr_soft);
+	slot = pcibr_addr_toslot(pcibr_soft, bserr_addr,
+				 spacep, offsetp, NULL);
+    }
+    return slot;
 }
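A minimal usage sketch for pcibr_error_extract() (hypothetical caller, shown only to illustrate the out-parameters; not part of the patch):

	pciio_space_t	space;
	iopaddr_t	offset;
	pciio_slot_t	slot;

	slot = pcibr_error_extract(pcibr_vhdl, &space, &offset);
	if (slot != PCIIO_SLOT_NONE)
		KERN_MSG(K_CONT, "error decoded to slot %d, space %d, offset 0x%lx\n",
			 (int)slot, (int)space, offset);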
 
 /*ARGSUSED */
@@ -1021,8 +1154,37 @@
  *      to handle the error, it expects the bus-interface to disable that
  *      device, and takes any steps needed here to take away any resources
  *      associated with this device.
+ *
+ * A note about slots:
+ *
+ * 	PIC-based bridges use zero-based device numbering when mapping devices
+ * 	to internal registers.  However, the physical slots are numbered using a
+ *	one-based scheme because in PCI-X, device 0 is reserved (see comments
+ * 	in pcibr_private.h for a better description).
+ *
+ * 	When building up the hwgraph, we use the external (one-based) number
+ *	scheme when numbering slot components so that hwgraph more accurately
+ * 	reflects what is silkscreened on the bricks.
+ *
+ * 	Since pciio_error_handler() needs to ultimately be able to do a hwgraph
+ *	lookup, the ioerror that gets built up in pcibr_pioerror() encodes the
+ *	external (one-based) slot number.  However, loops in pcibr_pioerror() 
+ * 	which attempt to translate the virtual address into the correct
+ * 	PCI physical address use the device (zero-based) numbering when 
+ * 	walking through bridge structures.
+ *
+ * 	To that end, pcibr_pioerror() uses device to denote the 
+ *	zero-based device number, and external_slot to denote the corresponding
+ *	one-based slot number.  Loop counters (e.g. cs) are always device based.
  */
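The translation between the two schemes is done with the PCIBR_DEVICE_TO_SLOT()/PCIBR_SLOT_TO_DEVICE() macros used further down. Their real definitions live in pcibr_private.h; the sketch below is only a hypothetical reduction that illustrates the off-by-one relationship described above, not the actual implementation:

	/* hypothetical illustration -- see pcibr_private.h for the real macros */
	#define PCIBR_DEVICE_TO_SLOT(pcibr_soft, dev)	((dev) + 1)	/* 0-based -> 1-based */
	#define PCIBR_SLOT_TO_DEVICE(pcibr_soft, slot)	((slot) - 1)	/* 1-based -> 0-based */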
 
+#define BEM_ADD_STR(s)	KERN_MSG(K_CONT, "%s", (s))
+#define BEM_ADD_VAR(v)	KERN_MSG(K_CONT, "\t%20s: 0x%lx\n", #v, (uint64_t)(v))
+#define BEM_ADD_REG(r)	KERN_MSG(K_CONT, "\t%20s: ", #r); print_register((r), r ## _desc)
+
+#define BEM_ADD_NSPC(n,s)    KERN_MSG(K_CONT, "\t%20s: %s\n", n, pci_space[s])
+#define BEM_ADD_SPC(s)	BEM_ADD_NSPC(#s, s)
+
 /* BEM_ADD_IOE doesn't dump the whole ioerror, it just
  * decodes the PCI specific portions -- we count on our
  * callers to dump the raw IOE data.
@@ -1042,26 +1204,32 @@
 									\
 		switch (spc) {						\
 		case PCIIO_SPACE_CFG:					\
-		    printk("\tPCI Slot %d Func %d CFG space Offset 0x%lx\n",\
-			    	pciio_widgetdev_slot_get(widdev),	\
-	    			pciio_widgetdev_func_get(widdev),	\
-				busaddr);				\
+		    KERN_MSG(K_CONT,					\
+			    "\tPCI Slot %d Func %d CFG space Offset 0x%lx\n",\
+			    pciio_widgetdev_slot_get(widdev),		\
+			    pciio_widgetdev_func_get(widdev),		\
+			    busaddr);					\
 		    break;						\
 		case PCIIO_SPACE_IO:					\
-		    printk("\tPCI I/O space  Offset 0x%lx\n", busaddr);	\
+		    KERN_MSG(K_CONT,					\
+			    "\tPCI I/O space  Offset 0x%lx\n",		\
+			    busaddr);					\
 		    break;						\
 		case PCIIO_SPACE_MEM:					\
 		case PCIIO_SPACE_MEM32:					\
 		case PCIIO_SPACE_MEM64:					\
-		    printk("\tPCI MEM space Offset 0x%lx\n", busaddr);	\
+		    KERN_MSG(K_CONT,					\
+			    "\tPCI MEM space Offset 0x%lx\n",		\
+			    busaddr);					\
 		    break;						\
 		default:						\
 		    if (win < 6) {					\
-		    printk("\tPCI Slot %d Func %d Window %ld Offset 0x%lx\n",\
-	    			pciio_widgetdev_slot_get(widdev),	\
-	    			pciio_widgetdev_func_get(widdev),	\
-			    	win,					\
-			    	busaddr);				\
+		    KERN_MSG(K_CONT,					\
+			    "\tPCI Slot %d Func %d Window %ld Offset 0x%lx\n",\
+			    pciio_widgetdev_slot_get(widdev),		\
+			    pciio_widgetdev_func_get(widdev),		\
+			    win,					\
+			    busaddr);					\
 		    }							\
 		    break;						\
 		}							\
@@ -1079,15 +1247,14 @@
     int                     retval = IOERROR_HANDLED;
 
     vertex_hdl_t            pcibr_vhdl = pcibr_soft->bs_vhdl;
-    bridge_t               *bridge = pcibr_soft->bs_base;
-
     iopaddr_t               bad_xaddr;
 
     pciio_space_t           raw_space;	/* raw PCI space */
     iopaddr_t               raw_paddr;	/* raw PCI address */
 
     pciio_space_t           space;	/* final PCI space */
-    pciio_slot_t            slot;	/* final PCI slot, if appropriate */
+    pciio_slot_t            device;	/* final PCI device if appropriate */
+    pciio_slot_t            external_slot;/* external slot for device */
     pciio_function_t        func;	/* final PCI func, if appropriate */
     iopaddr_t               offset;	/* final PCI offset */
     
@@ -1106,19 +1273,19 @@
     IOERROR_GETVALUE(bad_xaddr, ioe, xtalkaddr);
 
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ERROR_HDLR, pcibr_soft->bs_conn,
-                "pcibr_pioerror: pcibr_soft=0x%x, bad_xaddr=0x%x\n",
+                "pcibr_pioerror: pcibr_soft=0x%lx, bad_xaddr=0x%lx\n",
 		pcibr_soft, bad_xaddr));
 
-    slot = PCIIO_SLOT_NONE;
+    device = PCIIO_SLOT_NONE;
     func = PCIIO_FUNC_NONE;
     raw_space = PCIIO_SPACE_NONE;
     raw_paddr = 0;
 
-    if ((bad_xaddr >= PCIBR_BUS_TYPE0_CFG_DEV0(pcibr_soft)) &&
+    if ((bad_xaddr >= PCIBR_BUS_TYPE0_CFG_DEV(pcibr_soft, 0)) &&
 	(bad_xaddr < PCIBR_TYPE1_CFG(pcibr_soft))) {
-	raw_paddr = bad_xaddr - PCIBR_BUS_TYPE0_CFG_DEV0(pcibr_soft);
-	slot = raw_paddr / BRIDGE_TYPE0_CFG_SLOT_OFF;
-	raw_paddr = raw_paddr % BRIDGE_TYPE0_CFG_SLOT_OFF;
+	raw_paddr = bad_xaddr - PCIBR_BUS_TYPE0_CFG_DEV(pcibr_soft, 0);
+	device = raw_paddr / PCIBR_CONFIG_SLOT_SIZE;
+	raw_paddr = raw_paddr % PCIBR_CONFIG_SLOT_SIZE;
 	raw_space = PCIIO_SPACE_CFG;
     }
     if ((bad_xaddr >= PCIBR_TYPE1_CFG(pcibr_soft)) &&
@@ -1130,16 +1297,16 @@
 	raw_paddr = bad_xaddr - PCIBR_TYPE1_CFG(pcibr_soft);
 	raw_space = PCIIO_SPACE_CFG;
     }
-    if ((bad_xaddr >= PCIBR_BRIDGE_DEVIO0(pcibr_soft)) &&
-	(bad_xaddr < PCIBR_BRIDGE_DEVIO(pcibr_soft, BRIDGE_DEV_CNT))) {
+    if ((bad_xaddr >= PCIBR_BRIDGE_DEVIO(pcibr_soft, 0)) &&
+	(bad_xaddr < PCIBR_BRIDGE_DEVIO(pcibr_soft, PCIBR_DEV_CNT))) {
 	int                     x;
 
-	raw_paddr = bad_xaddr - PCIBR_BRIDGE_DEVIO0(pcibr_soft);
-	x = raw_paddr / BRIDGE_DEVIO_OFF;
-	raw_paddr %= BRIDGE_DEVIO_OFF;
+	raw_paddr = bad_xaddr - PCIBR_BRIDGE_DEVIO(pcibr_soft, 0);
+	x = raw_paddr / PCIBR_DEVIO_OFF;
+	raw_paddr %= PCIBR_DEVIO_OFF;
 	/* first two devio windows are double-sized */
 	if ((x == 1) || (x == 3))
-	    raw_paddr += BRIDGE_DEVIO_OFF;
+	    raw_paddr += PCIBR_DEVIO_OFF;
 	if (x > 0)
 	    x--;
 	if (x > 1)
@@ -1161,35 +1328,58 @@
 	     * our best decode shot.
 	     */
 	    raw_space = pcibr_soft->bs_slot[x].bss_device
-		& BRIDGE_DEV_DEV_IO_MEM
+		& PCIBR_DEV_DEV_IO_MEM
 		? PCIIO_SPACE_MEM
 		: PCIIO_SPACE_IO;
 	    raw_paddr +=
 		(pcibr_soft->bs_slot[x].bss_device &
-		 BRIDGE_DEV_OFF_MASK) <<
-		BRIDGE_DEV_OFF_ADDR_SHFT;
+		 PCIBR_DEV_OFF_MASK) <<
+		 PCIBR_DEV_OFF_ADDR_SHFT;
 	} else
 	    raw_paddr += pcibr_soft->bs_slot[x].bss_devio.bssd_base;
     }
-    if ((bad_xaddr >= BRIDGE_PCI_MEM32_BASE) &&
-	(bad_xaddr <= BRIDGE_PCI_MEM32_LIMIT)) {
-	raw_space = PCIIO_SPACE_MEM32;
-	raw_paddr = bad_xaddr - BRIDGE_PCI_MEM32_BASE;
-    }
-    if ((bad_xaddr >= BRIDGE_PCI_MEM64_BASE) &&
-	(bad_xaddr <= BRIDGE_PCI_MEM64_LIMIT)) {
-	raw_space = PCIIO_SPACE_MEM64;
-	raw_paddr = bad_xaddr - BRIDGE_PCI_MEM64_BASE;
-    }
-    if ((bad_xaddr >= BRIDGE_PCI_IO_BASE) &&
-	(bad_xaddr <= BRIDGE_PCI_IO_LIMIT)) {
-	raw_space = PCIIO_SPACE_IO;
-	raw_paddr = bad_xaddr - BRIDGE_PCI_IO_BASE;
+
+    if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) {
+    	if ((bad_xaddr >= PICBRIDGE0_PCI_MEM32_BASE) &&
+	    (bad_xaddr <= PICBRIDGE0_PCI_MEM32_LIMIT)) {
+	    raw_space = PCIIO_SPACE_MEM32;
+	    raw_paddr = bad_xaddr - PICBRIDGE0_PCI_MEM32_BASE;
+    	}
+    	if ((bad_xaddr >= PICBRIDGE0_PCI_MEM64_BASE) &&
+	    (bad_xaddr <= PICBRIDGE0_PCI_MEM64_LIMIT)) {
+	    raw_space = PCIIO_SPACE_MEM64;
+	    raw_paddr = bad_xaddr - PICBRIDGE0_PCI_MEM64_BASE;
+    	}
+    } else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) {
+    	if ((bad_xaddr >= PICBRIDGE1_PCI_MEM32_BASE) &&
+	    (bad_xaddr <= PICBRIDGE1_PCI_MEM32_LIMIT)) {
+	    raw_space = PCIIO_SPACE_MEM32;
+	    raw_paddr = bad_xaddr - PICBRIDGE1_PCI_MEM32_BASE;
+    	}
+    	if ((bad_xaddr >= PICBRIDGE1_PCI_MEM64_BASE) &&
+	    (bad_xaddr <= PICBRIDGE1_PCI_MEM64_LIMIT)) {
+	    raw_space = PCIIO_SPACE_MEM64;
+	    raw_paddr = bad_xaddr - PICBRIDGE1_PCI_MEM64_BASE;
+    	}
+    } else if (IS_TIOCP_SOFT(pcibr_soft)) {
+	if ((bad_xaddr >= TIOCP_BRIDGE_PCI_MEM32_BASE) &&
+	    (bad_xaddr <= TIOCP_BRIDGE_PCI_MEM32_LIMIT)) {
+	    raw_space = PCIIO_SPACE_MEM32;
+	    raw_paddr = bad_xaddr - TIOCP_BRIDGE_PCI_MEM32_BASE;
+	}
+	if ((bad_xaddr >= TIOCP_BRIDGE_PCI_MEM64_BASE) &&
+	    (bad_xaddr <= TIOCP_BRIDGE_PCI_MEM64_LIMIT)) {
+	    raw_space = PCIIO_SPACE_MEM64;
+	    raw_paddr = bad_xaddr - TIOCP_BRIDGE_PCI_MEM64_BASE;
+	}
+    } else {
+	panic("pcibr_pioerror(): unknown bridge type");
     }
+
     space = raw_space;
     offset = raw_paddr;
 
-    if ((slot == PCIIO_SLOT_NONE) && (space != PCIIO_SPACE_NONE)) {
+    if ((device == PCIIO_SLOT_NONE) && (space != PCIIO_SPACE_NONE)) {
 	/* we've got a space/offset but not which
 	 * PCI slot decodes it. Check through our
 	 * notions of which devices decode where.
@@ -1203,16 +1393,16 @@
 
 	for (cs = pcibr_soft->bs_min_slot; 
 		(cs < PCIBR_NUM_SLOTS(pcibr_soft)) && 
-					(slot == PCIIO_SLOT_NONE); cs++) {
+				(device == PCIIO_SLOT_NONE); cs++) {
 	    int                     nf = pcibr_soft->bs_slot[cs].bss_ninfo;
 	    pcibr_info_h            pcibr_infoh = pcibr_soft->bs_slot[cs].bss_infos;
 
-	    for (cf = 0; (cf < nf) && (slot == PCIIO_SLOT_NONE); cf++) {
+	    for (cf = 0; (cf < nf) && (device == PCIIO_SLOT_NONE); cf++) {
 		pcibr_info_t            pcibr_info = pcibr_infoh[cf];
 
 		if (!pcibr_info)
 		    continue;
-		for (cw = 0; (cw < 6) && (slot == PCIIO_SLOT_NONE); ++cw) {
+		for (cw = 0; (cw < 6) && (device == PCIIO_SLOT_NONE); ++cw) {
 		    if (((wx = pcibr_info->f_window[cw].w_space) != PCIIO_SPACE_NONE) &&
 			((wb = pcibr_info->f_window[cw].w_base) != 0) &&
 			((ws = pcibr_info->f_window[cw].w_size) != 0) &&
@@ -1228,7 +1418,7 @@
 			     ((space == PCIIO_SPACE_MEM) ||
 			      (space == PCIIO_SPACE_MEM32) ||
 			      (space == PCIIO_SPACE_MEM64)))) {
-			    slot = cs;
+			    device = cs;
 			    func = cf;
 			    space = PCIIO_SPACE_WIN(cw);
 			    offset -= wb;
@@ -1237,7 +1427,7 @@
 		}			/* next window unless slot set */
 	    }				/* next func unless slot set */
 	}				/* next slot unless slot set */
-	/* XXX- if slot is still -1, no PCI devices are
+	/* XXX- if device is still -1, no PCI devices are
 	 * decoding here using their standard PCI BASE
 	 * registers. This would be a really good place
 	 * to cross-coordinate with the pciio PCI
@@ -1281,7 +1471,7 @@
 	    wb = map->bp_pciaddr;
 	    ws = map->bp_mapsz;
 	    cw = wx - PCIIO_SPACE_WIN(0);
-	    if (cw < 6) {
+	    if (cw >= 0 && cw < 6) {
 		wb += pcibr_soft->bs_slot[cs].bss_window[cw].bssw_base;
 		wx = pcibr_soft->bs_slot[cs].bss_window[cw].bssw_space;
 	    }
@@ -1294,33 +1484,37 @@
 		wx = PCIIO_SPACE_MEM;
 	    wl = wb + ws;
 	    if ((wx == raw_space) && (raw_paddr >= wb) && (raw_paddr < wl)) {
-		atomic_inc(&map->bp_toc[0]);
-		if (slot == PCIIO_SLOT_NONE) {
-		    slot = cs;
+		ATOMIC_INC(&map->bp_toc);
+		if (device == PCIIO_SLOT_NONE) {
+		    device = cs;
+		    func = cf;
 		    space = map->bp_space;
-		    if (cw < 6)
-			offset -= pcibr_soft->bs_slot[cs].bss_window[cw].bssw_base;
+		    if (cw >= 0 && cw < 6)
+			offset -= pcibr_soft->bs_slot[device].bss_window[cw].bssw_base;
 		}
+
+		break;
 	    }
 	    }
 	}
     }
 
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ERROR_HDLR, pcibr_soft->bs_conn,
-                "pcibr_pioerror: offset=0x%x, slot=0x%x, func=0x%x\n",
-		offset, slot, func));
+                "pcibr_pioerror: space=%d, offset=0x%lx, dev=0x%x, func=0x%x\n",
+		space, offset, device, func));
 
     if (space != PCIIO_SPACE_NONE) {
-	if (slot != PCIIO_SLOT_NONE) {
-	    if (func != PCIIO_FUNC_NONE) {
+	if (device != PCIIO_SLOT_NONE)  {
+	    external_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, device);
+
+	    if (func != PCIIO_FUNC_NONE)
 		IOERROR_SETVALUE(ioe, widgetdev, 
-				 pciio_widgetdev_create(slot,func));
-	    }
-	    else {
+				 pciio_widgetdev_create(external_slot,func));
+	    else
     		IOERROR_SETVALUE(ioe, widgetdev, 
-				 pciio_widgetdev_create(slot,0));
-	    }
+				 pciio_widgetdev_create(external_slot,0));
 	}
+
 	IOERROR_SETVALUE(ioe, busspace, space);
 	IOERROR_SETVALUE(ioe, busaddr, offset);
     }
@@ -1335,7 +1529,7 @@
 	/* if appropriate, give the error handler for this slot
 	 * a shot at this probe access as well.
 	 */
-	return (slot == PCIIO_SLOT_NONE) ? IOERROR_HANDLED :
+	return (device == PCIIO_SLOT_NONE) ? IOERROR_HANDLED :
 	    pciio_error_handler(pcibr_vhdl, error_code, mode, ioe);
     }
     /*
@@ -1346,7 +1540,7 @@
      */
 
     if (space == PCIIO_SPACE_NONE) {
-	printk("XIO Bus Error at %s\n"
+	KERN_MSG(K_CONT, "XIO Bus Error at %s\n"
 		"\taccess to XIO bus offset 0x%lx\n"
 		"\tdoes not correspond to any PCI address\n",
 		pcibr_soft->bs_name, bad_xaddr);
@@ -1402,7 +1596,7 @@
 	     * depending on debug level and which error code.
 	     */
 
-	    printk(KERN_ALERT
+	    KERN_MSG(K_ALERT,
 		    "PIO Error on PCI Bus %s",
 		    pcibr_soft->bs_name);
 	    /* this decodes part of the ioe; our caller
@@ -1413,39 +1607,31 @@
 	}
 #if defined(FORCE_ERRORS)
 	if (0) {
-#elif !DEBUG
-	if (kdebug) {
-#endif
 	    /*
 	     * Dump raw data from Bridge/PCI layer.
 	     */
-
 	    BEM_ADD_STR("Raw info from Bridge/PCI layer:\n");
-	    if (IS_PIC_SOFT(pcibr_soft)) {
-		if (bridge->p_int_status_64 & (picreg_t)BRIDGE_ISR_PCIBUS_PIOERR)
-		    pcibr_error_dump(pcibr_soft);
-	    } else {
-		if (bridge->b_int_status & (bridgereg_t)BRIDGE_ISR_PCIBUS_PIOERR)
-		    pcibr_error_dump(pcibr_soft);
-	    }
+	    if (pcireg_intr_status_get(pcibr_soft) & PCIBR_ISR_PCIBUS_PIOERR)
+		pcibr_error_dump(pcibr_soft);
+
 	    BEM_ADD_SPC(raw_space);
 	    BEM_ADD_VAR(raw_paddr);
 	    if (IOERROR_FIELDVALID(ioe, widgetdev)) {
 		short widdev;
 		IOERROR_GETVALUE(widdev, ioe, widgetdev);
-		slot = pciio_widgetdev_slot_get(widdev);
+		external_slot = pciio_widgetdev_slot_get(widdev);
+		device = PCIBR_SLOT_TO_DEVICE(pcibr_soft, external_slot);
 		func = pciio_widgetdev_func_get(widdev);
-		if (slot < PCIBR_NUM_SLOTS(pcibr_soft)) {
-		    bridgereg_t             device = bridge->b_device[slot].reg;
+		if (device < PCIBR_NUM_SLOTS(pcibr_soft)) {
+		    uint64_t device_reg = pcireg_device_get(pcibr_soft,device);
 
-		    BEM_ADD_VAR(slot);
+		    BEM_ADD_VAR(device);
 		    BEM_ADD_VAR(func);
-		    BEM_ADD_REG(device);
+		    BEM_ADD_REG(device_reg);
 		}
 	    }
-#if !DEBUG || defined(FORCE_ERRORS)
 	}
-#endif
+#endif	/* FORCE_ERRORS */
 
 	/*
 	 * Since error could not be handled at lower level,
@@ -1456,19 +1642,22 @@
 	 * dependent on INT_ENABLE register. This write just makes sure
 	 * that if the interrupt was enabled, we do get the interrupt.
 	 *
-	 * CAUTION: Resetting bit BRIDGE_IRR_PCI_GRP_CLR, acknowledges
+	 * CAUTION: Resetting bit PCIBR_IRR_PCI_GRP_CLR, acknowledges
 	 *      a group of interrupts. If while handling this error,
 	 *      some other error has occurred, that would be
 	 *      implicitly cleared by this write.
 	 *      Need a way to ensure we don't inadvertently clear some
 	 *      other errors.
 	 */
+
 	if (IOERROR_FIELDVALID(ioe, widgetdev)) {
-		short widdev;
-		IOERROR_GETVALUE(widdev, ioe, widgetdev);
-		pcibr_device_disable(pcibr_soft, 
-				 pciio_widgetdev_slot_get(widdev));
+	    short widdev;
+	    IOERROR_GETVALUE(widdev, ioe, widgetdev);
+	    external_slot = pciio_widgetdev_slot_get(widdev);
+	    device = PCIBR_SLOT_TO_DEVICE(pcibr_soft, external_slot);
+	    pcibr_device_disable(pcibr_soft, device);
 	}
+
 	if (mode == MODE_DEVUSERERROR)
 	    pcibr_error_cleanup(pcibr_soft, error_code);
     }
@@ -1481,7 +1670,6 @@
  *      This routine will identify the <device, address> that caused the error,
  *      and try to invoke the appropriate bus service to handle this.
  */
-
 int
 pcibr_dmard_error(
 		     pcibr_soft_t pcibr_soft,
@@ -1490,10 +1678,8 @@
 		     ioerror_t *ioe)
 {
     vertex_hdl_t            pcibr_vhdl = pcibr_soft->bs_vhdl;
-    bridge_t               *bridge = pcibr_soft->bs_base;
-    bridgereg_t             bus_lowaddr, bus_uppraddr;
     int                     retval = 0;
-    int                     bufnum;
+    int                     bufnum, device;
 
     /*
      * In case of DMA errors, bridge should have logged the
@@ -1506,24 +1692,14 @@
 	IOERROR_GETVALUE(tmp, ioe, widgetnum);
 	ASSERT(tmp == pcibr_soft->bs_xid);
     }
-    ASSERT(bridge);
 
     /*
      * read error log registers
      */
-    bus_lowaddr = bridge->b_wid_resp_lower;
-    bus_uppraddr = bridge->b_wid_resp_upper;
-
-    bufnum = BRIDGE_RESP_ERRUPPR_BUFNUM(bus_uppraddr);
-    IOERROR_SETVALUE(ioe, widgetdev, 
-		     pciio_widgetdev_create(
-				    BRIDGE_RESP_ERRUPPR_DEVICE(bus_uppraddr),
-				    0));
-    IOERROR_SETVALUE(ioe, busaddr,
-		     (bus_lowaddr |
-		      ((iopaddr_t)
-		       (bus_uppraddr &
-			BRIDGE_ERRUPPR_ADDRMASK) << 32)));
+    bufnum = pcireg_resp_err_buf_get(pcibr_soft);
+    device = pcireg_resp_err_dev_get(pcibr_soft);
+    IOERROR_SETVALUE(ioe, widgetdev, pciio_widgetdev_create(device, 0));
+    IOERROR_SETVALUE(ioe, busaddr, pcireg_resp_err_get(pcibr_soft));
 
     /*
      * need to ensure that the xtalk address in ioe
@@ -1541,12 +1717,12 @@
     }
 
     /*
-     * Re-enable bridge to interrupt on BRIDGE_IRR_RESP_BUF_GRP_CLR
-     * NOTE: Wheather we get the interrupt on BRIDGE_IRR_RESP_BUF_GRP_CLR or
+     * Re-enable bridge to interrupt on PCIBR_IRR_RESP_BUF_GRP_CLR
+     * NOTE: Whether we get the interrupt on PCIBR_IRR_RESP_BUF_GRP_CLR or
      * not is dependent on INT_ENABLE register. This write just makes sure
      * that if the interrupt was enabled, we do get the interrupt.
      */
-    bridge->b_int_rst_stat = BRIDGE_IRR_RESP_BUF_GRP_CLR;
+    pcireg_intr_reset_set(pcibr_soft, PCIBR_IRR_RESP_BUF_GRP_CLR);
 
     /*
      * Also, release the "bufnum" back to buffer pool that could be re-used.
@@ -1555,19 +1731,13 @@
      */
 
     {
-	reg_p                   regp;
-	bridgereg_t             regv;
-	bridgereg_t             mask;
-
-	regp = (bufnum & 1)
-	    ? &bridge->b_odd_resp
-	    : &bridge->b_even_resp;
+	uint64_t		rrb_reg;
+	uint64_t		mask;
 
+	rrb_reg = pcireg_rrb_get(pcibr_soft, (bufnum & 1));
 	mask = 0xF << ((bufnum >> 1) * 4);
-
-	regv = *regp;
-	*regp = regv & ~mask;
-	*regp = regv;
+	pcireg_rrb_set(pcibr_soft, (bufnum & 1), (rrb_reg & ~mask));
+	pcireg_rrb_set(pcibr_soft, (bufnum & 1), rrb_reg);
     }
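As a worked example of the arithmetic in the block above (illustrative buffer number only):

	/* bufnum = 5:
	 *   bufnum & 1   == 1         -> the odd RRB register is selected
	 *   bufnum >> 1  == 2         -> the third 4-bit field in that register
	 *   mask         == 0xF << 8  == 0xF00
	 * Writing (rrb_reg & ~mask) and then rrb_reg again clears and restores
	 * that field, releasing the buffer for reuse.
	 */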
 
     return retval;
@@ -1637,11 +1807,11 @@
     pcibr_soft = (pcibr_soft_t) einfo;
 
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ERROR_HDLR, pcibr_soft->bs_conn,
-		"pcibr_error_handler: pcibr_soft=0x%x, error_code=0x%x\n",
+		"pcibr_error_handler: pcibr_soft=0x%lx, error_code=0x%x\n",
 		pcibr_soft, error_code));
 
 #if DEBUG && ERROR_DEBUG
-    printk( "%s: pcibr_error_handler\n", pcibr_soft->bs_name);
+    KERN_MSG(K_CONT, "%s: pcibr_error_handler\n", pcibr_soft->bs_name);
 #endif
 
     ASSERT(pcibr_soft != NULL);
@@ -1701,7 +1871,7 @@
     int		       dma_retval = -1;
 
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ERROR_HDLR, pcibr_soft->bs_conn,
-                "pcibr_error_handler_wrapper: pcibr_soft=0x%x, "
+                "pcibr_error_handler_wrapper: pcibr_soft=0x%lx, "
 		"error_code=0x%x\n", pcibr_soft, error_code));
 
     /*
@@ -1716,6 +1886,11 @@
      * the error from the PIO address.
      */
 
+#if 0
+    if (mode == MODE_DEVPROBE)
+	pio_retval = IOERROR_HANDLED;
+    else {
+#endif
     if (error_code & IOECODE_PIO) {
 	iopaddr_t               bad_xaddr;
 	/*
@@ -1734,8 +1909,8 @@
 	    pcibr_soft = pcibr_soft->bs_peers_soft;
 	    if (!pcibr_soft) {
 #if DEBUG
-		printk(KERN_WARNING "pcibr_error_handler: "
-			"bs_peers_soft==NULL. bad_xaddr= 0x%x mode= 0x%x\n",
+		KERN_MSG(K_WARN, "pcibr_error_handler: "
+			"bs_peers_soft==NULL. bad_xaddr= 0x%lx mode= 0x%x\n",
 						bad_xaddr, mode);
 #endif
   		pio_retval = IOERROR_HANDLED;
@@ -1743,11 +1918,14 @@
 	        pio_retval= pcibr_error_handler((error_handler_arg_t)pcibr_soft,
 			 (error_code & ~IOECODE_DMA), mode, ioe);
 	} else {
-	    printk(KERN_WARNING "pcibr_error_handler_wrapper(): IOECODE_PIO: "
+	    KERN_MSG(K_WARN, "pcibr_error_handler_wrapper(): IOECODE_PIO: "
 		    "saw an invalid pio address: 0x%lx\n", bad_xaddr);
 	    pio_retval = IOERROR_UNHANDLED;
 	}
     } 
+#if 0
+    } /* MODE_DEVPROBE */
+#endif
 
     /* 
      * If the error was a result of a DMA Write, we tell what bus on the PIC
@@ -1792,9 +1970,9 @@
      */
     if ((error_code & IOECODE_DMA) && (error_code & IOECODE_READ)) {
 	/*
-	 * A DMA Read error will result in a BRIDGE_ISR_RESP_XTLK_ERR
-	 * or BRIDGE_ISR_BAD_XRESP_PKT bridge error interrupt which 
-	 * are fatal interrupts (ie. BRIDGE_ISR_ERROR_FATAL) causing
+	 * A DMA Read error will result in a PCIBR_ISR_RESP_XTLK_ERR
+	 * or PCIBR_ISR_BAD_XRESP_PKT bridge error interrupt which 
+	 * are fatal interrupts (ie. PCIBR_ISR_ERROR_FATAL) causing
 	 * pcibr_error_intr_handler() to panic the system.  So is the
 	 * error handler even going to get called???  It appears that
 	 * the pcibr_dmard_error() attempts to clear the interrupts
@@ -1815,11 +1993,32 @@
      */
     if ((pio_retval == -1) && (dma_retval == -1)) {
     	return IOERROR_BADERRORCODE;
-    } else if (dma_retval != IOERROR_HANDLED) {
+    } else if ((dma_retval != IOERROR_HANDLED) && (dma_retval != -1)) {
 	return dma_retval;
-    } else if (pio_retval != IOERROR_HANDLED) {
+    } else if ((pio_retval != IOERROR_HANDLED) && (pio_retval != -1)) {
 	return pio_retval;
     } else {
 	return IOERROR_HANDLED;
     }
+}
+
+
+/*
+ * Reenable a device after handling the error.
+ * This is called by the lower layers when they wish to be reenabled
+ * after an error.
+ * Note that each layer would be calling the previous layer to reenable
+ * first, before going ahead with their own re-enabling.
+ */
+
+int
+pcibr_error_devenable(vertex_hdl_t pconn_vhdl, int error_code)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+
+    ASSERT(error_code & IOECODE_PIO);
+
+    pcibr_error_cleanup(pcibr_soft, error_code);
+    return IOERROR_HANDLED;
 }
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_hints.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_hints.c
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_hints.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_hints.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -8,34 +7,19 @@
  */
 
 #include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/module.h>
 #include <asm/sn/sgi.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/pci/pciio.h>
 #include <asm/sn/pci/pcibr.h>
 #include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_private.h>
-
-pcibr_hints_t           pcibr_hints_get(vertex_hdl_t, int);
-void                    pcibr_hints_fix_rrbs(vertex_hdl_t);
-void                    pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
-void			pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
-void                    pcibr_set_rrb_callback(vertex_hdl_t, rrb_alloc_funct_t);
-void                    pcibr_hints_handsoff(vertex_hdl_t);
-void                    pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, uint64_t);
+
+pcibr_hints_t	pcibr_hints_get(vertex_hdl_t, int);
+void		pcibr_hints_fix_rrbs(vertex_hdl_t);
+void		pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
+void		pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
+void		pcibr_set_rrb_callback(vertex_hdl_t, rrb_alloc_funct_t);
+void		pcibr_hints_handsoff(vertex_hdl_t);
+void		pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, uint64_t);
 
 pcibr_hints_t
 pcibr_hints_get(vertex_hdl_t xconn_vhdl, int alloc)
@@ -68,9 +52,7 @@
     return (pcibr_hints_t) ainfo;
 
 abnormal_exit:
-#ifdef LATER
-    printf("SHOULD NOT BE HERE\n");
-#endif
+    KERN_MSG(K_WARN, "pcibr_hints_get(): abnormal exit");
     DEL(hint);
     return(NULL);
 
@@ -160,7 +142,7 @@
     }
     hwgraph_info_get_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, &ainfo);
     if (ainfo == 0) {
-	uint64_t                *subdevp;
+	uint64_t	*subdevp;
 
 	NEW(subdevp);
 	if (!subdevp) {
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_intr.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_intr.c
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_intr.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_intr.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -8,24 +7,14 @@
  */
 
 #include <linux/types.h>
-#include <linux/slab.h>
 #include <linux/module.h>
 #include <asm/sn/sgi.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
 #include <asm/sn/pci/pciio.h>
 #include <asm/sn/pci/pcibr.h>
 #include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
 #include <asm/sn/io.h>
 #include <asm/sn/sn_private.h>
 
@@ -33,7 +22,7 @@
 inline int
 compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr)
 {
-	FIXME("compare_and_swap_ptr : NOT ATOMIC");
+	/* FIXME - compare_and_swap_ptr NOT ATOMIC */
 	if (*location == old_ptr) {
 		*location = new_ptr;
 		return(1);
@@ -43,18 +32,19 @@
 }
 #endif
 
-unsigned		pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines, int nslots);
+unsigned		pcibr_intr_bits(pciio_info_t info, 
+				pciio_intr_line_t lines, int nslots);
 pcibr_intr_t            pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
 void                    pcibr_intr_free(pcibr_intr_t);
-void              pcibr_setpciint(xtalk_intr_t);
+void              	pcibr_setpciint(xtalk_intr_t);
 int                     pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
 void                    pcibr_intr_disconnect(pcibr_intr_t);
 
 vertex_hdl_t            pcibr_intr_cpu_get(pcibr_intr_t);
-void                    pcibr_xintr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
+
 void                    pcibr_intr_func(intr_arg_t);
 
-extern pcibr_info_t      pcibr_info_get(vertex_hdl_t);
+extern pcibr_info_t	pcibr_info_get(vertex_hdl_t);
 
 /* =====================================================================
  *    INTERRUPT MANAGEMENT
@@ -102,8 +92,8 @@
     pcibr_intr_wrap_t	wrap;
 
 	if (cbuf->ib_in == cbuf->ib_out)
-	    PRINT_PANIC( "pcibr intr circular buffer empty, cbuf=0x%p, ib_in=ib_out=%d\n",
-		(void *)cbuf, cbuf->ib_out);
+	    panic("pcibr intr circular buffer empty, cbuf=0x%lx, ib_in=ib_out=%d\n",
+		(uint64_t)cbuf, cbuf->ib_out);
 
 	wrap = cbuf->ib_cbuf[cbuf->ib_out++];
 	cbuf->ib_out = cbuf->ib_out % IBUFSIZE;
@@ -117,22 +107,21 @@
 pcibr_wrap_put(pcibr_intr_wrap_t wrap, pcibr_intr_cbuf_t cbuf)
 {
 	int	in;
-	int	s;
 
 	/*
 	 * Multiple CPUs could be executing this code simultaneously
 	 * if a handler has registered multiple interrupt lines and
 	 * the interrupts are directed to different CPUs.
 	 */
-	s = mutex_spinlock(&cbuf->ib_lock);
+	spin_lock(&cbuf->ib_lock);
 	in = (cbuf->ib_in + 1) % IBUFSIZE;
 	if (in == cbuf->ib_out) 
-	    PRINT_PANIC( "pcibr intr circular buffer full, cbuf=0x%p, ib_in=%d\n",
-		(void *)cbuf, cbuf->ib_in);
+	    panic("pcibr intr circular buffer full, cbuf=0x%lx, ib_in=%d\n",
+		(uint64_t)cbuf, cbuf->ib_in);
 
 	cbuf->ib_cbuf[cbuf->ib_in] = wrap;
 	cbuf->ib_in = in;
-	mutex_spinunlock(&cbuf->ib_lock, s);
+	spin_unlock(&cbuf->ib_lock);
 	return;
 }
 
@@ -218,7 +207,7 @@
 
 	// force an interrupt.
 
-	*(bridgereg_t *)(p->force_int_addr) = 1;
+	*(volatile uint32_t *)(p->force_int_addr) = 1;
 
 	// wait for the interrupt to come back.
 
@@ -274,7 +263,6 @@
 	unsigned	bit;
 	unsigned	bits;
 	pcibr_soft_t    pcibr_soft = intr->bi_soft;
-	bridge_t       *bridge = pcibr_soft->bs_base;
 
 	bits = intr->bi_ibits;
 	for (bit = 0; bit < 8; bit++) {
@@ -283,9 +271,7 @@
 			PCIBR_DEBUG((PCIBR_DEBUG_INTR, pcibr_soft->bs_vhdl,
 		    		"pcibr_force_interrupt: bit=0x%x\n", bit));
 
-			if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
-	    			bridge->b_force_pin[bit].intr = 1;
-			}
+			pcireg_force_intr_set(pcibr_soft, bit);
 		}
 	}
 }
@@ -301,7 +287,6 @@
     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
     vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
-    bridge_t               *bridge = pcibr_soft->bs_base;
     int                     is_threaded = 0;
 
     xtalk_intr_t           *xtalk_intr_p;
@@ -315,8 +300,6 @@
     pcibr_intr_t            pcibr_intr;
     pcibr_intr_list_t       intr_entry;
     pcibr_intr_list_t       intr_list;
-    bridgereg_t             int_dev;
-
 
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
     		"pcibr_intr_alloc: %s%s%s%s%s\n",
@@ -334,16 +317,16 @@
     pcibr_intr->bi_lines = lines;
     pcibr_intr->bi_soft = pcibr_soft;
     pcibr_intr->bi_ibits = 0;		/* bits will be added below */
-    pcibr_intr->bi_func = 0;            /* unset until connect */
-    pcibr_intr->bi_arg = 0;             /* unset until connect */
+    pcibr_intr->bi_func = 0;		/* unset until connect */
+    pcibr_intr->bi_arg = 0;		/* unset until connect */
     pcibr_intr->bi_flags = is_threaded ? 0 : PCIIO_INTR_NOTHREAD;
     pcibr_intr->bi_mustruncpu = CPU_NONE;
     pcibr_intr->bi_ibuf.ib_in = 0;
     pcibr_intr->bi_ibuf.ib_out = 0;
-    mutex_spinlock_init(&pcibr_intr->bi_ibuf.ib_lock);
-    pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info, lines, 
-		PCIBR_NUM_SLOTS(pcibr_soft));
+    spin_lock_init(&pcibr_intr->bi_ibuf.ib_lock);
 
+    pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info, 
+					lines, PCIBR_NUM_SLOTS(pcibr_soft));
 
     /*
      * For each PCI interrupt line requested, figure
@@ -384,7 +367,7 @@
 			owner_dev);
 
 		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
-			    "pcibr_intr_alloc: xtalk_intr=0x%x\n", xtalk_intr));
+			    "pcibr_intr_alloc: xtalk_intr=0x%lx\n", xtalk_intr));
 
 		/* both an assert and a runtime check on this:
 		 * we need to check in non-DEBUG kernels, and
@@ -400,27 +383,20 @@
 		     * in xtalk_intr_p.
 		     */
 		    if (!*xtalk_intr_p) {
-#ifdef SUPPORT_PRINTING_V_FORMAT
-			printk(KERN_ALERT  
-				"pcibr_intr_alloc %v: unable to get xtalk interrupt resources",
-				xconn_vhdl);
-#else
-			printk(KERN_ALERT  
-				"pcibr_intr_alloc 0x%p: unable to get xtalk interrupt resources",
-				(void *)xconn_vhdl);
-#endif
+			KERN_MSG(K_ALERT, "pcibr_intr_alloc %s: "
+				"unable to get xtalk interrupt resources",
+				pcibr_soft->bs_name);
 			/* yes, we leak resources here. */
 			return 0;
 		    }
 		} else if (compare_and_swap_ptr((void **) xtalk_intr_p, NULL, xtalk_intr)) {
-		    /*
-		     * now tell the bridge which slot is
-		     * using this interrupt line.
+		    /* now tell the bridge which slot is using this 
+		     * interrupt line.
 		     */
-		    int_dev = bridge->b_int_device;
-		    int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
-		    int_dev |= pciio_slot << BRIDGE_INT_DEV_SHFT(pcibr_int_bit);
-		    bridge->b_int_device = int_dev;	/* XXXMP */
+		    pcireg_intr_device_bit_clr(pcibr_soft, 
+			    PCIBR_INT_DEV_MASK(pcibr_int_bit));
+		    pcireg_intr_device_bit_set(pcibr_soft, 
+			    (pciio_slot << PCIBR_INT_DEV_SHFT(pcibr_int_bit)));
 
 		    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
 		    		"bridge intr bit %d clears my wrb\n",
@@ -438,9 +414,9 @@
 		     * can "never happen" ...
 		     */
 		    if (!xtalk_intr) {
-			printk(KERN_ALERT  
-				"pcibr_intr_alloc %v: unable to set xtalk interrupt resources",
-				xconn_vhdl);
+			KERN_MSG(K_ALERT, "pcibr_intr_alloc %s: "
+				"unable to set xtalk interrupt resources",
+				pcibr_soft->bs_name);
 			/* yes, we leak resources here. */
 			return 0;
 		    }
@@ -453,13 +429,14 @@
 	    NEW(intr_entry);
 	    intr_entry->il_next = NULL;
 	    intr_entry->il_intr = pcibr_intr;
-	    intr_entry->il_wrbf = &(bridge->b_wr_req_buf[pciio_slot].reg);
+	    intr_entry->il_soft = pcibr_soft;
+	    intr_entry->il_slot = pciio_slot;
 	    intr_list_p = 
 		&pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
 
 	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
-			"Bridge bit 0x%x wrap=0x%x\n", pcibr_int_bit,
-			pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap));
+			"Bridge bit 0x%x wrap=0x%lx\n", pcibr_int_bit,
+			&(pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap)));
 
 	    if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
 		/* we are the first interrupt on this bridge bit.
@@ -522,12 +499,11 @@
 	}
     }
 
-#if DEBUG && INTR_DEBUG
-    printk("%v pcibr_intr_alloc complete\n", pconn_vhdl);
-#endif
     hub_intr = (hub_intr_t)xtalk_intr;
     pcibr_intr->bi_irq = hub_intr->i_bit;
     pcibr_intr->bi_cpu = hub_intr->i_cpuid;
+    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
+		"pcibr_intr_alloc complete: pcibr_intr=0x%lx\n", pcibr_intr));
     return pcibr_intr;
 }
 
@@ -566,19 +542,14 @@
 
 	    if ((!intr_shared) && (*xtalk_intrp)) {
 
-		bridge_t 	*bridge = pcibr_soft->bs_base;
-		bridgereg_t	int_dev;
-
 		xtalk_intr_free(*xtalk_intrp);
 		*xtalk_intrp = 0;
 
 		/* Clear the PCI device interrupt to bridge interrupt pin
 		 * mapping.
 		 */
-		int_dev = bridge->b_int_device;
-		int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
-		bridge->b_int_device = int_dev;
-
+		pcireg_intr_device_bit_clr(pcibr_soft, 
+			PCIBR_INT_DEV_MASK(pcibr_int_bit));
 	    }
 	}
     }
@@ -591,17 +562,21 @@
     iopaddr_t		 addr;
     xtalk_intr_vector_t	 vect;
     vertex_hdl_t	 vhdl;
-    bridge_t		*bridge;
-    picreg_t	*int_addr;
-
+    pci_bridge_t	*bridge;
+    int			 bus_num;
+    int			 pcibr_int_bit;
+    
     addr = xtalk_intr_addr_get(xtalk_intr);
     vect = xtalk_intr_vector_get(xtalk_intr);
     vhdl = xtalk_intr_dev_get(xtalk_intr);
-    bridge = (bridge_t *)xtalk_piotrans_addr(vhdl, 0, 0, sizeof(bridge_t), 0);
 
-    int_addr = (picreg_t *)xtalk_intr_sfarg_get(xtalk_intr);
-    *int_addr = ((PIC_INT_ADDR_FLD & ((uint64_t)vect << 48)) |
-		     (PIC_INT_ADDR_HOST & addr));
+    /* bus and int_bits are stored in sfarg, bus bit3, int_bits bit2:0 */
+    pcibr_int_bit = *((int *)xtalk_intr_sfarg_get(xtalk_intr)) & 0x7;
+    bus_num = ((*((int *)xtalk_intr_sfarg_get(xtalk_intr)) & 0x8) >> 3);
+
+    bridge = pcibr_bridge_ptr_get(vhdl, bus_num);
+    pcireg_intr_addr_vect_set(bridge, pcibr_int_bit, vect);
+    pcireg_intr_addr_addr_set(bridge, pcibr_int_bit, addr);
 }
 
 /*ARGSUSED */
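For reference, a minimal sketch of the sfarg encoding used by the new pcibr_setpciint()/pcibr_intr_connect() pairing above, assuming only the layout stated in the comment (bus number in bit 3, bridge interrupt bit in bits 2:0 of bsi_int_bit); the example_* helpers are illustrative and not part of the patch:

static inline int example_sfarg_encode(int bus_num, int int_bit)
{
	/* what pcibr_intr_connect() stores in bsi_int_bit */
	return (bus_num << 3) | (int_bit & 0x7);
}

static inline void example_sfarg_decode(int sfarg, int *bus_num, int *int_bit)
{
	/* what pcibr_setpciint() recovers from xtalk_intr_sfarg_get() */
	*int_bit = sfarg & 0x7;
	*bus_num = (sfarg & 0x8) >> 3;
}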
@@ -609,18 +584,16 @@
 pcibr_intr_connect(pcibr_intr_t pcibr_intr, intr_func_t intr_func, intr_arg_t intr_arg)
 {
     pcibr_soft_t            pcibr_soft = pcibr_intr->bi_soft;
-    bridge_t               *bridge = pcibr_soft->bs_base;
     unsigned                pcibr_int_bits = pcibr_intr->bi_ibits;
     unsigned                pcibr_int_bit;
-    uint64_t		    int_enable;
-    unsigned long           s;
+    unsigned long	    s;
 
     if (pcibr_intr == NULL)
 	return -1;
 
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
-		"pcibr_intr_connect: intr_func=0x%x\n",
-		pcibr_intr));
+		"pcibr_intr_connect: intr_func=0x%lx, intr_arg=0x%lx\n",
+		intr_func, intr_arg));
 
     pcibr_intr->bi_func = intr_func;
     pcibr_intr->bi_arg = intr_arg;
@@ -634,13 +607,13 @@
      */
     for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
 	if (pcibr_int_bits & (1 << pcibr_int_bit)) {
-            pcibr_intr_wrap_t       intr_wrap;
+	    pcibr_intr_wrap_t       intr_wrap;
 	    xtalk_intr_t            xtalk_intr;
-            void                   *int_addr;
+	    void		   *int_addr;
 
 	    xtalk_intr = pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
-	    intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
 
+	    intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
 	    /*
 	     * If this interrupt line is being shared and the connect has
 	     * already been done, no need to do it again.
@@ -653,41 +626,27 @@
 	     * Use the pcibr wrapper function to handle all Bridge interrupts
 	     * regardless of whether the interrupt line is shared or not.
 	     */
-	    if (IS_PIC_SOFT(pcibr_soft)) 
-		int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
-	    else
-		int_addr = (void *)&(bridge->b_int_addr[pcibr_int_bit].addr);
-
-	    xtalk_intr_connect(xtalk_intr, pcibr_intr_func, (intr_arg_t) intr_wrap,
-					(xtalk_intr_setfunc_t) pcibr_setpciint,
-			       			(void *)int_addr);
+	    int_addr = pcireg_intr_addr_addr(pcibr_soft, pcibr_int_bit);
+	    pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit = 
+			       ((pcibr_soft->bs_busnum << 3) | pcibr_int_bit);
+	    xtalk_intr_connect(xtalk_intr,
+			       pcibr_intr_func,
+			       (intr_arg_t) intr_wrap,
+			       (xtalk_intr_setfunc_t) pcibr_setpciint,
+			       &pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit);
 
 	    pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 1;
 
 	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
-			"pcibr_setpciint: int_addr=0x%x, *int_addr=0x%x, "
-			"pcibr_int_bit=0x%x\n", int_addr,
-			 *(picreg_t *)int_addr,
+			"pcibr_setpciint: int_addr=0x%lx, *int_addr=0x%lx, "
+			"pcibr_int_bit=0x%x\n", int_addr, 
+			pcireg_intr_addr_get(pcibr_soft, pcibr_int_bit),
 			pcibr_int_bit));
 	}
 
-	/* PIC WAR. PV# 854697
-	 * On PIC we must write 64-bit MMRs with 64-bit stores
-	 */
 	s = pcibr_lock(pcibr_soft);
-	if (IS_PIC_SOFT(pcibr_soft) &&
-			PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
-	    int_enable = bridge->p_int_enable_64;
-	    int_enable |= pcibr_int_bits;
-	    bridge->p_int_enable_64 = int_enable;
-	} else {
-	    bridgereg_t int_enable;
-
-	    int_enable = bridge->b_int_enable;
-	    int_enable |= pcibr_int_bits;
-	    bridge->b_int_enable = int_enable;
-	}
-	bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
+	pcireg_intr_enable_bit_set(pcibr_soft, pcibr_int_bits);
+	pcireg_tflush_get(pcibr_soft);
 	pcibr_unlock(pcibr_soft, s);
 
     return 0;
@@ -698,12 +657,10 @@
 pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
 {
     pcibr_soft_t            pcibr_soft = pcibr_intr->bi_soft;
-    bridge_t               *bridge = pcibr_soft->bs_base;
     unsigned                pcibr_int_bits = pcibr_intr->bi_ibits;
     unsigned                pcibr_int_bit;
-    pcibr_intr_wrap_t       intr_wrap;
-    uint64_t                int_enable;
-    unsigned long           s;
+    pcibr_intr_wrap_t	    intr_wrap;
+    unsigned long	    s;
 
     /* Stop calling the function. Now.
      */
@@ -726,20 +683,9 @@
     if (!pcibr_int_bits)
 	return;
 
-    /* PIC WAR. PV# 854697
-     * On PIC we must write 64-bit MMRs with 64-bit stores
-     */
     s = pcibr_lock(pcibr_soft);
-    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
-	int_enable = bridge->p_int_enable_64;
-	int_enable &= ~pcibr_int_bits;
-	bridge->p_int_enable_64 = int_enable;
-    } else {
-	int_enable = (uint64_t)bridge->b_int_enable;
-	int_enable &= ~pcibr_int_bits;
-	bridge->b_int_enable = (bridgereg_t)int_enable;
-    }
-    bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+    pcireg_intr_enable_bit_clr(pcibr_soft, pcibr_int_bits);
+    pcireg_tflush_get(pcibr_soft); 	/* wait until Bridge PIO complete */
     pcibr_unlock(pcibr_soft, s);
 
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
@@ -748,7 +694,6 @@
 
     for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
 	if (pcibr_int_bits & (1 << pcibr_int_bit)) {
-            void                   *int_addr;
 
 	    /* if the interrupt line is now shared,
 	     * do not disconnect it.
@@ -768,22 +713,18 @@
 	     * where the another pcibr_intr_alloc()
 	     * was in progress as we disconnected.
 	     */
+	    intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
 	    if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
 		continue;
 
-	    intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
-            if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
-                continue;
-
-            if (IS_PIC_SOFT(pcibr_soft))
-                int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
-            else
-                int_addr = (void *)&(bridge->b_int_addr[pcibr_int_bit].addr);
-
+	    pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit =
+				((pcibr_soft->bs_busnum << 3) | pcibr_int_bit);
 	    xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
-				pcibr_intr_func, (intr_arg_t) intr_wrap,
-			       (xtalk_intr_setfunc_t)pcibr_setpciint,
-			       (void *)(long)pcibr_int_bit);
+			       pcibr_intr_func,
+			       (intr_arg_t) intr_wrap,
+			       (xtalk_intr_setfunc_t) pcibr_setpciint,
+			       &pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit);
+
 	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
 			"pcibr_intr_disconnect: now-sharing int_bits=0x%x\n",
 			pcibr_int_bit));
@@ -808,10 +749,9 @@
  *    INTERRUPT HANDLING
  */
 void
-pcibr_clearwidint(bridge_t *bridge)
+pcibr_clearwidint(pci_bridge_t *bridge)
 {
-    bridge->b_wid_int_upper = 0;
-    bridge->b_wid_int_lower = 0;
+    pcireg_intr_dst_set(bridge, 0);
 }
 
 
@@ -821,100 +761,12 @@
     xwidgetnum_t            targ = xtalk_intr_target_get(intr);
     iopaddr_t               addr = xtalk_intr_addr_get(intr);
     xtalk_intr_vector_t     vect = xtalk_intr_vector_get(intr);
-    widgetreg_t		    NEW_b_wid_int_upper, NEW_b_wid_int_lower;
-    widgetreg_t		    OLD_b_wid_int_upper, OLD_b_wid_int_lower;
-
-    bridge_t               *bridge = (bridge_t *)xtalk_intr_sfarg_get(intr);
-
-    NEW_b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
-			       XTALK_ADDR_TO_UPPER(addr));
-    NEW_b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
-
-    OLD_b_wid_int_upper = bridge->b_wid_int_upper;
-    OLD_b_wid_int_lower = bridge->b_wid_int_lower;
-
-    /* Verify that all interrupts from this Bridge are using a single PI */
-    if ((OLD_b_wid_int_upper != 0) && (OLD_b_wid_int_lower != 0)) {
-	/*
-	 * Once set, these registers shouldn't change; they should
-	 * be set multiple times with the same values.
-	 *
-	 * If we're attempting to change these registers, it means
-	 * that our heuristics for allocating interrupts in a way
-	 * appropriate for IP35 have failed, and the admin needs to
-	 * explicitly direct some interrupts (or we need to make the
-	 * heuristics more clever).
-	 *
-	 * In practice, we hope this doesn't happen very often, if
-	 * at all.
-	 */
-	if ((OLD_b_wid_int_upper != NEW_b_wid_int_upper) ||
-	    (OLD_b_wid_int_lower != NEW_b_wid_int_lower)) {
-		printk(KERN_WARNING  "Interrupt allocation is too complex.\n");
-		printk(KERN_WARNING  "Use explicit administrative interrupt targetting.\n");
-		printk(KERN_WARNING  "bridge=0x%lx targ=0x%x\n", (unsigned long)bridge, targ);
-		printk(KERN_WARNING  "NEW=0x%x/0x%x  OLD=0x%x/0x%x\n",
-			NEW_b_wid_int_upper, NEW_b_wid_int_lower,
-			OLD_b_wid_int_upper, OLD_b_wid_int_lower);
-		PRINT_PANIC("PCI Bridge interrupt targetting error\n");
-	}
-    }
 
-    bridge->b_wid_int_upper = NEW_b_wid_int_upper;
-    bridge->b_wid_int_lower = NEW_b_wid_int_lower;
-    bridge->b_int_host_err = vect;
+    pci_bridge_t	   *bridge = (pci_bridge_t *)xtalk_intr_sfarg_get(intr);
 
-}
-
-/*
- * pcibr_intr_preset: called during mlreset time
- * if the platform specific code needs to route
- * one of the Bridge's xtalk interrupts before the
- * xtalk infrastructure is available.
- */
-void
-pcibr_xintr_preset(void *which_widget,
-		   int which_widget_intr,
-		   xwidgetnum_t targ,
-		   iopaddr_t addr,
-		   xtalk_intr_vector_t vect)
-{
-    bridge_t               *bridge = (bridge_t *) which_widget;
-
-    if (which_widget_intr == -1) {
-	/* bridge widget error interrupt */
-	bridge->b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
-				   XTALK_ADDR_TO_UPPER(addr));
-	bridge->b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
-	bridge->b_int_host_err = vect;
-printk("pcibr_xintr_preset: b_wid_int_upper 0x%lx b_wid_int_lower 0x%lx b_int_host_err 0x%x\n",
-	( (0x000F0000 & (targ << 16)) | XTALK_ADDR_TO_UPPER(addr)),
-	XTALK_ADDR_TO_LOWER(addr), vect);
-
-	/* turn on all interrupts except
-	 * the PCI interrupt requests,
-	 * at least at heart.
-	 */
-	bridge->b_int_enable |= ~BRIDGE_IMR_INT_MSK;
-
-    } else {
-	/* routing a PCI device interrupt.
-	 * targ and low 38 bits of addr must
-	 * be the same as the already set
-	 * value for the widget error interrupt.
-	 */
-	bridge->b_int_addr[which_widget_intr].addr =
-	    ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
-	     (BRIDGE_INT_ADDR_FLD & vect));
-	/*
-	 * now bridge can let it through;
-	 * NB: still should be blocked at
-	 * xtalk provider end, until the service
-	 * function is set.
-	 */
-	bridge->b_int_enable |= 1 << vect;
-    }
-    bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+    pcireg_intr_dst_target_id_set(bridge, targ);
+    pcireg_intr_dst_addr_set(bridge, addr);
+    pcireg_intr_host_err_set(bridge, vect);
 }
 
 
@@ -931,37 +783,30 @@
 pcibr_intr_func(intr_arg_t arg)
 {
     pcibr_intr_wrap_t       wrap = (pcibr_intr_wrap_t) arg;
-    reg_p                   wrbf;
     intr_func_t             func;
     pcibr_intr_t            intr;
     pcibr_intr_list_t       list;
     int                     clearit;
     int			    do_nonthreaded = 1;
     int			    is_threaded = 0;
-    int			    x = 0;
     pcibr_soft_t            pcibr_soft = wrap->iw_soft;
-    bridge_t               *bridge = pcibr_soft->bs_base;
-    uint64_t		    p_enable = pcibr_soft->bs_int_enable;
     int			    bit = wrap->iw_ibit;
 
 	/*
-	 * PIC WAR.  PV#855272
+	 * PIC BRINGUP WAR (PV# 855272):
 	 * Early attempt at a workaround for the runaway
 	 * interrupt problem.   Briefly disable the enable bit for
 	 * this device.
 	 */
 	if (IS_PIC_SOFT(pcibr_soft) &&
 			PCIBR_WAR_ENABLED(PV855272, pcibr_soft)) {
-		unsigned s;
+		unsigned long	s;
 
 		/* disable-enable interrupts for this bridge pin */
-
-		p_enable &= ~(1 << bit);
-	        s = pcibr_lock(pcibr_soft);
-		bridge->p_int_enable_64 = p_enable;
-		p_enable |= (1 << bit);
-		bridge->p_int_enable_64 = p_enable;
-	        pcibr_unlock(pcibr_soft, s);
+		s = pcibr_lock(pcibr_soft);
+		pcireg_intr_enable_bit_clr(pcibr_soft, (1 << bit));
+		pcireg_intr_enable_bit_set(pcibr_soft, (1 << bit));
+		pcibr_unlock(pcibr_soft, s);
 	}
 
 	/*
@@ -984,49 +829,44 @@
 	clearit = 1;
 	while (do_nonthreaded) {
 	    for (list = wrap->iw_list; list != NULL; list = list->il_next) {
-		if ((intr = list->il_intr) && (intr->bi_flags & PCIIO_INTR_CONNECTED)) {
+		if ((intr = list->il_intr) &&
+		    (intr->bi_flags & PCIIO_INTR_CONNECTED)) {
 
-		    /*
-		     * This device may have initiated write
-		     * requests since the bridge last saw
-		     * an edge on this interrupt input; flushing
-		     * the buffer prior to invoking the handler
-		     * should help but may not be sufficient if we 
-		     * get more requests after the flush, followed
-		     * by the card deciding it wants service, before
-		     * the interrupt handler checks to see if things need
-		     * to be done.
-		     *
-		     * There is a similar race condition if
-		     * an interrupt handler loops around and
-		     * notices further service is required.
-		     * Perhaps we need to have an explicit
-		     * call that interrupt handlers need to
-		     * do between noticing that DMA to memory
-		     * has completed, but before observing the
-		     * contents of memory?
-		     */
+
+		/*
+		 * This device may have initiated write
+		 * requests since the bridge last saw
+		 * an edge on this interrupt input; flushing
+		 * the buffer prior to invoking the handler
+		 * should help but may not be sufficient if we 
+		 * get more requests after the flush, followed
+		 * by the card deciding it wants service, before
+		 * the interrupt handler checks to see if things need
+		 * to be done.
+		 *
+		 * There is a similar race condition if
+		 * an interrupt handler loops around and
+		 * notices further service is required.
+		 * Perhaps we need to have an explicit
+		 * call that interrupt handlers need to
+		 * do between noticing that DMA to memory
+		 * has completed, but before observing the
+		 * contents of memory?
+		 */
 
 		    if ((do_nonthreaded) && (!is_threaded)) {
 			/* Non-threaded -  Call the interrupt handler at interrupt level */
 			/* Only need to flush write buffers if sharing */
 
-			if ((wrap->iw_shared) && (wrbf = list->il_wrbf)) {
-			    if ((x = *wrbf))	/* write request buffer flush */
-#ifdef SUPPORT_PRINTING_V_FORMAT
-				printk(KERN_ALERT  "pcibr_intr_func %v: \n"
-				    "write buffer flush failed, wrbf=0x%x\n", 
-				    list->il_intr->bi_dev, wrbf);
-#else
-				printk(KERN_ALERT  "pcibr_intr_func %p: \n"
-				    "write buffer flush failed, wrbf=0x%lx\n", 
-				    (void *)list->il_intr->bi_dev, (long) wrbf);
-#endif
+			if (wrap->iw_shared) {
+			    pcireg_wrb_flush_get(list->il_soft, list->il_slot);
 			}
+
 			func = intr->bi_func;
 			if ( func )
 				func(intr->bi_arg);
 		    }
+
 		    clearit = 0;
 		}
 	    }
@@ -1051,27 +891,12 @@
 	 * list forever.
 	 */
 	if (clearit) {
-	    pcibr_soft_t            pcibr_soft = wrap->iw_soft;
-	    bridge_t               *bridge = pcibr_soft->bs_base;
-	    bridgereg_t             int_enable;
-	    bridgereg_t		    mask = 1 << wrap->iw_ibit;
-	    unsigned long           s;
+	    uint64_t		    mask = 1 << wrap->iw_ibit;
+	    unsigned long	    s;
 
-	    /* PIC BRINUGP WAR (PV# 854697):
-	     * On PIC we must write 64-bit MMRs with 64-bit stores
-	     */
 	    s = pcibr_lock(pcibr_soft);
-	    if (IS_PIC_SOFT(pcibr_soft) &&
-				PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
-		int_enable = bridge->p_int_enable_64;
-		int_enable &= ~mask;
-		bridge->p_int_enable_64 = int_enable;
-	    } else {
-		int_enable = (uint64_t)bridge->b_int_enable;
-		int_enable &= ~mask;
-		bridge->b_int_enable = (bridgereg_t)int_enable;
-	    }
-	    bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
+	    pcireg_intr_enable_bit_clr(pcibr_soft, mask);
+	    pcireg_tflush_get(pcibr_soft);
 	    pcibr_unlock(pcibr_soft, s);
 	    return;
 	}
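The disconnect and stray-interrupt (clearit) paths above share one masking idiom; a minimal sketch, assuming only the pcireg_* helpers introduced by this patch (the example_* name is hypothetical):

static void example_mask_intr_bits(pcibr_soft_t pcibr_soft, uint64_t bits)
{
	unsigned long s;

	s = pcibr_lock(pcibr_soft);
	pcireg_intr_enable_bit_clr(pcibr_soft, bits);
	pcireg_tflush_get(pcibr_soft);	/* wait until bridge PIO completes */
	pcibr_unlock(pcibr_soft, s);
}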
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_msix_intr.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_msix_intr.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_msix_intr.c	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,264 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/pci/pcibr_private.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/sn2/shub_mmr.h>
+
+extern cfg_p           pcibr_find_capability(cfg_p, unsigned);
+extern pcibr_info_t      pcibr_info_get(vertex_hdl_t);
+
+#define DBG(x...)
+
+/*
+ * pcibr_msi_alloc:  Allocate the number of MSIs requested.
+ *	NOTE: For now we will only allocate 1 MSI.
+ */
+unsigned int
+pcibr_msi_alloc(vertex_hdl_t dev,
+		int number_requested,
+		unsigned int *irqs)
+{
+
+	pcibr_info_t	pcibr_info = pcibr_info_get(dev);
+	pcibr_soft_t	pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
+	pci_bridge_t	*bridge = pcibr_soft->bs_base;
+	vertex_hdl_t	xconn_vhdl = pcibr_soft->bs_conn;
+	unsigned int	irqs_allocated = 0;
+	cfg_p		cfgw;
+	int		slot;
+	cap_msi_t	*msi_cap;
+	cap_msi64_t	*msi64p;
+	cpuid_t		cpu=0;
+	int		irq;
+	cnodeid_t	cnode;
+	int		cpuphys, slice;
+	nasid_t		nasid;
+	iopaddr_t	xtalk_addr;
+	unsigned long	msi_address;
+	xwidgetnum_t	port;
+
+	if (!irqs)
+		return 0;
+
+	/*
+	 * Check that the card supports MSI etc.
+	 * Verify number of MSI required.
+	 */
+	if (pcibr_info == NULL || bridge == NULL)
+		return 0;
+
+	slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
+	cfgw = pcibr_slot_config_addr(bridge, slot, 0);
+	if (cfgw == NULL)
+		return 0;
+
+	msi_cap = (cap_msi_t *)pcibr_find_capability(cfgw, PCI_CAP_MSI);
+	if (msi_cap == NULL){
+		return 0;
+	}
+
+	DBG("cfgw 0x%lx msi_cap 0x%lx\n", cfgw, msi_cap);
+	DBG("MSI CAPABILITY:\n");
+	DBG("PCI Bus Driver %s slot number %d\n", pcibr_soft->bs_name, slot);
+	DBG("	msi_cap_id 0x%x\n", msi_cap->msi_cap_id);
+	DBG("	msi_cap_nxt 0x%x\n", msi_cap->msi_cap_nxt);
+	DBG("	msi_control 0x%x\n", msi_cap->msi_control);
+	DBG("		msi_enable 0x%x\n", msi_cap->msi_control.msi_enable);
+	DBG("		msi_multi_msg_cap 0x%x\n", msi_cap->msi_control.msi_multi_msg_cap);
+	DBG("		msi_multi_msg_en 0x%x\n", msi_cap->msi_control.msi_multi_msg_en);
+	DBG("		msi_64bit 0x%x\n", msi_cap->msi_control.msi_64bit);
+
+	/*
+	 * Go get the logical cpu and irq
+	 */
+	cpu = intr_heuristic(xconn_vhdl, -1, &irq);
+	if (cpu == CPU_NONE) {
+		/* Need error cleanup */
+		DBG("pcibr_msi_alloc: intr_heuristic unable to allocate interrupt\n");
+		return 0;
+	}
+
+	irqs_allocated = 1;
+
+	/*
+	 * Go get the DMA address.
+	 * This should be setup as a separate routine callable from 
+	 * This should be set up as a separate routine callable from
+	 * different paths, e.g. normal line interrupt allocation.
+	cpuphys = cpu_physical_id(cpu);
+	slice = cpu_physical_id_to_slice(cpuphys);
+	nasid = cpu_physical_id_to_nasid(cpuphys);
+	cnode = cpuid_to_cnodeid(cpu);
+	
+	if (slice) {
+		xtalk_addr = SH_II_INT1 | ((unsigned long)nasid << 36) | (1UL << 47);
+	} else {
+		xtalk_addr = SH_II_INT0 | ((unsigned long)nasid << 36) | (1UL << 47);
+	}
+	port = pcibr_soft->bs_mxid;
+	msi_address = ((((uint64_t)port & 0xf) << 60) | (PCI64_ATTR_BAR | PCI64_ATTR_PREC) ) | xtalk_addr;
+
+	DBG("MSI Allocated Interrupt:\n");
+	DBG("	logical cpu %d nasid %d cnode %d xtalk_addr 0x%lx msi_address 0x%lx irq 0x%x\n",
+		cpu, nasid, cnode, xtalk_addr, msi_address, irq);
+
+
+	/*
+	 * Setup the MSI address/payload on the card.
+	 * The DMA Address requires a Target (Port) A or B.
+	 * The payload is irq | (1 << 8)
+	 */
+	msi_cap->msi_control.msi_multi_msg_en = 0; /* Means 1 MSI */
+
+	if (msi_cap->msi_control.msi_64bit){
+		msi64p = &msi_cap->msi_ad.msi64;
+		msi64p->msi_addr_lsb = msi_address & 0xffffffff;
+		msi64p->msi_addr_msb = (msi_address>> 32) & 0xffffffff;
+		msi64p->msi_data  = (1 << 8) | irq;
+	} else {
+		printk("pcibr_msi_alloc: 32-bit MSI not implemented\n");
+		return(0);
+	}
+
+	msi_cap->msi_control.msi_enable = 1;
+
+	*irqs = irq;
+	return(irqs_allocated);
+	
+}
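Condensed sketch of the MSI address computed above, under the same assumptions the code makes (SHub II interrupt MMR selected by CPU slice, nasid in bits 36 and up, bit 47 set, target widget in bits 63:60, PCI64_ATTR_BAR | PCI64_ATTR_PREC attributes); the payload in both the MSI and MSI-X paths is (1 << 8) | irq. The example_* helper is illustrative only:

static unsigned long example_msi_address(nasid_t nasid, int slice,
					 xwidgetnum_t port)
{
	iopaddr_t xtalk_addr;

	xtalk_addr = (slice ? SH_II_INT1 : SH_II_INT0) |
		     ((unsigned long)nasid << 36) | (1UL << 47);
	return ((((uint64_t)port & 0xf) << 60) |
		PCI64_ATTR_BAR | PCI64_ATTR_PREC) | xtalk_addr;
}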
+
+
+unsigned int
+pcibr_msix_alloc(vertex_hdl_t dev,
+		struct pci_dev *pci_dev,
+		int number_requested,
+		unsigned int *irqs)
+{
+
+	pcibr_info_t	pcibr_info = pcibr_info_get(dev);
+	pcibr_soft_t	pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
+	pci_bridge_t	*bridge = pcibr_soft->bs_base;
+	vertex_hdl_t	xconn_vhdl = pcibr_soft->bs_conn;
+	unsigned int	irqs_allocated = 0;
+	cfg_p		cfgw;
+	int		slot;
+	cap_msix_t	*msix_cap;
+	cpuid_t		cpu=0;
+	int		irq;
+	cnodeid_t	cnode;
+	int		cpuphys, slice;
+	nasid_t		nasid;
+	iopaddr_t	xtalk_addr;
+	unsigned long	msix_address;
+	xwidgetnum_t	port;
+	cap_msix_table_entry_t *table_base;
+	cap_msix_table_entry_t msix_entry;
+
+	if (!irqs)
+		return 0;
+
+	/*
+	 * Check that the card supports MSI etc.
+	 * Verify number of MSI required.
+	 */
+	if (pcibr_info == NULL || bridge == NULL)
+		return 0;
+
+	slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
+	cfgw = pcibr_slot_config_addr(bridge, slot, 0);
+	if (cfgw == NULL)
+		return 0;
+
+	msix_cap = (cap_msix_t *)pcibr_find_capability(cfgw, PCI_CAP_MSIX);
+	if (msix_cap == NULL){
+		DBG("Not MSIX Capable: PCI Driver %s slot number %d cfgw 0x%lx\n", pcibr_soft->bs_name, slot, cfgw);
+		return 0;
+	}
+
+	DBG("cfgw 0x%lx msix_cap 0x%lx\n", cfgw, msix_cap);
+	DBG("MSIX CAPABILITY:\n");
+	DBG("PCI Bus Driver %s slot number %d\n", pcibr_soft->bs_name, slot);
+	DBG("	msix_cap_id 0x%x\n", msix_cap->msix_cap_id);
+	DBG("	msix_cap_nxt 0x%x\n", msix_cap->msix_cap_nxt);
+	DBG("	msix_control 0x%x\n", msix_cap->msix_control);
+	DBG("		msix_table_size 0x%x\n", msix_cap->msix_control.msix_table_size);
+	DBG("		msix_func_mask 0x%x\n", msix_cap->msix_control.msix_func_mask);
+	DBG("		msix_enable 0x%x\n", msix_cap->msix_control.msix_enable);
+	DBG("	msix_table 0x%x\n", msix_cap->msix_table);
+	DBG("		msix_table_bir 0x%x\n", msix_cap->msix_table.msix_table_bir);
+	DBG("		msix_table_offset 0x%x\n", msix_cap->msix_table.msix_table_offset);
+	DBG("	msix_pba 0x%x\n", msix_cap->msix_control);
+
+	/*
+	 * Get the MSIX Base Table Address.
+	 * Base Table Address = Table Offset << 6 + Base Address Register
+	 * The above gives us the byte address of the start of the Table.
+	 */
+	table_base = (cap_msix_table_entry_t *)pci_resource_start(
+			pci_dev, msix_cap->msix_table.msix_table_bir);
+	table_base = table_base + (msix_cap->msix_table.msix_table_offset << 6);
+	msix_entry = *table_base;
+
+	/*
+	 * Go get the logical cpu and irq
+	 */
+	cpu = intr_heuristic(xconn_vhdl, -1, &irq);
+	if (cpu == CPU_NONE) {
+		/* Need error cleanup */
+		printk("pcibr_msix_alloc: intr_heuristic unable to allocate interrupt\n");
+		return 0;
+	}
+
+	irqs_allocated = 1;
+	*irqs = irq;
+
+	/*
+	 * Go get the DMA address.
+	 * This should be set up as a separate routine callable from
+	 * different paths, e.g. normal line interrupt allocation.
+	 */
+	cpuphys = cpu_physical_id(cpu);
+	slice = cpu_physical_id_to_slice(cpuphys);
+	nasid = cpu_physical_id_to_nasid(cpuphys);
+	cnode = cpuid_to_cnodeid(cpu);
+
+	if (slice) {
+		xtalk_addr = SH_II_INT1 | ((unsigned long)nasid << 36) | (1UL << 47);
+	} else {
+		xtalk_addr = SH_II_INT0 | ((unsigned long)nasid << 36) | (1UL << 47);
+	}
+	port = pcibr_soft->bs_mxid;
+	msix_address = ((((uint64_t)port & 0xf) << 60) | (PCI64_ATTR_BAR | PCI64_ATTR_PREC) ) | xtalk_addr;
+
+	DBG("MSIX Allocated Interrupt:\n");
+	DBG("	logical cpu %d nasid %d cnode %d xtalk_addr 0x%lx msix_address 0x%lx irq 0x%x\n",
+		cpu, nasid, cnode, xtalk_addr, msix_address, irq);
+
+
+	/*
+	 * Setup the MSI address/payload on the card.
+	 * The DMA Address requires a Target (Port) A or B.
+	 * The payload is irq | (1 << 8)
+	 */
+
+	/*
+	 * Enable this particular MSIX entry.
+	 */
+	msix_entry.msix_addr_lsb = msix_address & 0xffffffff;
+	msix_entry.msix_addr_msb = (msix_address>> 32) & 0xffffffff;
+	msix_entry.msix_data = (1 << 8) | irq;
+	msix_entry.msix_vec_cntl.mask_bit = 0;
+	*table_base = msix_entry;
+	msix_cap->msix_control.msix_enable = 1;
+
+	return(irqs_allocated);
+
+}
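The table-entry update at the end of pcibr_msix_alloc() can be read as the following sketch, assuming the cap_msix_table_entry_t layout used in this file (illustrative only): the entry is copied, the address/data fields and mask bit are filled in, and the whole entry is written back before msix_enable is set in the capability.

static void example_msix_entry_program(cap_msix_table_entry_t *entry,
				       unsigned long addr, unsigned int irq)
{
	cap_msix_table_entry_t e = *entry;

	e.msix_addr_lsb = addr & 0xffffffff;
	e.msix_addr_msb = (addr >> 32) & 0xffffffff;
	e.msix_data = (1 << 8) | irq;		/* same payload as the MSI path */
	e.msix_vec_cntl.mask_bit = 0;		/* unmask this vector */
	*entry = e;
}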
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_reg.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_reg.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_reg.c	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,2592 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+/***********************************************************************\
+*                                                                       *
+* NOTICE!!                                                              *
+*                                                                       *
+* This file exists both in arch/ia64/sn/io/sn2/pcibr and in             *
+* xxx/xxx/xxx/xxx/.  Please make any changes to both files.             *
+* ^^^^^^^^^^^^^^^                                                       *
+* XXX: habeck update this when we know where the file exists            *
+*                                                                       *
+\***********************************************************************/
+
+
+#ifndef	_STANDALONE
+
+#include <linux/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/pci/pcibr_private.h>
+#include <asm/sn/pci/pci_defs.h>
+
+#else	/* _STANDALONE */
+
+#include "io/io.h"
+#include "io_private.h"
+#include "tiocp.h"
+#include "sys/PCI/pic.h"
+
+#define ASSERT_ALWAYS(x)
+#define splhi()  0
+#define splx(s)
+#define panic   printf
+
+#endif	/* _STANDALONE */
+
+/* XXX: HABECK NOTES: 
+ *    -Change ASSERT_ALWAYS() to ASSERT()'s prior to final FSC
+ *    -Fix the use of splhi() 
+ */
+/*
+ * Given a void pointer, which is either a pci_bridge_t pointer or a
+ * pcibr_soft_t pointer, return the bridge_type and the bridge pointer.
+ * For performance reasons it is best to call BRIDGE_TYPE_AND_PTR_GET
+ * with a pcibr_soft struct pointer rather than a pci_bridge_t pointer,
+ * since a pci_bridge_t pointer results in a PIO to the bridge to obtain
+ * the bridge_type.  But in some cases the pcibr_soft_t pointer isn't
+ * available, thus the bridge pointer must be used (this is the reason
+ * the macro supports both pointer types).
+ *
+ * NOTE: Register accesses must be direct in this macro and not go thru
+ * the register access functions, else you'd end up with a nested macro.
+ *
+ * NOTE: The PROM always calls this code with a pci_bridge_t pointer
+ * since the pcibr_soft_t doesn't exist in PROM, thus the reason for 
+ * the _STANDALONE version of the macro.
+ */
+#define IS_IOADDR(ptr) (!(((uint64_t)(ptr) & CAC_BASE) == CAC_BASE))
+
+#ifndef _STANDALONE		/* Kernel version of the macro */
+#define BRIDGE_TYPE_AND_PTR_GET(ptr, type, bridge)  { \
+    *(type) = PCIBR_BRIDGETYPE_UNKNOWN; \
+    (bridge) = (pci_bridge_t *)0; \
+    if ((ptr) != NULL) { \
+	if (IS_IOADDR(ptr)) { \
+	    /* it's a pci bridge ptr, not a pcibr_soft struct ptr */ \
+	    uint32_t id = ((pci_bridge_id_t *)ptr)->id; \
+	    if (IS_TIOCP_BRIDGE(id)) \
+		*(type) = PCIBR_BRIDGETYPE_TIOCP; \
+	    else if (IS_PIC_BRIDGE(id)) \
+		*(type) = PCIBR_BRIDGETYPE_PIC; \
+	    (bridge) = (pci_bridge_t *)(ptr); \
+	    PCIBR_DEBUG((PCIBR_DEBUG_MMR, NULL, \
+			"MMR access from %s(), bridge=0x%lx\n", \
+			__FUNCTION__, ptr)); \
+	} else { \
+	    /* it's a pcibr_soft struct ptr, not a pci bridge ptr */ \
+	    *(type) = ((pcibr_soft_t)(ptr))->bs_bridge_type; \
+	    (bridge) = (pci_bridge_t *)(((pcibr_soft_t)(ptr))->bs_base); \
+	    PCIBR_DEBUG((PCIBR_DEBUG_MMR,((pcibr_soft_t)(ptr))->bs_vhdl, \
+			"MMR access from %s(), bridge=0x%lx, soft=0x%lx\n", \
+			__FUNCTION__, ((pcibr_soft_t)(ptr))->bs_base, ptr)); \
+	} \
+    } \
+}
+#else	/* _STANDALONE */	/* PROM version of the macro */
+#define BRIDGE_TYPE_AND_PTR_GET(ptr, type, bridge)  { \
+    *(type) = PCIBR_BRIDGETYPE_UNKNOWN; \
+    (bridge) = (pci_bridge_t *)0; \
+    if ((ptr) != NULL) { \
+	if (IS_IOADDR(ptr)) { \
+	    /* it's a pci bridge ptr, not a pcibr_soft struct ptr */ \
+	    uint32_t id = ((pci_bridge_id_t *)ptr)->id; \
+	    if (IS_TIOCP_BRIDGE(id)) \
+		*(type) = PCIBR_BRIDGETYPE_TIOCP; \
+	    else if (IS_PIC_BRIDGE(id)) \
+		*(type) = PCIBR_BRIDGETYPE_PIC; \
+	    (bridge) = (pci_bridge_t *)(ptr); \
+	} \
+    } \
+}
+#endif	/* _STANDALONE */
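Usage sketch for the dual-pointer convention described above (hypothetical callers, not part of the patch): in the kernel a pcibr_soft_t is preferred because classifying a raw bridge pointer costs a PIO read of the ID register, while PROM (_STANDALONE) callers only ever have the bridge pointer.

static uint64_t example_widget_id_kernel(pcibr_soft_t soft)
{
	return pcireg_id_get(soft);	/* type taken from soft->bs_bridge_type */
}

static uint64_t example_widget_id_prom(pci_bridge_t *bridge)
{
	return pcireg_id_get(bridge);	/* type determined by reading the MMR */
}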
+
+
+/*
+ * Identification Register Access -- Read Only			    0000_0000 
+ */
+uint64_t
+pcireg_id_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_id;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_id;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_id_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Status Register Access -- Read Only				    0000_0008
+ */
+uint64_t
+pcireg_stat_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_stat;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_stat;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_stat_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Address Bus Side Holding Register Access -- Read Only	    0000_0010
+ */
+uint64_t
+pcireg_bus_err_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_err;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_err;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_bus_err_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Control Register Access -- Read/Write			    0000_0020
+ */
+uint64_t
+pcireg_control_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_control;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_control;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_control_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_control_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+//  unsigned		s;
+
+    /* WAR for PV 439897 & 454474.  Add a readback of the control 
+     * register.  Lock to protect against MP accesses to this
+     * register along with other write-only registers (See PVs).
+     * This register isnt accessed in the "hot path" so the splhi
+     * This register isn't accessed in the "hot path" so the splhi
+     */
+//    s = splhi();
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_control = val;
+	((tiocp_t *)bridge)->cp_control;	/* WAR */
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_wid_control = val;
+	((pic_t *)bridge)->p_wid_control;	/* WAR */
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_control_set(): unknown bridgetype");
+    }
+
+//    splx(s);
+}
+
+void
+pcireg_control_bit_clr(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+//  unsigned		s;
+
+    /* WAR for PV 439897 & 454474.  Add a readback of the control
+     * register.  Lock to protect against MP accesses to this
+     * register along with other write-only registers (See PVs).
+     * This register isnt accessed in the "hot path" so the splhi
+     * This register isn't accessed in the "hot path" so the splhi
+     */
+//    s = splhi();
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_control &= ~bits;
+	((tiocp_t *)bridge)->cp_control;	/* WAR */
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_wid_control &= ~bits;
+	((pic_t *)bridge)->p_wid_control;	/* WAR */
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_control_bit_clr(): unknown bridgetype");
+    }
+
+//    splx(s);
+}
+
+void
+pcireg_control_bit_set(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+//  unsigned		s;
+
+    /* WAR for PV 439897 & 454474.  Add a readback of the control
+     * register.  Lock to protect against MP accesses to this
+     * register along with other write-only registers (See PVs).
+     * This register isnt accessed in the "hot path" so the splhi
+     * This register isn't accessed in the "hot path" so the splhi
+     */
+//    s = splhi();
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_control |= bits;
+	((tiocp_t *)bridge)->cp_control;	/* WAR */
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_wid_control |= bits;
+	((pic_t *)bridge)->p_wid_control;	/* WAR */
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_control_bit_set(): unknown bridgetype");
+    }
+
+//    splx(s);
+}
+
+
+/*
+ * Bus Speed (from control register); -- Read Only access	    0000_0020
+ * 2-bit encoding: 0b00 == 33MHz, 0b01 == 66MHz, 0b10 == 100MHz, 0b11 == 133MHz
+ */
+uint64_t
+pcireg_speed_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		speedbits;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	speedbits = ((tiocp_t *)bridge)->cp_control & TIOCP_CTRL_PCI_SPEED;
+	ret = (speedbits >> 4);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	speedbits = ((pic_t *)bridge)->p_wid_control & PIC_CTRL_PCI_SPEED;
+	ret = (speedbits >> 4);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_speed_get(): unknown bridgetype");
+    }
+    return(ret);
+}
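A hypothetical helper mapping the two-bit code returned by pcireg_speed_get() to MHz, per the encoding noted in the comment above (illustrative only):

static int example_bus_speed_mhz(pcibr_soft_t soft)
{
	static const int mhz[] = { 33, 66, 100, 133 };

	return mhz[pcireg_speed_get(soft) & 0x3];
}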
+
+
+/*
+ * Bus Mode (ie. PCIX or PCI) (from Status register);		    0000_0008
+ * 0x0 == PCI, 0x1 == PCI-X
+ */
+uint64_t
+pcireg_mode_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		pcix_active_bit;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	pcix_active_bit = ((tiocp_t *)bridge)->cp_stat & TIOCP_STAT_PCIX_ACTIVE;
+	ret = (pcix_active_bit >> TIOCP_STAT_PCIX_ACTIVE_SHFT);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	pcix_active_bit = ((pic_t *)bridge)->p_wid_stat & PIC_STAT_PCIX_ACTIVE;
+	ret = (pcix_active_bit >> PIC_STAT_PCIX_ACTIVE_SHFT);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_mode_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Request TimeOut Register Access -- Read/Write		    0000_0028
+ */
+uint64_t
+pcireg_req_timeout_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_req_timeout;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_req_timeout;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_req_timeout_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_req_timeout_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_req_timeout = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_wid_req_timeout = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_req_timeout_set(): unknown bridgetype");
+    }
+}
+
+/*
+ * Interrupt Destination Addr Register Access -- Read/Write	    0000_0038
+ */
+uint64_t
+pcireg_intr_dst_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_intr;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_int;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_dst_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_intr_dst_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_intr = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_wid_int = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_dst_set(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Intr Destination Addr Reg Access (target_id) -- Read/Write	    0000_0038
+ */
+uint64_t
+pcireg_intr_dst_target_id_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		tid_bits;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	/* Undefined in TIOCP, fallthru and return 0 */
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	tid_bits = (((pic_t *)bridge)->p_wid_int & PIC_INTR_DEST_TID);
+	ret = (tid_bits >> PIC_INTR_DEST_TID_SHFT);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_dst_target_id_get():unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_intr_dst_target_id_set(void *ptr, uint64_t target_id)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	/* Undefined in TIOCP, fallthru, do nothing */
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_wid_int &= ~PIC_INTR_DEST_TID;
+	((pic_t *)bridge)->p_wid_int |= 
+		((target_id << PIC_INTR_DEST_TID_SHFT) & PIC_INTR_DEST_TID);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_dst_target_id_set():unknown bridgetype");
+    }
+}
+
+
+/*
+ * Intr Destination Addr Register Access (addr) -- Read/Write	    0000_0038
+ */
+uint64_t
+pcireg_intr_dst_addr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_intr & TIOCP_CTALK_ADDR_MASK; 
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_int & PIC_XTALK_ADDR_MASK; 
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_dst_addr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_intr_dst_addr_set(void *ptr, uint64_t addr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_intr &= ~TIOCP_CTALK_ADDR_MASK;
+	((tiocp_t *)bridge)->cp_intr |= (addr & TIOCP_CTALK_ADDR_MASK);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_wid_int &= ~PIC_XTALK_ADDR_MASK;
+	((pic_t *)bridge)->p_wid_int |= (addr & PIC_XTALK_ADDR_MASK);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_dst_addr_set(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Cmd Word Holding Bus Side Error Register Access -- Read Only	    0000_0040
+ */
+uint64_t
+pcireg_cmdword_err_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_err_cmdword;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_err_cmdword;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_cmdword_err_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * LLP Configuration Register Access -- Read/Write		    0000_0048
+ */
+uint64_t
+pcireg_llp_cfg_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	panic("pcireg_llp_cfg_get(): undefined register");	
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_llp;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_llp_cfg_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_llp_cfg_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+//  unsigned		s;
+
+    /* WAR for PV 439897 & 454474.  Add a readback of the llp cfg
+     * register.  Lock to protect against MP accesses to this
+     * register along with other write-only registers (See PVs).
+     * This register isn't accessed in the "hot path" so the splhi
+     * shouldn't be a bottleneck
+     */
+//    s = splhi();
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	panic("pcireg_llp_cfg_set(): undefined register");	
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_wid_llp = val;
+	((pic_t *)bridge)->p_wid_llp;		/* WAR */
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_llp_cfg_set(): unknown bridgetype");
+    }
+
+//    splx(s);
+}
+
+
+/*
+ * PCI/PCIX Target Flush Register Access -- Read Only		    0000_0050
+ */
+uint64_t
+pcireg_tflush_get(void *ptr)
+{
+    short               bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+        ret = ((tiocp_t *)bridge)->cp_tflush;
+        break;
+    case PCIBR_BRIDGETYPE_PIC:
+        ret = ((pic_t *)bridge)->p_wid_tflush;
+        break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+        panic("pcireg_tflush_get(): unknown bridgetype");
+    }
+
+    /* Read of the Target Flush should always return zero */
+    ASSERT_ALWAYS(ret == 0);
+    return(ret);
+}
+
+
+/*
+ * Cmd Word Holding Link Side Error Register Access -- Read Only    0000_0058
+ */
+uint64_t
+pcireg_linkside_err_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_aux_err;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_aux_err;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_linkside_err_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCI Response Buffer Address Holding Register -- Read Only	    0000_0068
+ */
+uint64_t
+pcireg_resp_err_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_resp;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_resp;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_resp_err_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCI Resp Buffer Address Holding Reg (Address) -- Read Only	    0000_0068
+ */
+uint64_t
+pcireg_resp_err_addr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_resp & TIOCP_RSP_BUF_ADDR;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_resp & PIC_RSP_BUF_ADDR;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_resp_err_addr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+/*
+ * PCI Resp Buffer Address Holding Register (Buffer)-- Read Only    0000_0068
+ */
+uint64_t
+pcireg_resp_err_buf_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		bufnum_bits;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	bufnum_bits = (((tiocp_t *)bridge)->cp_resp & TIOCP_RSP_BUF_NUM);
+	ret = (bufnum_bits >> TIOCP_RSP_BUF_NUM_SHFT);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	bufnum_bits =(((pic_t *)bridge)->p_wid_resp_upper & PIC_RSP_BUF_NUM);
+	ret = (bufnum_bits >> PIC_RSP_BUF_NUM_SHFT);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_resp_err_buf_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCI Resp Buffer Address Holding Register (Device)-- Read Only    0000_0068
+ */
+uint64_t
+pcireg_resp_err_dev_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		devnum_bits;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	devnum_bits = (((tiocp_t *)bridge)->cp_resp & TIOCP_RSP_BUF_DEV_NUM);
+	ret = (devnum_bits >> TIOCP_RSP_BUF_DEV_NUM_SHFT);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	devnum_bits = (((pic_t *)bridge)->p_wid_resp_upper & PIC_RSP_BUF_DEV_NUM);
+	ret = (devnum_bits >> PIC_RSP_BUF_DEV_NUM_SHFT);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_resp_err_dev_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Address Holding Register Link Side Errors -- Read Only	    0000_0078
+ */
+uint64_t
+pcireg_linkside_err_addr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_addr_lkerr;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_wid_addr_lkerr;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_linkside_err_addr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Direct Mapping Register Access -- Read/Write			    0000_0080
+ */
+uint64_t
+pcireg_dirmap_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_dir_map;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_dir_map;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_dirmap_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_dirmap_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_dir_map = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_dir_map = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_dirmap_set(): unknown bridgetype");
+    }
+}
+
+void 
+pcireg_dirmap_wid_set(void *ptr, uint64_t target)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	/* Undefined in TIOCP, fallthru, do nothing */
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_dir_map &= ~PIC_DIRMAP_WID;
+	((pic_t *)bridge)->p_dir_map |= 
+			((target << PIC_DIRMAP_WID_SHFT) & PIC_DIRMAP_WID);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_dirmap_wid_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_dirmap_diroff_set(void *ptr, uint64_t dir_off)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_dir_map &= ~TIOCP_DIRMAP_DIROFF_UP;
+	((tiocp_t *)bridge)->cp_dir_map |= ((dir_off << 3) & TIOCP_DIRMAP_DIROFF_UP);
+	((tiocp_t *)bridge)->cp_dir_map &= ~TIOCP_DIRMAP_DIROFF;
+	((tiocp_t *)bridge)->cp_dir_map |= (dir_off & TIOCP_DIRMAP_DIROFF);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_dir_map &= ~PIC_DIRMAP_DIROFF;
+	((pic_t *)bridge)->p_dir_map |= (dir_off & PIC_DIRMAP_DIROFF);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_dirmap_diroff_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_dirmap_add512_set(void *ptr)
+{
+    short               bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+        ((tiocp_t *)bridge)->cp_dir_map |= TIOCP_DIRMAP_ADD512;
+        break;
+    case PCIBR_BRIDGETYPE_PIC:
+        ((pic_t *)bridge)->p_dir_map |= PIC_DIRMAP_ADD512;
+        break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+        panic("pcireg_dirmap_add512_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_dirmap_add512_clr(void *ptr)
+{
+    short               bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+        ((tiocp_t *)bridge)->cp_dir_map &= ~TIOCP_DIRMAP_ADD512;
+        break;
+    case PCIBR_BRIDGETYPE_PIC:
+        ((pic_t *)bridge)->p_dir_map &= ~PIC_DIRMAP_ADD512;
+        break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+        panic("pcireg_dirmap_add512_clr(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * SSRAM Parity Error Register Access -- Read Only		    0000_0094
+ */
+uint64_t
+pcireg_ssram_parity_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	panic("pcireg_ssram_parity_get(): undefined register");
+    case PCIBR_BRIDGETYPE_PIC:
+	panic("pcireg_ssram_parity_get(): undefined register");
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_ssram_parity_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCI Page Map Fault Address Register Access -- Read Only	    0000_0090
+ */
+uint64_t
+pcireg_map_fault_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_map_fault;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_map_fault;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_map_fault_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Arbitration Register Access -- Read/Write			    0000_00A0
+ */
+uint64_t
+pcireg_arbitration_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_arb;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_arb;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_arbitration_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_arbitration_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_arb = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_arb = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_arbitration_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_arbitration_bit_set(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_arb |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_arb |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_arbitration_bit_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_arbitration_bit_clr(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_arb &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_arb &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_arbitration_bit_clr(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Internal Ram Parity Error Register Access -- Read Only	    0000_00B0
+ */
+uint64_t
+pcireg_parity_err_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_ate_parity_err;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_ate_parity_err;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_parity_err_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCI TimeOut Register Access -- Read/Write			    0000_00C0
+ */
+uint64_t
+pcireg_timeout_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_bus_timeout;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_bus_timeout;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_timeout_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_timeout_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_bus_timeout = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_bus_timeout = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_timeout_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_timeout_bit_set(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_bus_timeout |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_bus_timeout |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_timeout_bit_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_timeout_bit_clr(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_bus_timeout &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_bus_timeout &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_timeout_bit_clr(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Type 1 Configuration Register Access -- Read/Write		    0000_00C8
+ */
+uint64_t
+pcireg_type1_cntr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_pci_cfg;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_pci_cfg;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_type1_cntr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_type1_cntr_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_pci_cfg = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_pci_cfg = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_type1_cfg_set(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * PCI Bus Error Lower Addr Holding Reg Access -- Read Only	    0000_00D8
+ */
+uint64_t
+pcireg_pci_bus_addr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_pci_err;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_pci_err;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_pci_bus_addr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCI Bus Error Addr Holding Reg Access (Address) -- Read Only	    0000_00D8
+ */
+uint64_t
+pcireg_pci_bus_addr_addr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_pci_err & TIOCP_CTALK_ADDR_MASK; 
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_pci_err & PIC_XTALK_ADDR_MASK; 
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_pci_bus_addr_addr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Interrupt Status Register Access -- Read Only		    0000_0100
+ */
+uint64_t
+pcireg_intr_status_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_int_status;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_int_status;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcibr_intr_status_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Interrupt Enable Register Access -- Read/Write		    0000_0108
+ */
+uint64_t
+pcireg_intr_enable_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_int_enable;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_int_enable;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_enable_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_intr_enable_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_enable = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_enable = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_enable_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_intr_enable_bit_clr(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_enable &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_enable &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_enable_bit_clr(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_intr_enable_bit_set(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_enable |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_enable |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_enable_bit_set(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Interrupt Reset Register Access -- Write Only		    0000_0110
+ */
+void
+pcireg_intr_reset_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_rst_stat = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_rst_stat = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_reset_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_intr_reset_bit_set(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_rst_stat |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_rst_stat |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_reset_bit_set(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Interrupt Mode Register -- Read/Write			    0000_0118
+ */
+uint64_t
+pcireg_intr_mode_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_int_mode;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_int_mode;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_mode_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_intr_mode_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_mode = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_mode = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_mode_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_intr_mode_bit_set(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_mode |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_mode |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_mode_bit_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_intr_mode_bit_clr(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_mode &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_mode &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_mode_bit_clr(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Interrupt Device Select Register Access -- Read/Write	    0000_0120
+ */
+uint64_t
+pcireg_intr_device_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_int_device;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_int_device;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_device_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_intr_device_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_device = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_device = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_device_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_intr_device_bit_set(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_device |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_device |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_device_bit_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_intr_device_bit_clr(void *ptr, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_device &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_device &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_device_bit_clr(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Host Error Interrupt Field Register Access -- Read/Write	    0000_0128
+ */
+uint64_t
+pcireg_intr_host_err_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_int_host_err;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_int_host_err;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_host_err_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_intr_host_err_set(void *ptr, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_host_err = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_host_err = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_host_err_set(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Interrupt Host Address Register -- Read/Write	0000_0130 - 0000_0168
+ */
+uint64_t
+pcireg_intr_addr_get(void *ptr, int int_n)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((int_n >= 0) && (int_n <= 7));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_int_addr[int_n];
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_int_addr[int_n];
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_addr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_intr_addr_set(void *ptr, int int_n, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((int_n >= 0) && (int_n <= 7));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_addr[int_n] = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_addr[int_n] = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_addr_set(): unknown bridgetype");
+    }
+}
+
+void *
+pcireg_intr_addr_addr(void *ptr, int int_n)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    void	       *ret = (void *)0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((int_n >= 0) && (int_n <= 7));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = (void *)&(((tiocp_t *)bridge)->cp_int_addr[int_n]);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = (void *)&(((pic_t *)bridge)->p_int_addr[int_n]);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_addr_addr(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+/*
+ * Intr Host Address Register (int_fld) -- Read/Write	0000_0130 - 0000_0168
+ */
+uint64_t
+pcireg_intr_addr_vect_get(void *ptr, int int_n)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		int_fld_bits;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((int_n >= 0) && (int_n <= 7));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	int_fld_bits = (((tiocp_t *)bridge)->cp_int_addr[int_n] & 
+			TIOCP_HOST_INTR_FLD);
+	ret = (int_fld_bits >> TIOCP_HOST_INTR_FLD_SHFT);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	int_fld_bits = (((pic_t *)bridge)->p_int_addr[int_n] &
+			PIC_HOST_INTR_FLD);
+	ret = (int_fld_bits >> PIC_HOST_INTR_FLD_SHFT);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_addr_vect_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_intr_addr_vect_set(void *ptr, int int_n, uint64_t vect)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((int_n >= 0) && (int_n <= 7));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+        ((tiocp_t *)bridge)->cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_FLD;
+	((tiocp_t *)bridge)->cp_int_addr[int_n] |= 
+		((vect << TIOCP_HOST_INTR_FLD_SHFT) & TIOCP_HOST_INTR_FLD);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+        ((pic_t *)bridge)->p_int_addr[int_n] &= ~PIC_HOST_INTR_FLD;
+	((pic_t *)bridge)->p_int_addr[int_n] |= 
+		((vect << PIC_HOST_INTR_FLD_SHFT) & PIC_HOST_INTR_FLD);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_addr_vect_get(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Intr Host Address Register (int_addr) -- Read/Write	0000_0130 - 0000_0168
+ */
+uint64_t
+pcireg_intr_addr_addr_get(void *ptr, int int_n)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((int_n >= 0) && (int_n <= 7));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = (((tiocp_t *)bridge)->cp_int_addr[int_n] & TIOCP_HOST_INTR_ADDR);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = (((pic_t *)bridge)->p_int_addr[int_n] & PIC_HOST_INTR_ADDR);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_addr_addr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_intr_addr_addr_set(void *ptr, int int_n, uint64_t addr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((int_n >= 0) && (int_n <= 7));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_ADDR;
+	((tiocp_t *)bridge)->cp_int_addr[int_n] |= (addr & TIOCP_HOST_INTR_ADDR);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR;
+	((pic_t *)bridge)->p_int_addr[int_n] |= (addr & PIC_HOST_INTR_ADDR);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_addr_addr_get(): unknown bridgetype");
+    }
+}
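+
+/*
+ * Note: each cp_int_addr[]/p_int_addr[] entry packs two fields -- the
+ * interrupt vector field (..._HOST_INTR_FLD) and the target interrupt
+ * address (..._HOST_INTR_ADDR) -- so the _vect_ and _addr_ accessors above
+ * each mask off and update only their own field, leaving the other intact.
+ */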
+
+
+/*
+ * Interrupt View Register Access -- Read Only			    0000_0170
+ */
+uint64_t
+pcireg_intr_view_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_err_int_view;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_err_int_view;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_view_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Multiple Interrupt Register Access -- Read Only		    0000_0178
+ */
+uint64_t
+pcireg_intr_multiple_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_mult_int;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_mult_int;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_intr_multiple_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Force Always Intr Register Access -- Write Only	0000_0180 - 0000_01B8
+ */
+void
+pcireg_force_always_set(void *ptr, int int_n)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((int_n >= 0) && (int_n <= 7));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_force_always[int_n] = 1;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_force_always[int_n] = 1;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_force_always_set(): unknown bridgetype");
+    }
+}
+
+void *
+pcireg_force_always_addr_get(void *ptr, int int_n)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    void	       *ret = (void *)0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((int_n >= 0) && (int_n <= 7));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = (void *)&(((tiocp_t *)bridge)->cp_force_always[int_n]);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = (void *)&(((pic_t *)bridge)->p_force_always[int_n]);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_force_always_addr_get(): unknown bridgetype");
+    }
+
+    return(ret);
+}
+
+
+/*
+ * Force Interrupt Register Access -- Write Only	0000_01C0 - 0000_01F8
+ */
+void
+pcireg_force_intr_set(void *ptr, int int_n)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((int_n >= 0) && (int_n <= 7));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_force_pin[int_n] = 1;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_force_pin[int_n] = 1;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_force_intr_set(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Device(x) Register Access -- Read/Write		0000_0200 - 0000_0218
+ */
+uint64_t
+pcireg_device_get(void *ptr, int device)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ASSERT_ALWAYS((device >= 0) && (device <= 3));
+	ret = ((tiocp_t *)bridge)->cp_device[device];
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ASSERT_ALWAYS((device >= 0) && (device <= 3));
+	ret = ((pic_t *)bridge)->p_device[device];
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_device_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_device_set(void *ptr, int device, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ASSERT_ALWAYS((device >= 0) && (device <= 3));
+	((tiocp_t *)bridge)->cp_device[device] = val; 
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ASSERT_ALWAYS((device >= 0) && (device <= 3));
+	((pic_t *)bridge)->p_device[device] = val; 
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_device_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_device_bit_set(void *ptr, int device, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ASSERT_ALWAYS((device >= 0) && (device <= 3));
+	((tiocp_t *)bridge)->cp_device[device] |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ASSERT_ALWAYS((device >= 0) && (device <= 3));
+	((pic_t *)bridge)->p_device[device] |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_device_bit_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_device_bit_clr(void *ptr, int device, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ASSERT_ALWAYS((device >= 0) && (device <= 3));
+	((tiocp_t *)bridge)->cp_device[device] &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ASSERT_ALWAYS((device >= 0) && (device <= 3));
+	((pic_t *)bridge)->p_device[device] &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_device_bit_clr(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * Device(x) Write Buffer Flush Reg Access -- Read Only	0000_0240 - 0000_0258
+ */
+uint64_t
+pcireg_wrb_flush_get(void *ptr, int device)
+{
+    short               bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+        ASSERT_ALWAYS((device >= 0) && (device <= 3));
+        ret = ((tiocp_t *)bridge)->cp_wr_req_buf[device];
+        break;
+    case PCIBR_BRIDGETYPE_PIC:
+        ASSERT_ALWAYS((device >= 0) && (device <= 3));
+        ret = ((pic_t *)bridge)->p_wr_req_buf[device];
+        break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+        panic("pcireg_wrb_flush_get(): unknown bridgetype");
+    }
+    
+    /* Read of the Write Buffer Flush should always return zero */
+    ASSERT_ALWAYS(ret == 0);
+    return(ret);
+}
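+
+/*
+ * Note: this register is read-only; the read access is what requests the
+ * write buffer flush for the device, so callers simply read and discard
+ * the (always zero) result -- see pcibr_wrb_flush().
+ */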
+
+
+/*
+ * Even/Odd RRB Register Access -- Read/Write		0000_0280 - 0000_0288
+ */
+uint64_t
+pcireg_rrb_get(void *ptr, int even_odd)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((even_odd >= 0) && (even_odd <= 1));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_rrb_map[even_odd];
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_rrb_map[even_odd];
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_rrb_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_rrb_set(void *ptr, int even_odd, uint64_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    ASSERT_ALWAYS((even_odd >= 0) && (even_odd <= 1));
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_rrb_map[even_odd] = val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_rrb_map[even_odd] = val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_rrb_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_rrb_bit_set(void *ptr, int even_odd, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_rrb_map[even_odd] |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_rrb_map[even_odd] |= bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_rrb_bit_set(): unknown bridgetype");
+    }
+}
+
+void
+pcireg_rrb_bit_clr(void *ptr, int even_odd, uint64_t bits)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	((tiocp_t *)bridge)->cp_rrb_map[even_odd] &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	((pic_t *)bridge)->p_rrb_map[even_odd] &= ~bits;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_rrb_bit_clr(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * RRB Status Register Access -- Read Only			    0000_0290
+ */
+uint64_t
+pcireg_rrb_status_get(void *ptr)
+{
+    short               bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+        ret = ((tiocp_t *)bridge)->cp_resp_status;
+        break;
+    case PCIBR_BRIDGETYPE_PIC:
+        ret = ((pic_t *)bridge)->p_resp_status;
+        break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+        panic("pcireg_rrb_status_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * RRB Clear Register Access -- Write Only			    0000_0298
+ */
+void
+pcireg_rrb_clear_set(void *ptr, uint64_t val)
+{
+    short               bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+        ((tiocp_t *)bridge)->cp_resp_clear = val; 
+        break;
+    case PCIBR_BRIDGETYPE_PIC:
+        ((pic_t *)bridge)->p_resp_clear = val; 
+        break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+        panic("pcireg_rrb_clear_set(): unknown bridgetype");
+    }
+}
+
+
+/*
+ * PCIX Bus Error Address Register Access -- Read Only		    0000_0600
+ */
+uint64_t
+pcireg_pcix_bus_err_addr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_pcix_bus_err_addr;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_pcix_bus_err_addr;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_bus_err_addr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCIX Bus Error Attribute Register Access -- Read Only	    0000_0608
+ */
+uint64_t
+pcireg_pcix_bus_err_attr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_pcix_bus_err_attr;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_pcix_bus_err_attr;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_bus_err_attr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCIX Bus Error Data Register Access -- Read Only		    0000_0610
+ */
+uint64_t
+pcireg_pcix_bus_err_data_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_pcix_bus_err_data;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_pcix_bus_err_data;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_bus_err_data_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCIX PIO Split Request Address Register Access -- Read Only	    0000_0618
+ */
+uint64_t
+pcireg_pcix_pio_split_addr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_pcix_pio_split_addr;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_pcix_pio_split_addr;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_pio_split_addr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCIX PIO Split Request Attribute Register Access -- Read Only    0000_0620
+ */
+uint64_t
+pcireg_pcix_pio_split_attr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_pcix_pio_split_attr;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_pcix_pio_split_attr;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_pio_split_attr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCIX DMA Request Error Attribute Register Access -- Read Only    0000_0628
+ */
+uint64_t
+pcireg_pcix_req_err_attr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_pcix_dma_req_err_attr;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_pcix_dma_req_err_attr;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_req_err_attr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * PCIX DMA Request Error Address Register Access -- Read Only	    0000_0630
+ */
+uint64_t
+pcireg_pcix_req_err_addr_get(void *ptr)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = ((tiocp_t *)bridge)->cp_pcix_dma_req_err_addr;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = ((pic_t *)bridge)->p_pcix_dma_req_err_addr;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_req_err_addr_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+
+/*
+ * Type 0 Configuration Space Access -- Read/Write
+ */
+cfg_p
+pcireg_type0_cfg_addr(void *ptr, uint8_t slot, uint8_t func, int off)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    cfg_p		ret = (cfg_p)0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    /*
+     * Return a config space address for the given slot/func/offset.  
+     * Note the returned ptr is a 32bit word (ie. cfg_p) aligned ptr
+     * pointing to the 32bit word that contains the "off" offset byte.
+     */
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ASSERT_ALWAYS(((int8_t)slot >= 0) && ((int8_t)slot <= 3));
+	ret = &(((tiocp_t *)bridge)->cp_type0_cfg_dev[slot].f[func].l[(off/4)]);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	/* Type 0 Config space accesses on PIC are 1-4, not 0-3 since
+         * it is a PCIX Bridge.  See sys/PCI/pic.h for explanation.
+	 */
+	slot++;
+	ASSERT_ALWAYS(((int)slot >= 1) && ((int)slot <= 4));
+	ret = &(((pic_t *)bridge)->p_type0_cfg_dev[slot].f[func].l[(off/4)]);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_type0_cfg_addr(): unknown bridgetype");
+    }
+    return(ret);
+}
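+
+/*
+ * Illustrative (hypothetical) use: to fetch the 32-bit config word holding
+ * the vendor/device ID of slot 2, func 0, a caller could do
+ *
+ *	cfg_p cfg = pcireg_type0_cfg_addr(bridge_ptr, 2, 0, 0);
+ *	uint32_t id = *(volatile uint32_t *)cfg;
+ *
+ * and then extract sub-word fields with shifts and masks, since the returned
+ * pointer is aligned to the 32-bit word containing the requested offset.
+ */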
+
+
+/*
+ * Type 1 Configuration Space Access -- Read/Write
+ */
+cfg_p
+pcireg_type1_cfg_addr(void *ptr, uint8_t func, int offset)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    cfg_p		ret = (cfg_p)0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    /*
+     * Return a config space address for the given slot/func/offset.
+     * Note the returned ptr is a 32bit word (ie. cfg_p) aligned ptr
+     * pointing to the 32bit word that contains the "offset" byte.
+     */
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ret = &(((tiocp_t *)bridge)->cp_type1_cfg.f[func].l[(offset / 4)]);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ret = &(((pic_t *)bridge)->p_type1_cfg.f[func].l[(offset / 4)]);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_type1_cfg_addr(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+
+/*
+ * Internal ATE SSRAM Access -- Read/Write 
+ */
+bridge_ate_t
+pcireg_int_ate_get(void *ptr, int ate_index)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    uint64_t		ret = 0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ASSERT_ALWAYS((ate_index >= 0) && (ate_index <= 1024));
+	ret = ((tiocp_t *)bridge)->cp_int_ate_ram[ate_index];
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ASSERT_ALWAYS((ate_index >= 0) && (ate_index <= 1024));
+	ret = ((pic_t *)bridge)->p_int_ate_ram[ate_index];
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_int_ate_get(): unknown bridgetype");
+    }
+    return(ret);
+}
+
+void
+pcireg_int_ate_set(void *ptr, int ate_index, bridge_ate_t val)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+	ASSERT_ALWAYS((ate_index >= 0) && (ate_index <= 1024));
+	((tiocp_t *)bridge)->cp_int_ate_ram[ate_index] = (tiocp_ate_t)val;
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+	ASSERT_ALWAYS((ate_index >= 0) && (ate_index <= 1024));
+	((pic_t *)bridge)->p_int_ate_ram[ate_index] = (picate_t)val;
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_int_ate_set(): unknown bridgetype");
+    }
+}
+
+bridge_ate_p
+pcireg_int_ate_addr(void *ptr, int ate_index)
+{
+    short		bridge_type;
+    pci_bridge_t       *bridge;
+    bridge_ate_p	ret = (bridge_ate_p)0;
+
+    BRIDGE_TYPE_AND_PTR_GET(ptr, &bridge_type, bridge);
+
+    switch (bridge_type) {
+    case PCIBR_BRIDGETYPE_TIOCP:
+        ASSERT_ALWAYS((ate_index >= 0) && (ate_index <= 1024));
+	ret = &(((tiocp_t *)bridge)->cp_int_ate_ram[ate_index]);
+	break;
+    case PCIBR_BRIDGETYPE_PIC:
+        ASSERT_ALWAYS((ate_index >= 0) && (ate_index <= 1024));
+	ret = &(((pic_t *)bridge)->p_int_ate_ram[ate_index]);
+	break;
+    case PCIBR_BRIDGETYPE_UNKNOWN:
+	panic("pcireg_int_ate_addr(): unknown bridgetype");
+    }
+    return(ret);
+}
+
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_rrb.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_rrb.c
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_rrb.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_rrb.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -8,91 +7,83 @@
  */
 
 #include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/module.h>
 #include <asm/sn/sgi.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
 #include <asm/sn/pci/pciio.h>
 #include <asm/sn/pci/pcibr.h>
 #include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_private.h>
-
-void              do_pcibr_rrb_clear(bridge_t *, int);
-void              do_pcibr_rrb_flush(bridge_t *, int);
-int               do_pcibr_rrb_count_valid(bridge_t *, pciio_slot_t, int);
-int               do_pcibr_rrb_count_avail(bridge_t *, pciio_slot_t);
-int               do_pcibr_rrb_alloc(bridge_t *, pciio_slot_t, int, int);
-int               do_pcibr_rrb_free(bridge_t *, pciio_slot_t, int, int);
-void		  do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
-
-void              do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
-
-int		  pcibr_wrb_flush(vertex_hdl_t);
-int               pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
-int               pcibr_rrb_check(vertex_hdl_t, int *, int *, int *, int *);
-void              pcibr_rrb_flush(vertex_hdl_t);
-int		  pcibr_slot_initial_rrb_alloc(vertex_hdl_t,pciio_slot_t);
 
-void		  pcibr_rrb_debug(char *, pcibr_soft_t);
+void		do_pcibr_rrb_clear(pci_bridge_t *, int);
+void		do_pcibr_rrb_flush(pci_bridge_t *, int);
+int		do_pcibr_rrb_count_valid(pci_bridge_t *, pciio_slot_t, int);
+int		do_pcibr_rrb_count_avail(pci_bridge_t *, pciio_slot_t);
+int		do_pcibr_rrb_alloc(pci_bridge_t *, pciio_slot_t, int, int);
+int		do_pcibr_rrb_free(pci_bridge_t *, pciio_slot_t, int, int);
+void		do_pcibr_rrb_free_all(pcibr_soft_t, pci_bridge_t *, pciio_slot_t);
+
+void		pcibr_rrb_alloc_init(pcibr_soft_t, int, int, int);
+void		pcibr_rrb_alloc_more(pcibr_soft_t, int, int, int);
+
+int		pcibr_wrb_flush(vertex_hdl_t);
+int		pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
+int		pcibr_rrb_check(vertex_hdl_t, int *, int *, int *, int *);
+int		pcibr_alloc_all_rrbs(vertex_hdl_t, int, int, int, int, 
+			     	     int, int, int, int, int);
+void		pcibr_rrb_flush(vertex_hdl_t);
+int		pcibr_slot_initial_rrb_alloc(vertex_hdl_t,pciio_slot_t);
+
+void            pcibr_rrb_debug(char *, pcibr_soft_t);
+
 
 /*
  * RRB Management
  *
- * All the do_pcibr_rrb_ routines manipulate the Read Response Buffer (rrb)
- * registers within the Bridge.	 Two 32 registers (b_rrb_map[2] also known
- * as the b_even_resp & b_odd_resp registers) are used to allocate the 16
- * rrbs to devices.  The b_even_resp register represents even num devices,
- * and b_odd_resp represent odd number devices.	 Each rrb is represented by
- * 4-bits within a register.
+ * All the do_pcibr_rrb_ routines manipulate the Read Response Buffer (RRB)
+ * registers within a Bridge.  Two 32-bit registers (one for even-numbered
+ * devices and one for odd-numbered devices) are used to allocate the 16 RRBs
+ * to devices.  Each RRB is represented by 4 bits within a register:
  *   BRIDGE & XBRIDGE:	1 enable bit, 1 virtual channel bit, 2 device bits
- *   PIC:		1 enable bit, 2 virtual channel bits, 1 device bit
- * PIC has 4 devices per bus, and 4 virtual channels (1 normal & 3 virtual)
- * per device.	BRIDGE & XBRIDGE have 8 devices per bus and 2 virtual
- * channels (1 normal & 1 virtual) per device.	See the BRIDGE and PIC ASIC
- * Programmers Reference guides for more information.
+ *   PIC & TIOCP:	1 enable bit, 2 virtual channel bits, 1 device bit
+ * PIC and TIOCP have 4 devices per bus, and 4 virtual channels (1 normal and
+ * 3 virtual) per device.  BRIDGE & XBRIDGE have 8 devices per bus and 2
+ * virtual channels (1 normal & 1 virtual) per device.  See the BRIDGE/PIC/
+ * TIOCP ASIC Programmers Reference guides for more information.
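+ *
+ * For example, with the PIC/TIOCP encoding an RRB assigned to virtual
+ * channel 2 of the device in pdev position 1 reads as
+ * 0x8 | (2 << 1) | 0x1 = 0xd (enable | vchan | pdev), which is exactly the
+ * value the RRB_ENABLE_BIT/NUM_PDEV_BITS macros below construct.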
  */ 
- 
 #define RRB_MASK (0xf)			/* mask a single rrb within reg */
 #define RRB_SIZE (4)			/* sizeof rrb within reg (bits) */
  
-#define RRB_ENABLE_BIT(bridge)		(0x8)  /* [BRIDGE | PIC]_RRB_EN */
-#define NUM_PDEV_BITS(bridge)		(1)
-#define NUM_VDEV_BITS(bridge)		(2)
-#define NUMBER_VCHANNELS(bridge)	(4)
-#define SLOT_2_PDEV(bridge, slot)	((slot) >> 1)
-#define SLOT_2_RRB_REG(bridge, slot)	((slot) & 0x1)
+#define RRB_ENABLE_BIT(bridge)	      (0x8)  /* [BRIDGE | PIC]_RRB_EN */
+#define NUM_PDEV_BITS(bridge)	      (1)
+#define NUM_VDEV_BITS(bridge)	      (2)
+#define NUMBER_VCHANNELS(bridge)      (4)
+#define SLOT_2_PDEV(bridge, slot)     ((slot) >> 1)
+#define SLOT_2_RRB_REG(bridge, slot)  ((slot) & 0x1)
+
+#define RRB_VALID(rrb)		      (0x00010000 << (rrb))
+#define RRB_INUSE(rrb)		      (0x00000001 << (rrb))
+#define RRB_CLEAR(rrb)		      (0x00000001 << (rrb))
  
 /* validate that the slot and virtual channel are valid for a given bridge */
 #define VALIDATE_SLOT_n_VCHAN(bridge, s, v) \
-    (((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)3)) && (((v) >= 0) && ((v) <= 3))) ? 1 : 0)
+    (((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)3)) && \
+      (((v) >= 0) && ((v) <= 3))) ? 1 : 0)
  
 /*  
  * Count how many RRBs are marked valid for the specified PCI slot
  * and virtual channel.	 Return the count.
  */ 
 int
-do_pcibr_rrb_count_valid(bridge_t *bridge,
+do_pcibr_rrb_count_valid(pci_bridge_t *bridge,
 			 pciio_slot_t slot,
 			 int vchan)
 {
-    bridgereg_t tmp;
+    uint64_t tmp;
     uint16_t enable_bit, vchan_bits, pdev_bits, rrb_bits;
     int rrb_index, cnt=0;
 
     if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, vchan)) {
-	printk(KERN_WARNING "do_pcibr_rrb_count_valid() invalid slot/vchan [%d/%d]\n", slot, vchan);
+	KERN_MSG(K_WARN, "do_pcibr_rrb_count_valid() invalid slot/vchan");
 	return 0;
     }
     
@@ -101,8 +92,8 @@
     pdev_bits = SLOT_2_PDEV(bridge, slot);
     rrb_bits = enable_bit | vchan_bits | pdev_bits;
     
-    tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
-
+    tmp = pcireg_rrb_get(bridge, SLOT_2_RRB_REG(bridge, slot));
+    
     for (rrb_index = 0; rrb_index < 8; rrb_index++) {
 	if ((tmp & RRB_MASK) == rrb_bits)
 	    cnt++;
@@ -117,22 +108,22 @@
  * slot.  Return the count.
  */ 
 int
-do_pcibr_rrb_count_avail(bridge_t *bridge,
+do_pcibr_rrb_count_avail(pci_bridge_t *bridge,
 			 pciio_slot_t slot)
 {
-    bridgereg_t tmp;
+    uint64_t tmp;
     uint16_t enable_bit;
     int rrb_index, cnt=0;
     
     if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, 0)) {
-	printk(KERN_WARNING "do_pcibr_rrb_count_avail() invalid slot/vchan");
+	KERN_MSG(K_WARN, "do_pcibr_rrb_count_avail() invalid slot/vchan");
 	return 0;
     }
     
     enable_bit = RRB_ENABLE_BIT(bridge);
-
-    tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
-
+    
+    tmp = pcireg_rrb_get(bridge, SLOT_2_RRB_REG(bridge, slot));
+    
     for (rrb_index = 0; rrb_index < 8; rrb_index++) {
 	if ((tmp & enable_bit) != enable_bit)
 	    cnt++;
@@ -151,17 +142,17 @@
  * we return failure.
  */ 
 int
-do_pcibr_rrb_alloc(bridge_t *bridge,
+do_pcibr_rrb_alloc(pci_bridge_t *bridge,
 		   pciio_slot_t slot,
 		   int vchan,
 		   int more)
 {
-    bridgereg_t reg, tmp = (bridgereg_t)0;
+    uint64_t reg, tmp = 0;
     uint16_t enable_bit, vchan_bits, pdev_bits, rrb_bits;
     int rrb_index;
     
     if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, vchan)) {
-	printk(KERN_WARNING "do_pcibr_rrb_alloc() invalid slot/vchan");
+	KERN_MSG(K_WARN, "do_pcibr_rrb_alloc() invalid slot/vchan");
 	return -1;
     }
     
@@ -169,9 +160,9 @@
     vchan_bits = vchan << NUM_PDEV_BITS(bridge);
     pdev_bits = SLOT_2_PDEV(bridge, slot);
     rrb_bits = enable_bit | vchan_bits | pdev_bits;
-
-    reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
-
+    
+    reg = tmp = pcireg_rrb_get(bridge, SLOT_2_RRB_REG(bridge, slot));
+    
     for (rrb_index = 0; ((rrb_index < 8) && (more > 0)); rrb_index++) {
 	if ((tmp & enable_bit) != enable_bit) {
 	    /* clear the rrb and OR in the new rrb into 'reg' */
@@ -181,8 +172,8 @@
 	}
 	tmp = (tmp >> RRB_SIZE);
     }
-
-    bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
+    
+    pcireg_rrb_set(bridge, SLOT_2_RRB_REG(bridge, slot), reg);
     return (more ? -1 : 0);
 }
  
@@ -196,17 +187,17 @@
  * if we return failure.
  */ 
 int
-do_pcibr_rrb_free(bridge_t *bridge,
+do_pcibr_rrb_free(pci_bridge_t *bridge,
 		  pciio_slot_t slot,
 		  int vchan,
 		  int less)
 {
-    bridgereg_t reg, tmp = (bridgereg_t)0, clr = 0;
+    uint64_t reg, tmp = 0, clr = 0;
     uint16_t enable_bit, vchan_bits, pdev_bits, rrb_bits;
     int rrb_index;
     
     if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, vchan)) {
-	printk(KERN_WARNING "do_pcibr_rrb_free() invalid slot/vchan");
+	KERN_MSG(K_WARN, "do_pcibr_rrb_free() invalid slot/vchan");
 	return -1;
     }
     
@@ -214,9 +205,9 @@
     vchan_bits = vchan << NUM_PDEV_BITS(bridge);
     pdev_bits = SLOT_2_PDEV(bridge, slot);
     rrb_bits = enable_bit | vchan_bits | pdev_bits;
-
-    reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
-
+    
+    reg = tmp = pcireg_rrb_get(bridge, SLOT_2_RRB_REG(bridge, slot));
+    
     for (rrb_index = 0; ((rrb_index < 8) && (less > 0)); rrb_index++) {
 	if ((tmp & RRB_MASK) == rrb_bits) {
 	   /*
@@ -231,9 +222,9 @@
 	}
 	tmp = (tmp >> RRB_SIZE);
     }
-
-    bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
-
+    
+    pcireg_rrb_set(bridge, SLOT_2_RRB_REG(bridge, slot), reg);
+    
     /* call do_pcibr_rrb_clear() for all the rrbs we've freed */
     for (rrb_index = 0; rrb_index < 8; rrb_index++) {
 	int evn_odd = SLOT_2_RRB_REG(bridge, slot);
@@ -251,7 +242,7 @@
  */ 
 void
 do_pcibr_rrb_free_all(pcibr_soft_t pcibr_soft,
-		      bridge_t *bridge,
+		      pci_bridge_t *bridge,
 		      pciio_slot_t slot)
 {
     int vchan;
@@ -271,26 +262,26 @@
  * valid.
  */
 void
-do_pcibr_rrb_clear(bridge_t *bridge, int rrb)
+do_pcibr_rrb_clear(pci_bridge_t *bridge, int rrb)
 {
-    bridgereg_t             status;
+    uint64_t             status;
 
-    /* bridge_lock must be held;
-     * this RRB must be disabled.
-     */
+    /* bridge_lock must be held;  this RRB must be disabled. */
 
     /* wait until RRB has no outstanduing XIO packets. */
-    while ((status = bridge->b_resp_status) & BRIDGE_RRB_INUSE(rrb)) {
-	;				/* XXX- beats on bridge. bad idea? */
+    status = pcireg_rrb_status_get(bridge);
+    while (status & RRB_INUSE(rrb)) {
+	status = pcireg_rrb_status_get(bridge);
     }
 
     /* if the RRB has data, drain it. */
-    if (status & BRIDGE_RRB_VALID(rrb)) {
-	bridge->b_resp_clear = BRIDGE_RRB_CLEAR(rrb);
+    if (status & RRB_VALID(rrb)) {
+	pcireg_rrb_clear_set(bridge, RRB_CLEAR(rrb));
 
 	/* wait until RRB is no longer valid. */
-	while ((status = bridge->b_resp_status) & BRIDGE_RRB_VALID(rrb)) {
-		;				/* XXX- beats on bridge. bad idea? */
+	status = pcireg_rrb_status_get(bridge);
+	while (status & RRB_VALID(rrb)) {
+	    status = pcireg_rrb_status_get(bridge);
 	}
     }
 }
@@ -302,37 +293,84 @@
  * before calling do_pcibr_rrb_clear().
  */
 void
-do_pcibr_rrb_flush(bridge_t *bridge, int rrbn)
+do_pcibr_rrb_flush(pci_bridge_t *bridge, int rrbn)
 {
-    reg_p	 rrbp = &bridge->b_rrb_map[rrbn & 1].reg;
-    bridgereg_t	 rrbv;
-    int		 shft = (RRB_SIZE * (rrbn >> 1));
-    unsigned long	 ebit = RRB_ENABLE_BIT(bridge) << shft;
-
-    rrbv = *rrbp;
+    uint64_t	rrbv;
+    int		shft = (RRB_SIZE * (rrbn >> 1));
+    uint64_t	ebit = RRB_ENABLE_BIT(bridge) << shft;
 
+    rrbv = pcireg_rrb_get(bridge, (rrbn & 1));
     if (rrbv & ebit) {
-	*rrbp = rrbv & ~ebit;
+	pcireg_rrb_set(bridge, (rrbn & 1), (rrbv & ~ebit));
     }
 
     do_pcibr_rrb_clear(bridge, rrbn);
 
     if (rrbv & ebit) {
-	*rrbp = rrbv;
+	pcireg_rrb_set(bridge, (rrbn & 1), rrbv);
     }
 }
 
+/*
+ * Initialize a slot with a given number of RRBs.  (this routine
+ * will also give back RRBs if the slot has more than we want).
+ */
+void
+pcibr_rrb_alloc_init(pcibr_soft_t pcibr_soft,
+		     int slot,
+		     int vchan,
+		     int init_rrbs)
+{
+    pci_bridge_t	*bridge = pcibr_soft->bs_base;
+    int			 had = pcibr_soft->bs_rrb_valid[slot][vchan];
+    int			 have = had;
+    int			 added = 0;
+
+    for (added = 0; have < init_rrbs; ++added, ++have) {
+	if (pcibr_soft->bs_rrb_res[slot] > 0)
+	    pcibr_soft->bs_rrb_res[slot]--;
+	else if (pcibr_soft->bs_rrb_avail[slot & 1] > 0)
+	    pcibr_soft->bs_rrb_avail[slot & 1]--;
+	else
+	    break;
+	if (do_pcibr_rrb_alloc(bridge, slot, vchan, 1) < 0)
+	    break;
 
+	pcibr_soft->bs_rrb_valid[slot][vchan]++;
+    }
+
+    /* Free any extra RRBs that the slot may have allocated to it */
+    while (have > init_rrbs) {
+	pcibr_soft->bs_rrb_avail[slot & 1]++;
+	pcibr_soft->bs_rrb_valid[slot][vchan]--;
+	do_pcibr_rrb_free(bridge, slot, vchan, 1);
+	added--;
+	have--;
+    }
+
+    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_soft->bs_vhdl,
+		"pcibr_rrb_alloc_init: had %d, added/removed %d, "
+		"(of requested %d) RRBs "
+		"to slot %d, vchan %d\n", had, added, init_rrbs,
+		PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), vchan));
+
+    pcibr_rrb_debug("pcibr_rrb_alloc_init", pcibr_soft);
+}
+
+
+/*
+ * Allocate more RRBs to a given slot (if the RRBs are available).
+ */
 void
-do_pcibr_rrb_autoalloc(pcibr_soft_t pcibr_soft,
-		       int slot,
-		       int vchan, 
-		       int more_rrbs)
+pcibr_rrb_alloc_more(pcibr_soft_t pcibr_soft,
+		     int slot,
+		     int vchan, 
+		     int more_rrbs)
 {
-    bridge_t               *bridge = pcibr_soft->bs_base;
-    int                     got;
+    pci_bridge_t	*bridge = pcibr_soft->bs_base;
+    int			 added;
 
-    for (got = 0; got < more_rrbs; ++got) {
+    for (added = 0; added < more_rrbs; ++added) {
 	if (pcibr_soft->bs_rrb_res[slot] > 0)
 	    pcibr_soft->bs_rrb_res[slot]--;
 	else if (pcibr_soft->bs_rrb_avail[slot & 1] > 0)
@@ -346,26 +384,27 @@
     }
 
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_soft->bs_vhdl,
-		"do_pcibr_rrb_autoalloc: added %d (of %d requested) RRBs "
-		"to slot %d, vchan %d\n", got, more_rrbs, 
+		"pcibr_rrb_alloc_more: added %d (of %d requested) RRBs "
+		"to slot %d, vchan %d\n", added, more_rrbs, 
 		PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), vchan));
 
-    pcibr_rrb_debug("do_pcibr_rrb_autoalloc", pcibr_soft);
+    pcibr_rrb_debug("pcibr_rrb_alloc_more", pcibr_soft);
 }
 
 
+
 /*
  * Flush all the rrb's assigned to the specified connection point.
  */
 void
 pcibr_rrb_flush(vertex_hdl_t pconn_vhdl)
 {
-    pciio_info_t  pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info_t  pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pcibr_soft_t  pcibr_soft = (pcibr_soft_t)pciio_info_mfast_get(pciio_info);
     pciio_slot_t  slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
-    bridge_t	 *bridge = pcibr_soft->bs_base;
+    pci_bridge_t *bridge = pcibr_soft->bs_base;
 
-    bridgereg_t tmp;
+    uint64_t tmp;
     uint16_t enable_bit, pdev_bits, rrb_bits, rrb_mask;
     int rrb_index;
     unsigned long s;
@@ -375,7 +414,7 @@
     rrb_bits = enable_bit | pdev_bits;
     rrb_mask = enable_bit | ((NUM_PDEV_BITS(bridge) << 1) - 1);
 
-    tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
+    tmp = pcireg_rrb_get(bridge, SLOT_2_RRB_REG(bridge, slot));
 
     s = pcibr_lock(pcibr_soft);
     for (rrb_index = 0; rrb_index < 8; rrb_index++) {
@@ -395,25 +434,13 @@
 int
 pcibr_wrb_flush(vertex_hdl_t pconn_vhdl)
 {
-    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-    bridge_t               *bridge = pcibr_soft->bs_base;
-    volatile bridgereg_t   *wrb_flush;
+    pci_bridge_t	   *bridge = pcibr_soft->bs_base;
+
+    pcireg_wrb_flush_get(bridge, pciio_slot);
 
-    wrb_flush = &(bridge->b_wr_req_buf[pciio_slot].reg);
-    if ( IS_PIC_SOFT(pcibr_soft) ) {
-	while (*wrb_flush)
-		;
-    }
-    else {
-	if (io_get_sh_swapper(NASID_GET(bridge))) {
-		while (BRIDGE_REG_GET32((wrb_flush)));
-	} else {
-		while (*wrb_flush)
-			;
-	}
-    }
     return(0);
 }
 
@@ -433,10 +460,10 @@
 		int *count_vchan0,
 		int *count_vchan1)
 {
-    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = pciio_hostinfo_get(pconn_vhdl);
     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-    bridge_t               *bridge = pcibr_soft->bs_base;
+    pci_bridge_t	   *bridge = pcibr_soft->bs_base;
     int                     desired_vchan0;
     int                     desired_vchan1;
     int                     orig_vchan0;
@@ -449,7 +476,7 @@
     int                     res_rrbs;
     int			    vchan_total;
     int			    vchan;
-    unsigned long                s;
+    unsigned long	    s;
     int                     error;
 
     /*
@@ -645,10 +672,10 @@
     pciio_info_t            pciio_info;
     pciio_slot_t            pciio_slot;
     pcibr_soft_t            pcibr_soft;
-    unsigned long                s;
+    unsigned long	    s;
     int                     error = -1;
 
-    if ((pciio_info = pciio_info_get(pconn_vhdl)) &&
+    if ((pciio_info = pciio_hostinfo_get(pconn_vhdl)) &&
 	(pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info)) &&
 	((pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info)) < PCIBR_NUM_SLOTS(pcibr_soft))) {
 
@@ -691,7 +718,7 @@
     pcibr_soft_t	 pcibr_soft;
     pcibr_info_h	 pcibr_infoh;
     pcibr_info_t	 pcibr_info;
-    bridge_t		*bridge;
+    pci_bridge_t	*bridge;
     int 		 vchan_total;
     int			 vchan;
     int                  chan[4];
@@ -699,10 +726,10 @@
     pcibr_soft = pcibr_soft_get(pcibr_vhdl);
 
     if (!pcibr_soft)
-	return(-EINVAL);
+	return(EINVAL);
 
     if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
-	return(-EINVAL);
+	return(EINVAL);
 
     bridge = pcibr_soft->bs_base;
 
@@ -711,42 +738,45 @@
     for (vchan = 0; vchan < vchan_total; vchan++) 
         chan[vchan] = do_pcibr_rrb_count_valid(bridge, slot, vchan);
 
-    if (IS_PIC_SOFT(pcibr_soft)) {
- 	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_vhdl,
-	    "pcibr_slot_initial_rrb_alloc: slot %d started with %d+%d+%d+%d\n",
-	    PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), 
-	    chan[VCHAN0], chan[VCHAN1], chan[VCHAN2], chan[VCHAN3]));
-    } else {
-	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_vhdl,
-	    "pcibr_slot_initial_rrb_alloc: slot %d started with %d+%d\n",
-	    PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), 
-	    chan[VCHAN0], chan[VCHAN1]));
-    }
+    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_vhdl,
+	"pcibr_slot_initial_rrb_alloc: slot %d started with %d+%d+%d+%d\n",
+	PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), 
+	chan[VCHAN0], chan[VCHAN1], chan[VCHAN2], chan[VCHAN3]));
 
     /* Do we really need any?
      */
     pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
     pcibr_info = pcibr_infoh[0];
+    /*
+     * PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478):
+     * Don't free RRBs we allocated to device[2|3]--vchan3 as
+     * a WAR to those PVs mentioned above.  In pcibr_attach2
+     * we allocate RRB0,8,1,9 to device[2|3]--vchan3.
+     */
+    if (PCIBR_WAR_ENABLED(PV856866, pcibr_soft) && IS_PIC_SOFT(pcibr_soft) && 
+			(slot == 2 || slot == 3) &&
+        		(pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE) &&
+        		!pcibr_soft->bs_slot[slot].has_host) {
 
-    if (PCIBR_WAR_ENABLED(PV856866, pcibr_soft) && IS_PIC_SOFT(pcibr_soft) &&
-                        (slot == 2 || slot == 3) &&
-                        (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE) &&
-                        !pcibr_soft->bs_slot[slot].has_host) {
-
-        for (vchan = 0; vchan < 2; vchan++) {
-            do_pcibr_rrb_free(bridge, slot, vchan, 8);
-            pcibr_soft->bs_rrb_valid[slot][vchan] = 0;
-        }
+	for (vchan = 0; vchan < 2; vchan++) {
+	    do_pcibr_rrb_free(bridge, slot, vchan, 8);
+	    pcibr_soft->bs_rrb_valid[slot][vchan] = 0;
+	}
 
-        pcibr_soft->bs_rrb_valid[slot][3] = chan[3];
+	pcibr_soft->bs_rrb_valid[slot][3] = chan[3];
 
-        return(-ENODEV);
+	return(ENODEV);
     }
 
-    /* Give back any assigned to empty slots */
-    if ((pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE) && !pcibr_soft->bs_slot[slot].has_host) {
+    if ((pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE) &&
+	!pcibr_soft->bs_slot[slot].has_host) {
 	do_pcibr_rrb_free_all(pcibr_soft, bridge, slot);
-	return(-ENODEV);
+        
+	/* Reserve RRBs for this empty slot for hot-plug */
+	for (vchan = 0; vchan < vchan_total; vchan++) 
+	    pcibr_soft->bs_rrb_valid[slot][vchan] = 0;
+
+	return(ENODEV);
     }
 
     for (vchan = 0; vchan < vchan_total; vchan++)
@@ -755,16 +785,6 @@
     return(0);
 }
 
-void
-rrb_reserved_free(pcibr_soft_t pcibr_soft, int slot)
-{
-        int res = pcibr_soft->bs_rrb_res[slot];
-
-        if (res) {
-                 pcibr_soft->bs_rrb_avail[slot & 1] += res;
-                 pcibr_soft->bs_rrb_res[slot] = 0;
-        }
-}
 
 /*
  * pcibr_initial_rrb
@@ -773,7 +793,8 @@
  *      the normal channel, the number of RRBs assigned to the virtual
  *      channels, and the number of RRBs assigned as reserved. 
  *
- *      A candidate slot is any existing (populated or empty) slot.
+ *      A candidate slot is a populated slot on a non-SN1 system or 
+ *      any existing (populated or empty) slot on an SN1 system.
  *      Empty SN1 slots need RRBs to support hot-plug operations.
  */
 
@@ -782,7 +803,7 @@
 			     pciio_slot_t first, pciio_slot_t last)
 {
     pcibr_soft_t            pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-    bridge_t               *bridge = pcibr_soft->bs_base;
+    pci_bridge_t	   *bridge = pcibr_soft->bs_base;
     pciio_slot_t            slot;
     int			    rrb_total;
     int			    vchan_total;
@@ -836,6 +857,9 @@
     for (slot = first; slot <= last; ++slot) {
         int                     r;
 
+	if (pcibr_soft->bs_unused_slot & (1 << slot))
+	    continue;
+
 	rrb_total = 0;
 	for (vchan = 0; vchan < vchan_total; vchan++)
 		rrb_total += pcibr_soft->bs_rrb_valid[slot][vchan];
@@ -868,13 +892,8 @@
                     "%s: rrbs available, even=%d, odd=%d\n", calling_func,
                     pcibr_soft->bs_rrb_avail[0], pcibr_soft->bs_rrb_avail[1]));
 
-        if (IS_PIC_SOFT(pcibr_soft)) {
-            PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_soft->bs_vhdl,
-                        "\tslot\tvchan0\tvchan1\tvchan2\tvchan3\treserved\n"));
-        } else {
-	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_soft->bs_vhdl,
-		        "\tslot\tvchan0\tvchan1\treserved\n"));
-        }
+        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_soft->bs_vhdl,
+                    "\tslot\tvchan0\tvchan1\tvchan2\tvchan3\treserved\n"));
 
         for (slot=0; slot < PCIBR_NUM_SLOTS(pcibr_soft); slot++) {
 	    /*
@@ -882,24 +901,17 @@
              * attempting to call PCIBR_DEBUG_ALWAYS() with more than 5 printf
              * arguments fails so sprintf() it into a temporary string.
              */
-	    if (IS_PIC_SOFT(pcibr_soft)) {
-                sprintf(tmp_str, "\t %d\t  %d\t  %d\t  %d\t  %d\t  %d\n", 
-		        PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
-                        0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN0],
-                        0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN1],
-                        0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN2],
-                        0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN3],
-                        pcibr_soft->bs_rrb_res[slot]);
-	    } else {
-	        sprintf(tmp_str, "\t %d\t  %d\t  %d\t  %d\n", 
-		        PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
-		        0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN0],
-		        0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN1],
-		        pcibr_soft->bs_rrb_res[slot]);
-	    }
+            sprintf(tmp_str, "\t %d\t  %d\t  %d\t  %d\t  %d\t  %d\n", 
+		    PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
+                    0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN0],
+                    0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN1],
+                    0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN2],
+                    0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN3],
+                    pcibr_soft->bs_rrb_res[slot]);
     
             PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_soft->bs_vhdl,
                         "%s", tmp_str));
         }
     }
 }
+
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -8,26 +7,14 @@
  */
 
 #include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/pci.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
+#include <asm/uaccess.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
 #include <asm/sn/pci/pciio.h>
 #include <asm/sn/pci/pcibr.h>
 #include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/io.h>
 #include <asm/sn/sn_private.h>
 
 extern pcibr_info_t     pcibr_info_get(vertex_hdl_t);
@@ -35,6 +22,11 @@
 extern pcibr_info_t     pcibr_device_info_new(pcibr_soft_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
 extern int		pcibr_slot_initial_rrb_alloc(vertex_hdl_t,pciio_slot_t);
 extern int		pcibr_pcix_rbars_calc(pcibr_soft_t);
+extern int		pciio_ppb_attach(vertex_hdl_t);
+
+extern char *pci_space[];
+extern struct reg_desc  space_desc[];
+extern struct reg_desc  device_bits[];
 
 int pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
 int pcibr_slot_info_free(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
@@ -48,33 +40,34 @@
 		 pciio_slot_t slot, int drv_flags);
 int pcibr_slot_detach(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot,
                  int drv_flags, char *l1_msg, int *sub_errorp);
-static int pcibr_probe_slot(bridge_t *, cfg_p, unsigned int *);
+int pcibr_is_slot_sys_critical(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
+int pcibr_probe_slot(pci_bridge_t *, cfg_p, unsigned int *);
+static int pcibr_probe_work(pci_bridge_t *bridge, void *addr, int len, void *valp);
+static uint64_t pcibr_disable_mst_timeout_work(pci_bridge_t *bridge);
+static int pcibr_enable_mst_timeout_work(pci_bridge_t *bridge);
 void pcibr_device_info_free(vertex_hdl_t, pciio_slot_t);
 iopaddr_t pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t, 
                                pciio_space_t, int, int, int);
-void pciibr_bus_addr_free(pcibr_soft_t, pciio_win_info_t);
+void pcibr_bus_addr_free(pciio_win_info_t);
 cfg_p pcibr_find_capability(cfg_p, unsigned);
 extern uint64_t  do_pcibr_config_get(cfg_p, unsigned, unsigned);
 void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t); 
 
+#ifdef PCI_HOTPLUG
 int pcibr_slot_attach(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot,
                 int drv_flags, char *l1_msg, int *sub_errorp);
-
+int pcibr_slot_pwr(vertex_hdl_t, pciio_slot_t, int, char *);
+int pcibr_slot_startup(vertex_hdl_t pcibr_vhdl, pcibr_slot_req_t slot);
+int pcibr_slot_shutdown(vertex_hdl_t pcibr_vhdl, pcibr_slot_req_t slot);
+void pcibr_slot_func_info_return(pcibr_info_h pcibr_infoh, int func,
+                 pcibr_slot_func_info_resp_t funcp);
 int pcibr_slot_info_return(pcibr_soft_t pcibr_soft, pciio_slot_t slot,
                  pcibr_slot_info_resp_t respp);
+int pcibr_slot_query(vertex_hdl_t, pcibr_slot_req_t);
 
 extern vertex_hdl_t baseio_pci_vhdl;
 int scsi_ctlr_nums_add(vertex_hdl_t, vertex_hdl_t);
-
-
-/* For now .... */
-/*
- * PCI Hot-Plug Capability Flags
-
- */
-#define D_PCI_HOT_PLUG_ATTACH  0x200  /* Driver supports PCI hot-plug attach */
-#define D_PCI_HOT_PLUG_DETACH  0x400  /* Driver supports PCI hot-plug detach */
-
+#endif /* PCI_HOTPLUG*/
 
 /* 
  * PCI-X Max Outstanding Split Transactions translation array and Max Memory
@@ -87,22 +80,22 @@
 int max_readcount_to_bufsize[MAX_READCNT_TABLE] = {512, 1024, 2048, 4096 };
 
 
+#define COPYOUT(a, b, c)	copy_to_user(b,a,c)
+
 /*==========================================================================
  *	BRIDGE PCI SLOT RELATED IOCTLs
  */
 
+#ifdef PCI_HOTPLUG
 /*
  * pcibr_slot_startup
  *	Software start-up the PCI slot.
  */
-
-#ifdef PIC_LATER
-
 int
 pcibr_slot_startup(vertex_hdl_t pcibr_vhdl, pcibr_slot_req_t reqp)
 {
     pcibr_soft_t                   pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-    pciio_slot_t                   slot;
+    pciio_slot_t                   slot = reqp->req_slot;
     int                            error = 0;
     char                           l1_msg[BRL1_QSIZE+1];
     struct pcibr_slot_up_resp_s    tmp_up_resp;
@@ -113,16 +106,23 @@
     }
 
     /* req_slot is the 'external' slot number, convert for internal use */
-    slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft, reqp->req_slot);
+    slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft, slot);
+
+    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HOTPLUG, pcibr_vhdl,
+                "pcibr_slot_startup: pcibr_soft=0x%lx, slot=%d, reqp=0x%lx\n",
+                pcibr_soft, slot, reqp));
 
+    /* Do not allow start-up of a slot in a shoehorn */
+    if(nic_vertex_info_match(pcibr_soft->bs_conn, XTALK_PCI_PART_NUM)) {
+       return(PCI_SLOT_IN_SHOEHORN);
+    }
+ 
     /* Check for the valid slot */
     if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
         return(PCI_NOT_A_SLOT);
 
-#ifdef PIC_LATER
     /* Acquire update access to the bus */
     mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
-#endif
 
     if (pcibr_soft->bs_slot[slot].slot_status & SLOT_STARTUP_CMPLT) {
         error = PCI_SLOT_ALREADY_UP;
@@ -135,16 +135,15 @@
     strncpy(tmp_up_resp.resp_l1_msg, l1_msg, L1_QSIZE);
     tmp_up_resp.resp_l1_msg[L1_QSIZE] = '\0';
 
-    if (COPYOUT(&tmp_up_resp, reqp->req_respp.up, reqp->req_size)) {
+    if (copyout(&tmp_up_resp, reqp->req_respp.up, reqp->req_size)) {
         return(EFAULT);
     }
 
     startup_unlock:
 
-#ifdef PIC_LATER
     /* Release the bus lock */
     mrunlock(pcibr_soft->bs_bus_lock);
-#endif
+
     return(error);
 }
 
@@ -156,8 +155,7 @@
 pcibr_slot_shutdown(vertex_hdl_t pcibr_vhdl, pcibr_slot_req_t reqp)
 {
     pcibr_soft_t                   pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-    bridge_t                      *bridge;
-    pciio_slot_t                   slot;
+    pciio_slot_t                   slot = reqp->req_slot;
     int                            error = 0;
     char                           l1_msg[BRL1_QSIZE+1];
     struct pcibr_slot_down_resp_s  tmp_down_resp;
@@ -169,18 +167,23 @@
     }
 
     /* req_slot is the 'external' slot number, convert for internal use */
-    slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft, reqp->req_slot);
+    slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft, slot);
 
-    bridge = pcibr_soft->bs_base;
+    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HOTPLUG, pcibr_vhdl,
+                "pcibr_slot_shutdown: pcibr_soft=0x%lx, slot=%d, reqp=0x%lx\n",
+                pcibr_soft, slot, reqp));
 
     /* Check for valid slot */
     if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
         return(PCI_NOT_A_SLOT);
 
-#ifdef PIC_LATER
+    /* Do not allow shut-down of a slot in a shoehorn */
+    if(nic_vertex_info_match(pcibr_soft->bs_conn, XTALK_PCI_PART_NUM)) {
+       return(PCI_SLOT_IN_SHOEHORN);
+    }
+
     /* Acquire update access to the bus */
     mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
-#endif
 
     if ((pcibr_soft->bs_slot[slot].slot_status & SLOT_SHUTDOWN_CMPLT) ||
         ((pcibr_soft->bs_slot[slot].slot_status & SLOT_STATUS_MASK) == 0)) {
@@ -200,8 +203,7 @@
     }
 
     /* Do not allow the last 33 MHz card to be removed */
-    if ((bridge->b_wid_control & BRIDGE_CTRL_BUS_SPEED_MASK) ==
-         BRIDGE_CTRL_BUS_SPEED_33) {
+    if (IS_33MHZ(pcibr_soft)) {
         for (tmp_slot = pcibr_soft->bs_first_slot;
              tmp_slot <= pcibr_soft->bs_last_slot; tmp_slot++)
             if (tmp_slot != slot)
@@ -223,37 +225,17 @@
 
     shutdown_copyout:
 
-    if (COPYOUT(&tmp_down_resp, reqp->req_respp.down, reqp->req_size)) {
+    if (copyout(&tmp_down_resp, reqp->req_respp.down, reqp->req_size)) {
         return(EFAULT);
     }
 
     shutdown_unlock:
 
-#ifdef PIC_LATER
     /* Release the bus lock */
     mrunlock(pcibr_soft->bs_bus_lock);
-#endif
 
     return(error);
 }
-#endif	/* PIC_LATER */
-
-char *pci_space_name[] = {"NONE", 
-			  "ROM",
-			  "IO",
-			  "",
-			  "MEM",
-			  "MEM32",
-			  "MEM64",
-			  "CFG",
-			  "WIN0",
-			  "WIN1",
-			  "WIN2",
-			  "WIN3",
-			  "WIN4",
-			  "WIN5",
-			  "",
-			  "BAD"};
 
 void
 pcibr_slot_func_info_return(pcibr_info_h pcibr_infoh,
@@ -270,16 +252,15 @@
     }
 
     funcp->resp_f_status |= FUNC_IS_VALID;
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-    sprintf(funcp->resp_f_slot_name, "%v", pcibr_info->f_vertex);
-#endif
+    vertex_to_name(pcibr_info->f_vertex, funcp->resp_f_slot_name, MAXDEVNAME);
+    if(is_sys_critical_vertex(pcibr_info->f_vertex)) {
+        funcp->resp_f_status |= FUNC_IS_SYS_CRITICAL;
+    }
 
     funcp->resp_f_bus = pcibr_info->f_bus;
     funcp->resp_f_slot = PCIBR_INFO_SLOT_GET_EXT(pcibr_info);
     funcp->resp_f_func = pcibr_info->f_func;
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-    sprintf(funcp->resp_f_master_name, "%v", pcibr_info->f_master);
-#endif
+    vertex_to_name(pcibr_info->f_master, funcp->resp_f_master_name, MAXDEVNAME);
     funcp->resp_f_pops = pcibr_info->f_pops;
     funcp->resp_f_efunc = pcibr_info->f_efunc;
     funcp->resp_f_einfo = pcibr_info->f_einfo;
@@ -293,8 +274,7 @@
         funcp->resp_f_window[win].resp_w_size =
                                   pcibr_info->f_window[win].w_size;
         sprintf(funcp->resp_f_window[win].resp_w_space,
-                "%s",
-                pci_space_name[pcibr_info->f_window[win].w_space]);
+                "%s", pci_space[pcibr_info->f_window[win].w_space]);
     }
 
     funcp->resp_f_rbase = pcibr_info->f_rbase;
@@ -315,13 +295,11 @@
 {
     pcibr_soft_slot_t            pss;
     int                          func;
-    bridge_t                    *bridge = pcibr_soft->bs_base;
-    reg_p                        b_respp;
+    pci_bridge_t		*bridge = pcibr_soft->bs_base;
     pcibr_slot_info_resp_t       slotp;
     pcibr_slot_func_info_resp_t  funcp;
-    extern void snia_kmem_free(void *, int);
 
-    slotp = snia_kmem_zalloc(sizeof(*slotp), 0);
+    slotp = snia_kmem_zalloc(sizeof(*slotp));
     if (slotp == NULL) {
         return(ENOMEM);
     }
@@ -333,14 +311,15 @@
 
     slotp->resp_has_host = pss->has_host;
     slotp->resp_host_slot = pss->host_slot;
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-    sprintf(slotp->resp_slot_conn_name, "%v", pss->slot_conn);
-#else
-    sprintf(slotp->resp_slot_conn_name, "%p", (void *)pss->slot_conn);
-#endif
+    vertex_to_name(pss->slot_conn, slotp->resp_slot_conn_name, MAXDEVNAME);
     slotp->resp_slot_status = pss->slot_status;
 
     slotp->resp_l1_bus_num = pcibr_widget_to_bus(pcibr_soft->bs_vhdl);
+
+    if (is_sys_critical_vertex(pss->slot_conn)) {
+        slotp->resp_slot_status |= SLOT_IS_SYS_CRITICAL;
+    }
+
     slotp->resp_bss_ninfo = pss->bss_ninfo;
 
     for (func = 0; func < pss->bss_ninfo; func++) {
@@ -349,7 +328,7 @@
     }
 
     sprintf(slotp->resp_bss_devio_bssd_space, "%s",
-            pci_space_name[pss->bss_devio.bssd_space]);
+            pci_space[pss->bss_devio.bssd_space]);
     slotp->resp_bss_devio_bssd_base = pss->bss_devio.bssd_base;
     slotp->resp_bss_device = pss->bss_device;
 
@@ -362,40 +341,22 @@
     slotp->resp_bss_d32_base = pss->bss_d32_base;
     slotp->resp_bss_d32_flags = pss->bss_d32_flags;
 
-    slotp->resp_bss_ext_ates_active = pss->bss_ext_ates_active;
-
-    slotp->resp_bss_cmd_pointer = pss->bss_cmd_pointer;
-    slotp->resp_bss_cmd_shadow = pss->bss_cmd_shadow;
-
     slotp->resp_bs_rrb_valid = pcibr_soft->bs_rrb_valid[slot][VCHAN0];
     slotp->resp_bs_rrb_valid_v1 = pcibr_soft->bs_rrb_valid[slot][VCHAN1];
     slotp->resp_bs_rrb_valid_v2 = pcibr_soft->bs_rrb_valid[slot][VCHAN2];
     slotp->resp_bs_rrb_valid_v3 = pcibr_soft->bs_rrb_valid[slot][VCHAN3];
     slotp->resp_bs_rrb_res = pcibr_soft->bs_rrb_res[slot];
+    slotp->resp_b_resp = pcireg_rrb_get(bridge, (slot & 1));
 
-    if (slot & 1) {
-        b_respp = &bridge->b_odd_resp;
-    } else {
-        b_respp = &bridge->b_even_resp;
-    }
-
-    slotp->resp_b_resp = *b_respp;
-
-    slotp->resp_b_int_device = bridge->b_int_device;
-
-    if (IS_PIC_SOFT(pcibr_soft)) {
-	slotp->resp_p_int_enable = bridge->p_int_enable_64;
-	slotp->resp_p_int_host = bridge->p_int_addr_64[slot];
-    } else {
-	slotp->resp_b_int_enable = bridge->b_int_enable;
-	slotp->resp_b_int_host = bridge->b_int_addr[slot].addr;
-    }
-
-    if (COPYOUT(slotp, respp, sizeof(*respp))) {
+    slotp->resp_b_int_device = pcireg_intr_device_get(bridge);
+    slotp->resp_b_int_enable = pcireg_intr_enable_get(bridge);
+    slotp->resp_b_int_host = pcireg_intr_addr_get(bridge, slot);
+    
+    if (copyout(slotp, respp, sizeof(*respp))) {
         return(EFAULT);
     }
 
-    snia_kmem_free(slotp, sizeof(*slotp));
+    DEL(slotp);
 
     return(0);
 }
@@ -426,7 +387,7 @@
 pcibr_slot_query(vertex_hdl_t pcibr_vhdl, pcibr_slot_req_t reqp)
 {
     pcibr_soft_t            pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-    pciio_slot_t            slot;
+    pciio_slot_t            slot = reqp->req_slot;
     pciio_slot_t            tmp_slot;
     pcibr_slot_info_resp_t  respp = reqp->req_respp.query;
     int                     size = reqp->req_size;
@@ -438,10 +399,10 @@
     }
 
     /* req_slot is the 'external' slot number, convert for internal use */
-    slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft, reqp->req_slot);
+    slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft, slot);
 
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HOTPLUG, pcibr_vhdl,
-                "pcibr_slot_query: pcibr_soft=0x%x, slot=%d, reqp=0x%x\n",
+                "pcibr_slot_query: pcibr_soft=0x%lx, slot=%d, reqp=0x%lx\n",
                 pcibr_soft, slot, reqp));
 
     /* Make sure that we have a valid PCI slot number or PCIIO_SLOT_NONE */
@@ -449,22 +410,25 @@
         return(PCI_NOT_A_SLOT);
     }
 
+    /* Do not allow a query of a slot in a shoehorn */
+    if(nic_vertex_info_match(pcibr_soft->bs_conn, XTALK_PCI_PART_NUM)) {
+       return(PCI_SLOT_IN_SHOEHORN);
+    }
+
     /* Return information for the requested PCI slot */
     if (slot != PCIIO_SLOT_NONE) {
         if (size < sizeof(*respp)) {
             return(PCI_RESP_AREA_TOO_SMALL);
         }
 
-#ifdef PIC_LATER
         /* Acquire read access to the bus */
         mrlock(pcibr_soft->bs_bus_lock, MR_ACCESS, PZERO);
-#endif
+
         error = pcibr_slot_info_return(pcibr_soft, slot, respp);
 
-#ifdef PIC_LATER
         /* Release the bus lock */
         mrunlock(pcibr_soft->bs_bus_lock);
-#endif
+
         return(error);
     }
 
@@ -476,16 +440,14 @@
             return(PCI_RESP_AREA_TOO_SMALL);
         }
 
-#ifdef PIC_LATER
         /* Acquire read access to the bus */
         mrlock(pcibr_soft->bs_bus_lock, MR_ACCESS, PZERO);
-#endif
+
         error = pcibr_slot_info_return(pcibr_soft, tmp_slot, respp);
 
-#ifdef PCI_LATER
         /* Release the bus lock */
         mrunlock(pcibr_soft->bs_bus_lock);
-#endif
+
         if (error) {
             return(error);
         }
@@ -496,10 +458,80 @@
 
     return(error);
 }
+#endif	/* PCI_HOTPLUG */
+
+#if 0
+/*
+ * pcibr_slot_reset
+ *	Reset the PCI device in the particular slot.
+ *
+ *      The Xbridge does not comply with the PCI Specification
+ *      when resetting an individual slot.  An individual slot is
+ *      reset by toggling the slot's bit in the Xbridge Control
+ *      Register.  The Xbridge will assert the target slot's 
+ *      (non-bussed) RST signal, but does not assert the (bussed) 
+ *      REQ64 signal as required by the specification.   As
+ *      designed, the Xbridge cannot assert the REQ64 signal
+ *      because it may interfere with a bus transaction in progress.
+ *      The practical effect of this Xbridge implementation is
+ *      device dependent; it probably will not adversely affect
+ *      32-bit cards, but may disable 64-bit data transfers by those
+ *      cards that normally support 64-bit data transfers.  
+ *
+ *      The Xbridge will assert REQ64 when all four slots are reset
+ *      by simultaneously toggling all four slot reset bits in the
+ *      Xbridge Control Register.  This is basically a PCI bus reset
+ *      and asserting the (bussed) REQ64 signal will not interfere
+ *      with any bus transactions in progress.
+ *
+ *      The Xbridge (and the SN0 Bridge) support resetting only
+ *      four PCI bus slots via the (X)bridge Control Register.
+ *
+ *      To reset an individual slot for the PCI Hot-Plug feature
+ *      use the L1 console commands to power-down and then 
+ *      power-up the slot, or use the kernel infrastructure
+ *      functions to power-down/up the slot when they are
+ *      implemented for SN1.
+ */
+int
+pcibr_slot_reset(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot)
+{
+	pcibr_soft_t		 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+	pci_bridge_t		*bridge;
+	uint64_t		 ctrlreg;
+
+	if (!pcibr_soft)
+		return(EINVAL);
+
+	if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
+		return(EINVAL);
+
+	/* Enable the DMA operations from this device of the xtalk widget
+	 * (PCI host bridge in this case).
+	 */
+	xtalk_widgetdev_enable(pcibr_soft->bs_conn, slot);
+
+	/* Set the reset slot bit in the bridge's wid control register
+	 * to reset the PCI slot 
+	 */
+	bridge = pcibr_soft->bs_base;
+
+	/* Read the bridge widget control and clear out the reset pin
+	 * bit for the corresponding slot.  Then restore the register.
+	 * NOTE: PCI card gets reset when the reset pin bit changes
+	 * from 0 to 1.
+	 */
+	ctrlreg = pcireg_control_get(bridge);
+	pcireg_control_bit_clr(bridge, PCIBR_CTRL_RST_PIN(slot));
+	pcireg_control_set(bridge, ctrlreg);	
+
+	/* Flush the write buffers if any !! */
+	pcireg_wrb_flush_get(bridge, slot);
+
+	return(0);
+}
+#endif
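As the comment above explains, REQ64 is only asserted when all four slot reset bits are toggled together. A hedged sketch of what such a whole-bus reset could look like, reusing the pcireg_* accessors from the disabled pcibr_slot_reset() above; the function name is hypothetical and locking and error handling are omitted:

/* Hedged sketch, not part of the patch: whole-bus reset by toggling all
 * four slot RST bits together, per the comment above. */
static int pcibr_bus_reset_sketch(pcibr_soft_t pcibr_soft)
{
	pci_bridge_t	*bridge = pcibr_soft->bs_base;
	uint64_t	 ctrlreg = pcireg_control_get(bridge);
	pciio_slot_t	 slot;

	/* Drive every slot's (non-bussed) RST pin low ... */
	for (slot = 0; slot < 4; slot++)
		pcireg_control_bit_clr(bridge, PCIBR_CTRL_RST_PIN(slot));

	/* ... then restore the saved value; the 0->1 edge resets the cards
	 * and, with all four bits changing at once, REQ64 is asserted too. */
	pcireg_control_set(bridge, ctrlreg);

	/* Flush posted writes, as pcibr_slot_reset() does per slot. */
	pcireg_wrb_flush_get(bridge, 0);

	return 0;
}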
 
-#define PROBE_LOCK 0	/* FIXME: we're attempting to lock around accesses
-			 * to b_int_enable.   This hangs pcibr_probe_slot()
-			 */
 
 /*
  * pcibr_slot_info_init
@@ -514,7 +546,7 @@
     pcibr_soft_t	    pcibr_soft;
     pcibr_info_h	    pcibr_infoh;
     pcibr_info_t	    pcibr_info;
-    bridge_t		   *bridge;
+    pci_bridge_t	   *bridge;
     cfg_p                   cfgw;
     unsigned                idword;
     unsigned                pfail;
@@ -533,7 +565,8 @@
     int			    func;
     vertex_hdl_t	    conn_vhdl;
     pcibr_soft_slot_t	    slotp;
-    
+    uint64_t		    device_reg;
+
     /* Get the basic software information required to proceed */
     pcibr_soft = pcibr_soft_get(pcibr_vhdl);
     if (!pcibr_soft)
@@ -550,17 +583,17 @@
 	return(0);    
     }
 
+#ifdef PCI_HOTPLUG
+    /* Check for a slot with any system critical functions */
+    if (pcibr_is_slot_sys_critical(pcibr_vhdl, slot))
+        return(EPERM);
+#endif /* PCI_HOTPLUG */
+
     /* Try to read the device-id/vendor-id from the config space */
     cfgw = pcibr_slot_config_addr(bridge, slot, 0);
 
-#if PROBE_LOCK
-    s = pcibr_lock(pcibr_soft);
-#endif
-    if (pcibr_probe_slot(bridge, cfgw, &idword)) 
+    if (pcibr_probe_slot(bridge, cfgw, &idword))
 	return(ENODEV);
-#if PROBE_LOCK
-    pcibr_unlock(pcibr_soft, s);
-#endif
 
     slotp = &pcibr_soft->bs_slot[slot];
     slotp->slot_status |= SLOT_POWER_UP;
@@ -571,7 +604,7 @@
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PROBE, pcibr_vhdl,
 		"pcibr_slot_info_init: slot=%d, vendor=0x%x, device=0x%x\n",
 		PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), vendor, device));
-
+	
     /* If the vendor id is not valid then the slot is not populated
      * and we are done.
      */
@@ -579,6 +612,7 @@
 	return(ENODEV);			
     
     htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
+
     nfunc = 1;
     rfunc = PCIIO_FUNC_NONE;
     pfail = 0;
@@ -591,16 +625,10 @@
     if (htype & 0x80) {		/* MULTIFUNCTION */
 	for (func = 1; func < 8; ++func) {
 	    cfgw = pcibr_func_config_addr(bridge, 0, slot, func, 0);
-#if PROBE_LOCK
-            s = pcibr_lock(pcibr_soft);
-#endif
 	    if (pcibr_probe_slot(bridge, cfgw, &idwords[func])) {
 		pfail |= 1 << func;
 		continue;
 	    }
-#if PROBE_LOCK
-            pcibr_unlock(pcibr_soft, s);
-#endif
 	    vendor = 0xFFFF & idwords[func];
 	    if (vendor == 0xFFFF) {
 		pfail |= 1 << func;
@@ -631,49 +659,21 @@
 	    rfunc = func;
 	}
 	htype &= 0x7f;
-	if (htype != 0x00) {
-	    printk(KERN_WARNING 
+	if (htype == 0x00) {			/* type 0 header */
+	    nbars = PCI_CFG_BASE_ADDRS;
+	} else if (htype == 0x01) {		/* type 1 header */
+	    nbars = PCI_CFG_PPB_BASE_ADDRS;
+	} else {				/* unknown/unsupported header */
+	    KERN_MSG(K_WARN,
 		"%s pcibr: pci slot %d func %d has strange header type 0x%x\n",
 		    pcibr_soft->bs_name, slot, func, htype);
 	    nbars = 2;
-	} else {
-	    nbars = PCI_CFG_BASE_ADDRS;
 	}
 
 	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_CONFIG, pcibr_vhdl,
-                "pcibr_slot_info_init: slot=%d, func=%d, cfgw=0x%x\n",
+                "pcibr_slot_info_init: slot=%d, func=%d, cfgw=0x%lx\n",
 		PCIBR_DEVICE_TO_SLOT(pcibr_soft,slot), func, cfgw));
 
-#ifdef PIC_LATER
-        /*
-         * Check for a Quad ATM PCI "card" and return all the PCI bus
-         * memory and I/O space.  This will work-around an apparent
-         * hardware problem with the Quad ATM XIO card handling large
-         * PIO addresses.  Releasing all the space for use by the card
-         * will lower the PIO addresses with the PCI bus address space.
-         * This is OK since the PROM did not assign any BAR addresses. 
-         *
-         * Only release all the PCI bus addresses once.
-         *
-         */
-        if ((vendor == LINC_VENDOR_ID_NUM) && (device == LINC_DEVICE_ID_NUM)) {
-            iopaddr_t               prom_base_addr = pcibr_soft->bs_xid << 24;
-            int                     prom_base_size = 0x1000000;
-
-            if (!(pcibr_soft->bs_bus_addr_status & PCIBR_BUS_ADDR_MEM_FREED)) {
-		pciio_device_win_populate(&pcibr_soft->bs_mem_win_map,
-					  prom_base_addr, prom_base_size);
-                pcibr_soft->bs_bus_addr_status |= PCIBR_BUS_ADDR_MEM_FREED;
-            }
-
-            if (!(pcibr_soft->bs_bus_addr_status & PCIBR_BUS_ADDR_IO_FREED)) {
-		pciio_device_win_populate(&pcibr_soft->bs_io_win_map,
-					  prom_base_addr, prom_base_size);
-                pcibr_soft->bs_bus_addr_status |= PCIBR_BUS_ADDR_IO_FREED;
-            }
-        }
-#endif	/* PIC_LATER */
-
 	/* 
 	 * If the latency timer has already been set, by prom or by the
 	 * card itself, use that value.  Otherwise look at the device's
@@ -688,9 +688,8 @@
 	 */
 
 	lt_time = do_pcibr_config_get(cfgw, PCI_CFG_LATENCY_TIMER, 1);
-
-	if ((lt_time == 0) && !(bridge->b_device[slot].reg & BRIDGE_DEV_RT) &&
-				       (device == 0x5 /* RAD_DEV */)) {
+	device_reg = pcireg_device_get(bridge, slot);
+	if ((lt_time == 0) && !(device_reg & PCIBR_DEV_RT)) {
 	     unsigned	min_gnt;
 	     unsigned	min_gnt_mult;
 	    
@@ -722,7 +721,6 @@
 		    PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func, lt_time));
 	}
 
-
 	/* In our architecture the setting of the cacheline size isn't 
 	 * beneficial for cards in PCI mode, but in PCI-X mode devices
 	 * can optionally use the cacheline size value for internal 
@@ -737,27 +735,25 @@
 			"func=%d, to 0x20\n",
 			PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func));
 	    }
+	}
 
-	    /* Get the PCI-X capability if running in PCI-X mode.  If the func
-	     * doesnt have a pcix capability, allocate a PCIIO_VENDOR_ID_NONE
-	     * pcibr_info struct so the device driver for that function is not
-	     * called.
-	     */
+	/* Get the PCI-X capability if running in PCI-X mode.  If the func
+	 * doesn't have a pcix capability, allocate a PCIIO_VENDOR_ID_NONE
+	 * pcibr_info struct so the device driver for that function is not
+	 * called.
+	 */
+	if (IS_PCIX(pcibr_soft)) {
 	    if (!(pcix_cap = pcibr_find_capability(cfgw, PCI_CAP_PCIX))) {
-		printk(KERN_WARNING
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-		        "%v: Bus running in PCI-X mode, But card in slot %d, "
-		        "func %d not PCI-X capable\n", pcibr_vhdl, slot, func);
-#else
-		        "0x%lx: Bus running in PCI-X mode, But card in slot %d, "
-		        "func %d not PCI-X capable\n", (unsigned long)pcibr_vhdl, slot, func);
-#endif
+		KERN_MSG(K_WARN,
+		        "%s: Bus running in PCI-X mode, But card in slot %d, "
+		        "func %d not PCI-X capable\n", 
+			pcibr_soft->bs_name, slot, func);
 		pcibr_device_info_new(pcibr_soft, slot, PCIIO_FUNC_NONE,
 		               PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
 		continue;
 	    }
 	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_CONFIG, pcibr_vhdl,
-                    "pcibr_slot_info_init: PCI-X capability at 0x%x for "
+                    "pcibr_slot_info_init: PCI-X capability at 0x%lx for "
 		    "slot=%d, func=%d\n", 
 		    pcix_cap, PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func));
 	} else {
@@ -771,6 +767,9 @@
          * and the number of max outstanding split trasnactions that they
 	 * have requested.  NOTE: "pcix_cap != NULL" implies IS_PCIX()
 	 */
+	/* XXX: HABECK: PIC-BRINGUP: f_pcix_cap references need to use the
+	 *                           do_pcibr_config_get/set() routine!
+	 */
 	pcibr_info->f_pcix_cap = (cap_pcix_type0_t *)pcix_cap;
 	if (pcibr_info->f_pcix_cap) {
 	    int max_out;      /* max outstanding splittrans from status reg */
@@ -836,18 +835,19 @@
 	     * start supporting more PCI providers.
 	     */
 	    base = do_pcibr_config_get(wptr, (win * 4), 4);
-
 	    if (base & PCI_BA_IO_SPACE) {
 		/* BASE is in I/O space. */
 		space = PCIIO_SPACE_IO;
 		mask = -4;
 		code = base & 3;
 		base = base & mask;
+
 		if (base == 0) {
 		    ;		/* not assigned */
 		} else if (!(cmd_reg & PCI_CMD_IO_SPACE)) {
 		    base = 0;	/* decode not enabled */
 		}
+
 	    } else {
 		/* BASE is in MEM space. */
 		space = PCIIO_SPACE_MEM;
@@ -867,12 +867,51 @@
 	    }
 
 	    if (base != 0) {	/* estimate size */
+		pciio_space_t	tmp_space = space;
+		iopaddr_t	tmp_base;
+
 		size = base & -base;
+
+		/*
+		 * Reserve this space in the relevant address map.  Don't
+		 * care about the return code from pcibr_bus_addr_alloc().
+		 */
+
+		if (space == PCIIO_SPACE_MEM && code != PCI_BA_MEM_1MEG) {
+			tmp_space = PCIIO_SPACE_MEM32;
+		}
+
+                tmp_base = pcibr_bus_addr_alloc(pcibr_soft,
+                                            	&pcibr_info->f_window[win],
+                                            	tmp_space,
+                                            	base, size, 0);
+
+		/*
+		 * The kernel only allows functions to have so many variable
+		 * args; attempting to call PCIBR_DEBUG_ALWAYS() with more than
+		 * 5 printf arguments fails, so sprintf() it into a temporary
+		 * string (tmp_str).
+         	 */
+		if (pcibr_debug_mask & PCIBR_DEBUG_BAR) {
+		    char	tmp_str[256];
+
+		    sprintf(tmp_str, "pcibr_slot_info_init: slot=%d, "
+			"func=%d win %d reserving space %s [0x%lx..0x%lx], "
+			"tmp_base 0x%lx\n",
+			PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
+			func, win, pci_space[tmp_space], (uint64_t)base,
+			(uint64_t)(base + size - 1), (uint64_t)tmp_base);
+
+		    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_BAR, pcibr_vhdl, 
+				"%s",tmp_str));
+		}
+
 	    } else {		/* calculate size */
 		do_pcibr_config_set(wptr, (win * 4), 4, ~0);    /* write 1's */
 		size = do_pcibr_config_get(wptr, (win * 4), 4); /* read back */
 		size &= mask;	/* keep addr */
 		size &= -size;	/* keep lsbit */
+
 		if (size == 0)
 		    continue;
 	    }	
@@ -940,9 +979,15 @@
 pcibr_slot_info_free(vertex_hdl_t pcibr_vhdl,
                      pciio_slot_t slot)
 {
-    pcibr_soft_t	pcibr_soft;
-    pcibr_info_h	pcibr_infoh;
-    int			nfunc;
+    pcibr_soft_t	 pcibr_soft;
+    pcibr_info_h	 pcibr_infoh;
+    int			 nfunc;
+
+#if defined(PCI_HOTSWAP_DEBUG)
+    pci_bridge_t	*bridge;
+    int			 win;
+    cfg_p		 wptr;
+#endif /* PCI_HOTSWAP_DEBUG */
 
     pcibr_soft = pcibr_soft_get(pcibr_vhdl);
 
@@ -952,6 +997,15 @@
     if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
 	return(EINVAL);
 
+#if defined(PCI_HOTSWAP_DEBUG)
+    /* Clean out all the base registers */
+    bridge = pcibr_soft->bs_base;
+    wptr = pcibr_slot_config_addr(bridge, slot, PCI_CFG_BASE_ADDR_0);
+    
+    for (win = 0; win < PCI_CFG_BASE_ADDRS; ++win) 
+	do_pcibr_config_set(wptr, (win * 4), 4, 0);
+#endif /* PCI_HOTSWAP_DEBUG */
+
     nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
 
     pcibr_device_info_free(pcibr_vhdl, slot);
@@ -1079,14 +1133,14 @@
     pcibr_soft_t	 pcibr_soft;
     pcibr_info_h	 pcibr_infoh;
     pcibr_info_t	 pcibr_info;
-    bridge_t		*bridge;
+    pci_bridge_t	*bridge;
     iopaddr_t            mask;
     int		       	 nbars;
     int		       	 nfunc;
     int			 func;
     int			 win;
     int                  rc = 0;
-    int			 align;
+    int			 align = 0;
     int			 align_slot;
 
     pcibr_soft = pcibr_soft_get(pcibr_vhdl);
@@ -1126,8 +1180,7 @@
      * the entire "lo" area is only a
      * megabyte, total ...
      */
-    align_slot = 0x100000;
-    align = align_slot;
+    align_slot = (slot < 2) ? 0x200000 : 0x100000;
 
     for (func = 0; func < nfunc; ++func) {
 	cfg_p                   cfgw;
@@ -1135,9 +1188,7 @@
 	pciio_space_t           space;
 	iopaddr_t               base;
 	size_t                  size;
-#ifdef PCI_LATER
 	char			tmp_str[256];
-#endif
 	unsigned                pci_cfg_cmd_reg;
 	unsigned                pci_cfg_cmd_reg_add = 0;
 
@@ -1158,6 +1209,7 @@
 	    nbars = PCI_CFG_BASE_ADDRS;
 
 	for (win = 0; win < nbars; ++win) {
+
 	    space = pcibr_info->f_window[win].w_space;
 	    base = pcibr_info->f_window[win].w_base;
 	    size = pcibr_info->f_window[win].w_size;
@@ -1172,23 +1224,24 @@
          	 * 5 printf arguments fails so sprintf() it into a temporary 
 		 * string (tmp_str).
          	 */
-#if defined(SUPPORT_PRINTING_R_FORMAT)
 		if (pcibr_debug_mask & PCIBR_DEBUG_BAR) {
 		    sprintf(tmp_str, "pcibr_slot_addr_space_init: slot=%d, "
-			"func=%d win %d is in %r [0x%x..0x%x], allocated by "
-			"prom\n", PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
-			func, win, space, space_desc, base, base + size - 1);
+			"func=%d win %d is in space %s [0x%lx..0x%lx], "
+			"allocated by prom\n",
+			PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func, win,
+			pci_space[space], (uint64_t)base, 
+			(uint64_t)(base + size - 1));
 		    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_BAR, pcibr_vhdl, 
 				"%s",tmp_str));
 		}
-#endif	/* SUPPORT_PRINTING_R_FORMAT */
+
 		continue;		/* already allocated */
 	    }
 
 	    align = (win) ? size : align_slot; 
 
-	    if (align < _PAGESZ)
-		align = _PAGESZ;        /* ie. 0x00004000 */
+	    if (align < PAGE_SIZE)
+		align = PAGE_SIZE;        /* ie. 0x00004000 */
  
 	    switch (space) {
 	    case PCIIO_SPACE_IO:
@@ -1203,7 +1256,7 @@
 	    case PCIIO_SPACE_MEM:
 		if ((do_pcibr_config_get(wptr, (win * 4), 4) &
 		     PCI_BA_MEM_LOCATION) == PCI_BA_MEM_1MEG) {
- 
+
 		    /* allocate from 20-bit PCI space */
                     base = pcibr_bus_addr_alloc(pcibr_soft,
                                                 &pcibr_info->f_window[win],
@@ -1232,28 +1285,26 @@
 	    pcibr_info->f_window[win].w_base = base;
 	    do_pcibr_config_set(wptr, (win * 4), 4, base);
 
-#if defined(SUPPORT_PRINTING_R_FORMAT)
 	    if (pcibr_debug_mask & PCIBR_DEBUG_BAR) {
                 if (base >= size) {
 		    sprintf(tmp_str,"pcibr_slot_addr_space_init: slot=%d, func="
-				    "%d, win %d is in %r[0x%x..0x%x], "
+				    "%d, win %d is in space %s [0x%lx..0x%lx], "
 				    "allocated by pcibr\n",
 				    PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), 
-				    func, win, space, space_desc, base, 
-				    base + size - 1);
+				    func, win, pci_space[space], (uint64_t)base,
+				    (uint64_t)(base + size - 1));
 		     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_BAR, pcibr_vhdl, 
 				 "%s",tmp_str));
-	        }
-		else {
+	        } else {
 		    sprintf(tmp_str,"pcibr_slot_addr_space_init: slot=%d, func="
-				    "%d, win %d, unable to alloc 0x%x in %r\n",
-				    PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), 
-				    func, win, size, space, space_desc);
+				    "%d, win %d, unable to alloc 0x%lx in "
+				    "space %s\n", 
+				    PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),func,
+				    win, (uint64_t)size, pci_space[space]);
 		    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_BAR, pcibr_vhdl, 
 				"%s",tmp_str));
 	        }
 	    }
-#endif	/* SUPPORT_PRINTING_R_FORMAT */
 	}				/* next base */
 
 	/*
@@ -1274,6 +1325,7 @@
 		    rc = ENOSPC;
 		else {
 		    do_pcibr_config_set(wptr, 0, 4, base);
+
 		    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_BAR, pcibr_vhdl,
 				"pcibr_slot_addr_space_init: slot=%d, func=%d, "
 				"ROM in [0x%X..0x%X], allocated by pcibr\n",
@@ -1313,16 +1365,12 @@
 	pci_cfg_cmd_reg_add |= PCI_CMD_BUS_MASTER;
 
 	pci_cfg_cmd_reg = do_pcibr_config_get(cfgw, PCI_CFG_COMMAND, 4);
-
-#if PCI_FBBE	/* XXX- check here to see if dev can do fast-back-to-back */
-	if (!((pci_cfg_cmd_reg >> 16) & PCI_STAT_F_BK_BK_CAP))
-	    fast_back_to_back_enable = 0;
-#endif
 	pci_cfg_cmd_reg &= 0xFFFF;
 	if (pci_cfg_cmd_reg_add & ~pci_cfg_cmd_reg)
 	    do_pcibr_config_set(cfgw, PCI_CFG_COMMAND, 4, 
 				pci_cfg_cmd_reg | pci_cfg_cmd_reg_add);
     }				/* next func */
+
     return(rc);
 }
 
@@ -1330,14 +1378,12 @@
  * pcibr_slot_device_init
  * 	Setup the device register in the bridge for this PCI slot.
  */
-
 int
 pcibr_slot_device_init(vertex_hdl_t pcibr_vhdl,
 		       pciio_slot_t slot)
 {
     pcibr_soft_t	 pcibr_soft;
-    bridge_t		*bridge;
-    bridgereg_t		 devreg;
+    uint64_t		 devreg;
 
     pcibr_soft = pcibr_soft_get(pcibr_vhdl);
 
@@ -1347,34 +1393,38 @@
     if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
 	return(EINVAL);
 
-    bridge = pcibr_soft->bs_base;
+    /*
+     * Adjustments to Device(x) and init of bss_device shadow
+     */
+    devreg = pcireg_device_get(pcibr_soft, slot);
+    devreg &= ~PCIBR_DEV_PAGE_CHK_DIS;
 
     /*
-     * Adjustments to Device(x)
-     * and init of bss_device shadow
+     * Enable virtual channels by default (exception: see PIC WAR below)
      */
-    devreg = bridge->b_device[slot].reg;
-    devreg &= ~BRIDGE_DEV_PAGE_CHK_DIS;
+    devreg |= PCIBR_DEV_VIRTUAL_EN;
 
     /*
-     * PIC WAR. PV# 855271
-     * Don't enable virtual channels in the PIC by default.
-     * Can cause problems with 32-bit devices. (The bit is only intended
-     * for 64-bit devices).  We set the bit in pcibr_try_set_device()
-     * if we're 64-bit and requesting virtual channels.
+     * PIC WAR. PV# 855271:  Disable virtual channels in the PIC since
+     * it can cause problems with 32-bit devices.  We'll set the bit in
+     * pcibr_try_set_device() iff we're 64-bit and requesting virtual 
+     * channels.
      */
-    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV855271, pcibr_soft))
-	devreg |= BRIDGE_DEV_COH;
-    else
-	devreg |= BRIDGE_DEV_COH | BRIDGE_DEV_VIRTUAL_EN;
+    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV855271, pcibr_soft)) {
+	devreg &= ~PCIBR_DEV_VIRTUAL_EN;
+    }
+
+    /* If PIC, force Coherent transactions. */
+    if (IS_PIC_SOFT(pcibr_soft)) {
+	devreg |= PCIBR_DEV_COH;
+    }
+
     pcibr_soft->bs_slot[slot].bss_device = devreg;
-    bridge->b_device[slot].reg = devreg;
+    pcireg_device_set(pcibr_soft, slot, devreg);
 
-#ifdef PIC_LATER
     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pcibr_vhdl,
-		"pcibr_slot_device_init: Device(%d): %R\n",
-		slot, devreg, device_bits));
-#endif
+		"pcibr_slot_device_init: Device(%d): 0x%x\n",
+		slot, devreg));
     return(0);
 }
 
@@ -1461,16 +1511,10 @@
     pcibr_info_t	pcibr_info;
     int			func;
     vertex_hdl_t	xconn_vhdl, conn_vhdl;
-#ifdef PIC_LATER
-    vertex_hdl_t	scsi_vhdl;
-#endif
     int			nfunc;
     int                 error_func;
     int                 error_slot = 0;
     int                 error = ENODEV;
-#ifdef PIC_LATER
-    int                 hwg_err;
-#endif
 
     pcibr_soft = pcibr_soft_get(pcibr_vhdl);
 
@@ -1490,7 +1534,6 @@
     pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
 
     for (func = 0; func < nfunc; ++func) {
-
 	pcibr_info = pcibr_infoh[func];
 	
 	if (!pcibr_info)
@@ -1502,14 +1545,28 @@
 	conn_vhdl = pcibr_info->f_vertex;
 
 
-	error_func = pciio_device_attach(conn_vhdl, drv_flags);
-
-#ifdef PIC_LATER
+	/*
+	 * Check if this device is a PCI-to-PCI bridge.
+	 * pciio_ppb_attach() returns -1 if it is not.  In that case
+	 * call the normal device attach.
+	 */
+	error_func = pciio_ppb_attach(conn_vhdl);
+	if (error_func < 0)
+		error_func = pciio_device_attach(conn_vhdl, drv_flags);
+
+	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEV_ATTACH, pcibr_vhdl,
+		    "pcibr_slot_call_device_attach: slot=%d, func=%d "
+		    "drv_flags=0x%x, pciio_device_attach returned %d\n",
+		    PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func, 
+		    drv_flags, error_func));
+#ifdef PCI_HOTPLUG
         /*
          * Try to assign well-known SCSI controller numbers for hot-plug
          * insert
          */
         if (drv_flags) {
+	    vertex_hdl_t	scsi_vhdl;
+    	    int			hwg_err;
 
             hwg_err = hwgraph_path_lookup(conn_vhdl, EDGE_LBL_SCSI_CTLR "/0",
                                           &scsi_vhdl, NULL);
@@ -1536,7 +1593,7 @@
             hwgraph_vertex_unref(scsi_vhdl);
 
         }
-#endif /* PIC_LATER */
+#endif /* PCI_HOTPLUG */
 
         pcibr_info->f_att_det_error = error_func;
 
@@ -1616,6 +1673,12 @@
 
 	error_func = pciio_device_detach(conn_vhdl, drv_flags);
 
+	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEV_DETACH, pcibr_vhdl,
+		    "pcibr_slot_call_device_detach: slot=%d, func=%d "
+		    "drv_flags=0x%x, pciio_device_detach returned %d\n",
+		    PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func, 
+		    drv_flags, error_func));
+
         pcibr_info->f_att_det_error = error_func;
 
 	if (error_func)
@@ -1641,6 +1704,7 @@
     return(error);
 }
 
+#ifdef PCI_HOTPLUG
 /*
  * pcibr_slot_attach
  *	This is a place holder routine to keep track of all the
@@ -1656,11 +1720,113 @@
                   int         *sub_errorp)
 {
     pcibr_soft_t  pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-#ifdef PIC_LATER
     timespec_t    ts;
-#endif
     int		  error;
 
+    if (!(pcibr_soft->bs_slot[slot].slot_status & SLOT_POWER_UP)) {
+	uint64_t speed = pcireg_speed_get(pcibr_soft);
+	uint64_t mode = pcireg_mode_get(pcibr_soft);
+
+        /* Power-up the slot */
+        error = pcibr_slot_pwr(pcibr_vhdl, slot, L1_REQ_PCI_UP, l1_msg);
+        if (error) {
+            if (sub_errorp)
+                *sub_errorp = error;
+            return(PCI_L1_ERR);
+        } else {
+            pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_POWER_MASK;
+            pcibr_soft->bs_slot[slot].slot_status |= SLOT_POWER_UP;
+        }
+
+	/* The speed/mode of the bus may have changed due to the hotplug */
+	pcibr_soft->bs_bridge_mode = ((speed << 1) | mode);
+
+        /*
+         * Allow cards like the Alteon Gigabit Ethernet Adapter to complete
+         * on-card initialization following the slot reset
+         */
+         ts.tv_sec = 0;                      /* 0 secs */
+         ts.tv_nsec = 500 * (1000 * 1000);   /* 500 msecs */
+         nano_delay(&ts);
+
+#if 0
+        /* Reset the slot */
+        error = pcibr_slot_reset(pcibr_vhdl, slot);
+        if (error) {
+            if (sub_errorp)
+                *sub_errorp = error;
+            return(PCI_SLOT_RESET_ERR);
+        }
+#endif
+
+        /* Find out what is out there */
+        error = pcibr_slot_info_init(pcibr_vhdl, slot);
+        if (error) {
+            if (sub_errorp)
+                *sub_errorp = error;
+            return(PCI_SLOT_INFO_INIT_ERR);
+        }
+
+        /* Set up the address space for this slot in the PCI land */
+        error = pcibr_slot_addr_space_init(pcibr_vhdl, slot);
+        if (error) {
+            if (sub_errorp)
+                *sub_errorp = error;
+            return(PCI_SLOT_ADDR_INIT_ERR);
+        }
+
+	/* Allocate the PCI-X Read Buffer Attribute Registers (RBARs)*/
+	if (IS_PCIX(pcibr_soft)) {
+	    int tmp_slot;
+
+	    /* Recalculate the RBARs for all the devices on the bus.  Only
+	     * return an error if we error for the given 'slot'
+	     */
+	    pcibr_soft->bs_pcix_rbar_inuse = 0;
+	    pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
+	    pcibr_soft->bs_pcix_rbar_percent_allowed = 
+					pcibr_pcix_rbars_calc(pcibr_soft);
+	    for (tmp_slot = pcibr_soft->bs_min_slot;
+			tmp_slot < PCIBR_NUM_SLOTS(pcibr_soft); ++tmp_slot) {
+		if (tmp_slot == slot)
+		    continue;	/* skip this 'slot', we do it below */
+                (void)pcibr_slot_pcix_rbar_init(pcibr_soft, tmp_slot);
+	    }
+
+	    error = pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
+	    if (error) {
+		if (sub_errorp)
+		    *sub_errorp = error;
+		return(PCI_SLOT_RBAR_ALLOC_ERR);
+	    }
+	}
+
+        /* Setup the device register */
+        error = pcibr_slot_device_init(pcibr_vhdl, slot);
+        if (error) {
+            if (sub_errorp)
+                *sub_errorp = error;
+            return(PCI_SLOT_DEV_INIT_ERR);
+        }
+
+        /* Setup host/guest relations */
+        error = pcibr_slot_guest_info_init(pcibr_vhdl, slot);
+        if (error) {
+            if (sub_errorp)
+                *sub_errorp = error;
+            return(PCI_SLOT_GUEST_INIT_ERR);
+        }
+
+        /* Initial RRB management */
+        error = pcibr_slot_initial_rrb_alloc(pcibr_vhdl, slot);
+        if (error) {
+            if (sub_errorp)
+                *sub_errorp = error;
+            return(PCI_SLOT_RRB_ALLOC_ERR);
+        }
+
+    }
+
     /* Do not allow a multi-function card to be hot-plug inserted */
     if (pcibr_soft->bs_slot[slot].bss_ninfo > 1) {
         if (sub_errorp)
@@ -1681,6 +1847,7 @@
 
     return(0);
 }
+#endif	/* PCI_HOTPLUG */
 
 /*
  * pcibr_slot_detach
@@ -1697,11 +1864,19 @@
     pcibr_soft_t  pcibr_soft = pcibr_soft_get(pcibr_vhdl);
     int		  error;
     
+#ifdef PCI_HOTPLUG
+    /* Make sure that we do not detach a system critical function vertex */
+    if(pcibr_is_slot_sys_critical(pcibr_vhdl, slot))
+        return(PCI_IS_SYS_CRITICAL);
+#endif /* PCI_HOTPLUG */
+
     /* Call the device detach function */
     error = (pcibr_slot_call_device_detach(pcibr_vhdl, slot, drv_flags));
     if (error) {
         if (sub_errorp)
             *sub_errorp = error;       
+	if (l1_msg)
+	    ;
         return(PCI_SLOT_DRV_DETACH_ERR);
     }
 
@@ -1721,62 +1896,256 @@
             (void)pcibr_slot_pcix_rbar_init(pcibr_soft, tmp_slot);
     }
 
+#ifdef PCI_HOTPLUG
+    /* Only power-down slot for a hot-plug detach */
+    if (!drv_flags)
+        return(0);
+ 
+    /* Power-down the slot */
+    error = pcibr_slot_pwr(pcibr_vhdl, slot, L1_REQ_PCI_DOWN, l1_msg);
+    if (!error) {
+        pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_POWER_MASK;
+        pcibr_soft->bs_slot[slot].slot_status |= SLOT_POWER_DOWN;
+    } else {
+        if (sub_errorp)
+            *sub_errorp = error;
+        return(PCI_L1_ERR);
+    }
+#endif /* PCI_HOTPLUG */
+
     return (0);
 
 }
 
+#ifdef PCI_HOTPLUG
+/*
+ * pcibr_slot_pwr
+ *      Power-up or power-down a PCI slot.  This routines makes calls to
+ *      the L1 system controller driver which requires "external" slot#.
+ */
+int
+pcibr_slot_pwr(vertex_hdl_t pcibr_vhdl,
+               pciio_slot_t slot,
+               int          up_or_down,
+	       char        *l1_msg)
+{
+    pcibr_soft_t        pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    nasid_t             nasid;
+    uint                rack, bay;
+
+    nasid = NASID_GET(pcibr_soft->bs_base);
+
+    if (PEBRICK_NODE(nasid)) {
+        if (peer_iobrick_rack_bay_get(nasid, &rack, &bay)) {
+            printf("Function pcibr_slot_pwr could not read rack and bay "
+                   "location of PEBrick at nasid %d\n", nasid);
+        }
+        return(peer_iobrick_pci_slot_pwr(pcibr_soft->bs_l1sc, rack, bay,
+			     pcibr_widget_to_bus(pcibr_vhdl),
+			     PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
+                             up_or_down,
+                             l1_msg));
+    }
+
+    return(iobrick_pci_slot_pwr(pcibr_soft->bs_l1sc, 
+                                pcibr_widget_to_bus(pcibr_vhdl),
+                                PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
+                                up_or_down,
+				l1_msg,
+				pcibr_soft->bs_xid));
+}
+
+
+/*
+ * pcibr_is_slot_sys_critical
+ *      Check slot for any functions that are system critical.
+ *      Return 1 if any are system critical or 0 otherwise.
+ *
+ *      This function will always return 0 when called by 
+ *      pcibr_attach() because the system critical vertices 
+ *      have not yet been set in the hwgraph.
+ */
+int
+pcibr_is_slot_sys_critical(vertex_hdl_t pcibr_vhdl,
+                      pciio_slot_t slot)
+{
+    pcibr_soft_t        pcibr_soft;
+    pcibr_info_h        pcibr_infoh;
+    pcibr_info_t        pcibr_info;
+    vertex_hdl_t        conn_vhdl = GRAPH_VERTEX_NONE;
+    int                 nfunc;
+    int                 func;
+
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    if (!pcibr_soft)
+	return(EINVAL);
+
+    if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
+	return(EINVAL);
+
+    nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
+    pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
+
+    for (func = 0; func < nfunc; ++func) {
+	char	vname[MAXDEVNAME];
+        pcibr_info = pcibr_infoh[func];
+        if (!pcibr_info)
+            continue;
+
+        if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
+            continue;
+
+        conn_vhdl = pcibr_info->f_vertex;
+ 	vertex_to_name(conn_vhdl, vname, MAXDEVNAME);
+        if (is_sys_critical_vertex(conn_vhdl)) { 
+            KERN_MSG(K_WARN, "%s is a system critical device vertex\n", vname);
+            return(1); 
+        }
+
+    }
+
+    return(0);
+}
+#endif /* PCI_HOTPLUG */
+
 /*
- * pcibr_probe_slot_pic: read a config space word
- * while trapping any errors; return zero if
+ * pcibr_probe_slot: read a config space word
+ * while trapping any errors; return zero if
  * all went OK, or nonzero if there was an error.
  * The value read, if any, is passed back
  * through the valp parameter.
  */
-static int
-pcibr_probe_slot_pic(bridge_t *bridge,
-                 cfg_p cfg,
-                 unsigned *valp)
-{
-	int rv;
-	picreg_t p_old_enable = (picreg_t)0, p_new_enable;
-	extern int badaddr_val(volatile void *, int, volatile void *);
-
-	p_old_enable = bridge->p_int_enable_64;
-	p_new_enable = p_old_enable & ~(BRIDGE_IMR_PCI_MST_TIMEOUT | PIC_ISR_PCIX_MTOUT);
-	bridge->p_int_enable_64 = p_new_enable;
-
-	if (bridge->p_err_int_view_64 & (BRIDGE_ISR_PCI_MST_TIMEOUT | PIC_ISR_PCIX_MTOUT))
-		bridge->p_int_rst_stat_64 = BRIDGE_IRR_MULTI_CLR;
-
-	if (bridge->p_int_status_64 & (BRIDGE_IRR_PCI_GRP | PIC_PCIX_GRP_CLR)) {
-		bridge->p_int_rst_stat_64 = (BRIDGE_IRR_PCI_GRP_CLR | PIC_PCIX_GRP_CLR);
-		(void) bridge->b_wid_tflush;	/* flushbus */
-	}
-	rv = badaddr_val((void *) cfg, 4, valp);
-	if (bridge->p_err_int_view_64 & (BRIDGE_ISR_PCI_MST_TIMEOUT | PIC_ISR_PCIX_MTOUT)) {
-		bridge->p_int_rst_stat_64 = BRIDGE_IRR_MULTI_CLR;
-		rv = 1;         /* unoccupied slot */
+int
+pcibr_probe_slot(pci_bridge_t *bridge,
+		 cfg_p cfg,
+		 unsigned *valp)
+{
+    return pcibr_probe_work(bridge, (void *)cfg, 4, (void *)valp);
+}
+
+/*
+ * Probe an offset within a piomap with errors disabled.
+ * len must be 1, 2, 4, or 8.  	The probed address must be a multiple of
+ * len.
+ *
+ * Returns:	0	if the offset was probed and put valid data in valp
+ *		-1	if there was a usage error such as improper alignment
+ *			or out of bounds offset/len combination.  In this
+ *			case, the map was not probed
+ *		1 	if the offset was probed but resulted in an error
+ *			such as device not responding, bus error, etc.
+ */
+
+int
+pcibr_piomap_probe(pcibr_piomap_t piomap, off_t offset, int len, void *valp)
+{
+	if (offset + len > piomap->bp_mapsz) {
+		return -1;
 	}
-	bridge->p_int_enable_64 = p_old_enable;
-	bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
-	return(rv);
+
+	return pcibr_probe_work(piomap->bp_soft->bs_base,
+				piomap->bp_kvaddr + offset, len, valp);
 }
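+
+/*
+ * Usage sketch (illustrative only; caller and variable names are
+ * hypothetical): a caller of pcibr_piomap_probe() is expected to keep
+ * usage errors and access errors apart, e.g.
+ *
+ *	uint32_t val;
+ *	int rc = pcibr_piomap_probe(piomap, offset, sizeof(val), &val);
+ *
+ *	if (rc < 0)
+ *		handle the usage error (bad alignment or out-of-bounds);
+ *	else if (rc > 0)
+ *		treat the device as absent or not responding;
+ *	else
+ *		use val, which now holds the probed data;
+ */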
 
 /*
- * pcibr_probe_slot: read a config space word
- * while trapping any errors; return zero if
+ * pcibr_probe_work: read an address
+ * while trapping any errors; return zero if
  * all went OK, or nonzero if there was an error.
+ * Errors are further divided into <0 for usage
+ * errors and >0 for access errors.
+ *
+ * Len must be 1, 2, 4, or 8, and addr must be aligned on len.
+ *
  * The value read, if any, is passed back
  * through the valp parameter.
  */
 static int
-pcibr_probe_slot(bridge_t *bridge,
-		 cfg_p cfg,
-		 unsigned *valp)
+pcibr_probe_work(pci_bridge_t *bridge,
+		 void *addr,
+		 int len,
+		 void *valp)
 {
-    return(pcibr_probe_slot_pic(bridge, cfg, valp));
+    int			rv, changed;
+
+    /*
+     * Sanity checks ...
+     */
+
+    if (len != 1 && len != 2 && len != 4 && len != 8) {
+	return -1;				/* invalid len */
+    }
+
+    if ((uint64_t)addr & (len-1)) {
+	return -1;				/* invalid alignment */
+    }
+
+    changed = pcibr_disable_mst_timeout_work(bridge);
+
+    rv = snia_badaddr_val((void *)addr, len, valp);
+
+    /* Clear the int_view register in case it was set */
+    pcireg_intr_reset_set(bridge, PCIBR_IRR_MULTI_CLR);
+
+    if (changed) {
+    	pcibr_enable_mst_timeout_work(bridge);
+    }
+
+    return (rv ? 1 : 0);	/* return 1 for snia_badaddr_val error, 0 if ok */
+}
+
+uint64_t
+pcibr_disable_mst_timeout(vertex_hdl_t pcibr_vhdl)
+{
+	pcibr_soft_t	pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+
+	return pcibr_disable_mst_timeout_work(pcibr_soft->bs_base);
+}
+
+int
+pcibr_enable_mst_timeout(vertex_hdl_t pcibr_vhdl)
+{
+	pcibr_soft_t	pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+
+	return pcibr_enable_mst_timeout_work(pcibr_soft->bs_base);
 }
 
+static uint64_t
+pcibr_disable_mst_timeout_work(pci_bridge_t *bridge)
+{
+    uint64_t		old_enable;
+    uint64_t		new_enable;
+    uint64_t		intr_bits;
+
+    intr_bits = PCIBR_ISR_PCI_MST_TIMEOUT | PCIBR_ISR_PCIX_MTOUT;
+    old_enable = pcireg_intr_enable_get(bridge);
+    pcireg_intr_enable_bit_clr(bridge, intr_bits);
+    new_enable = pcireg_intr_enable_get(bridge);
+
+    if (old_enable == new_enable) {
+	return 0;		/* was already disabled */
+    } else {
+	return 1;
+    }
+}
+
+static int
+pcibr_enable_mst_timeout_work(pci_bridge_t *bridge)
+{
+    uint64_t		old_enable;
+    uint64_t		new_enable;
+    uint64_t		intr_bits;
+    
+    intr_bits = PCIBR_ISR_PCI_MST_TIMEOUT | PCIBR_ISR_PCIX_MTOUT;
+    old_enable = pcireg_intr_enable_get(bridge);
+    pcireg_intr_enable_bit_set(bridge, intr_bits);
+    new_enable = pcireg_intr_enable_get(bridge);
+
+    if (old_enable == new_enable) {
+	return 0;		/* was already enabled */
+    } else {
+	return 1;
+    }
+}
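+
+/*
+ * Usage sketch (illustrative only; the caller shown is hypothetical):
+ * code that needs to touch a possibly-empty slot can bracket the access
+ * the same way pcibr_probe_work() does above, e.g.
+ *
+ *	changed = pcibr_disable_mst_timeout(pcibr_vhdl);
+ *	... issue the risky PIO ...
+ *	if (changed)
+ *		pcibr_enable_mst_timeout(pcibr_vhdl);
+ *
+ * so a master timeout from an unoccupied slot is not reported while the
+ * probe is in progress.
+ */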
 
 void
 pcibr_device_info_free(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot)
@@ -1785,12 +2154,12 @@
     pcibr_info_t	pcibr_info;
     pciio_function_t	func;
     pcibr_soft_slot_t	slotp = &pcibr_soft->bs_slot[slot];
-    bridge_t           *bridge = pcibr_soft->bs_base; 
+    pci_bridge_t       *bridge = pcibr_soft->bs_base;
     cfg_p               cfgw;
     int			nfunc = slotp->bss_ninfo;
     int                 bar;
     int                 devio_index;
-    int                 s;
+    unsigned long	s;
     unsigned            cmd_reg;
 
 
@@ -1813,7 +2182,7 @@
                 continue;
 
             /* Free the PCI bus space */
-            pciibr_bus_addr_free(pcibr_soft, &pcibr_info->f_window[bar]);
+            pcibr_bus_addr_free(&pcibr_info->f_window[bar]);
 
             /* Get index of the DevIO(x) register used to access this BAR */
             devio_index = pcibr_info->f_window[bar].w_devio_index;
@@ -1831,7 +2200,7 @@
 
         /* Free the Expansion ROM PCI bus space */
 	if(pcibr_info->f_rbase && pcibr_info->f_rsize) {
-            pciibr_bus_addr_free(pcibr_soft, &pcibr_info->f_rwindow);
+            pcibr_bus_addr_free(&pcibr_info->f_rwindow);
         }
 
         pcibr_unlock(pcibr_soft, s);
@@ -1853,12 +2222,6 @@
     slotp->bss_d64_flags = 0;
     slotp->bss_d32_base = PCIBR_D32_BASE_UNSET;
     slotp->bss_d32_flags = 0;
-
-    /* Clear out shadow info necessary for the external SSRAM workaround */
-    slotp->bss_ext_ates_active = ATOMIC_INIT(0);
-    slotp->bss_cmd_pointer = 0;
-    slotp->bss_cmd_shadow = 0;
-
 }
 
 
@@ -1901,9 +2264,9 @@
 
 
 void
-pciibr_bus_addr_free(pcibr_soft_t pcibr_soft, pciio_win_info_t win_info_p)
+pcibr_bus_addr_free(pciio_win_info_t win_info_p)
 {
-	pciio_device_win_free(&win_info_p->w_win_alloc);
+    pciio_device_win_free(&win_info_p->w_win_alloc);
 }
 
 /*
@@ -1916,18 +2279,30 @@
 {
     pcibr_soft_t	pcibr_soft = pcibr_soft_get(pcibr_vhdl);
     xwidgetnum_t	widget = pcibr_soft->bs_xid;
+    cnodeid_t 		cnode = NASID_TO_COMPACT_NODEID(pcibr_soft->bs_nasid);
+    slabid_t		slab = geo_slab((NODEPDA(cnode))->geoid);
     int			bricktype = pcibr_soft->bs_bricktype;
-    int			bus = pcibr_soft->bs_busnum;
+    int			bus;
     
-    /* 
-     * For PIC there are 2 busses per widget and pcibr_soft->bs_busnum
-     * will be 0 or 1.  For [X]BRIDGE there is 1 bus per widget and 
-     * pcibr_soft->bs_busnum will always be zero.  So we add bs_busnum
-     * to what io_brick_map_widget returns to get the bus number.
-     */
-    if ((bus += io_brick_map_widget(bricktype, widget)) > 0) {
-	return bus;
-    } else {
+    if ((bus = io_brick_map_widget(bricktype, widget)) <= 0) {
+	KERN_MSG(K_WARN, "pcibr_widget_to_bus() bad bricktype %d\n", bricktype);
 	return 0;
     }
+
+    /* For PIC there are 2 busses per widget and pcibr_soft->bs_busnum
+     * will be 0 or 1.  Add in the correct PIC bus offset.
+     */
+    if (IS_PIC_SOFT(pcibr_soft)) {
+	bus += pcibr_soft->bs_busnum;
+    }
+
+    /* For TIOCP there may be more than one TIO in a brick; each TIO is
+     * its own nasid, and each TIO has two busses (CP0 & CP1).  Add in the
+     * correct slab offset.
+     */
+    if (IS_TIOCP_SOFT(pcibr_soft)) {
+	bus += (slab * 2);
+    }
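+
+    /* Illustrative example (hypothetical numbers): if io_brick_map_widget()
+     * returned 2, a PIC soft with bs_busnum == 1 would report bus 3, while
+     * a TIOCP soft on slab 1 would report bus 4 (2 + 1*2).  This only shows
+     * how the PIC bus offset and the TIO slab offset combine.
+     */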
+
+    return bus;
 }
diff -Nru a/arch/ia64/sn/io/sn2/pciio.c b/arch/ia64/sn/io/sn2/pciio.c
--- a/arch/ia64/sn/io/sn2/pciio.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/pciio.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -7,115 +6,11 @@
  * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
  */
 
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/xtalk/xbow.h>	/* Must be before iograph.h to get MAX_PORT_NUM */
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/ioerror_handling.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pciio_private.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
 #include <asm/sn/pci/pci_bus_cvlink.h>
 #include <asm/sn/simulator.h>
 
-#define DEBUG_PCIIO
-#undef DEBUG_PCIIO	/* turn this on for yet more console output */
-
-
 char                    pciio_info_fingerprint[] = "pciio_info";
 
-int
-badaddr_val(volatile void *addr, int len, volatile void *ptr)
-{
-	int ret = 0;
-	volatile void *new_addr;
-
-	switch (len) {
-		case 4:
-			new_addr = (void *) addr;
-			ret = ia64_sn_probe_io_slot((long)new_addr, len, (void *)ptr);
-			break;
-		default:
-			printk(KERN_WARNING "badaddr_val given len %x but supports len of 4 only\n", len);
-	}
-
-	if (ret < 0)
-		panic("badaddr_val: unexpected status (%d) in probing", ret);
-	return(ret);
-
-}
-
-
-nasid_t
-get_console_nasid(void)
-{
-	extern nasid_t console_nasid;
-	extern nasid_t master_baseio_nasid;
-
-	if (console_nasid < 0) {
-		console_nasid = ia64_sn_get_console_nasid();
-		if (console_nasid < 0) {
-// ZZZ What do we do if we don't get a console nasid on the hardware????
-			if (IS_RUNNING_ON_SIMULATOR() )
-				console_nasid = master_baseio_nasid;
-		}
-	} 
-	return console_nasid;
-}
-
-nasid_t
-get_master_baseio_nasid(void)
-{
-	extern nasid_t master_baseio_nasid;
-	extern char master_baseio_wid;
-
-	if (master_baseio_nasid < 0) {
-		master_baseio_nasid = ia64_sn_get_master_baseio_nasid();
-
-		if ( master_baseio_nasid >= 0 ) {
-        		master_baseio_wid = WIDGETID_GET(KL_CONFIG_CH_CONS_INFO(master_baseio_nasid)->memory_base);
-		}
-	} 
-	return master_baseio_nasid;
-}
-
-int
-hub_dma_enabled(vertex_hdl_t xconn_vhdl)
-{
-	return(0);
-}
-
-int
-hub_error_devenable(vertex_hdl_t xconn_vhdl, int devnum, int error_code)
-{
-	return(0);
-}
-
-void
-ioerror_dump(char *name, int error_code, int error_mode, ioerror_t *ioerror)
-{
-}
-
-/******
- ****** end hack defines ......
- ******/
-
-
-
-
 /* =====================================================================
  *    PCI Generic Bus Provider
  * Implement PCI provider operations.  The pciio* layer provides a
@@ -137,97 +32,9 @@
  */
 
 #if !defined(DEV_FUNC)
-static pciio_provider_t *pciio_to_provider_fns(vertex_hdl_t dev);
+extern pciio_provider_t *pciio_to_provider_fns(vertex_hdl_t dev);
 #endif
 
-pciio_piomap_t          pciio_piomap_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
-void                    pciio_piomap_free(pciio_piomap_t);
-caddr_t                 pciio_piomap_addr(pciio_piomap_t, iopaddr_t, size_t);
-
-void                    pciio_piomap_done(pciio_piomap_t);
-caddr_t                 pciio_piotrans_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-caddr_t			pciio_pio_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, pciio_piomap_t *, unsigned);
-
-iopaddr_t               pciio_piospace_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, size_t, size_t);
-void                    pciio_piospace_free(vertex_hdl_t, pciio_space_t, iopaddr_t, size_t);
-
-pciio_dmamap_t          pciio_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
-void                    pciio_dmamap_free(pciio_dmamap_t);
-iopaddr_t               pciio_dmamap_addr(pciio_dmamap_t, paddr_t, size_t);
-void                    pciio_dmamap_done(pciio_dmamap_t);
-iopaddr_t               pciio_dmatrans_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, unsigned);
-void			pciio_dmamap_drain(pciio_dmamap_t);
-void			pciio_dmaaddr_drain(vertex_hdl_t, paddr_t, size_t);
-void			pciio_dmalist_drain(vertex_hdl_t, alenlist_t);
-iopaddr_t               pciio_dma_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, pciio_dmamap_t *, unsigned);
-
-pciio_intr_t            pciio_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
-void                    pciio_intr_free(pciio_intr_t);
-int                     pciio_intr_connect(pciio_intr_t, intr_func_t, intr_arg_t);
-void                    pciio_intr_disconnect(pciio_intr_t);
-vertex_hdl_t            pciio_intr_cpu_get(pciio_intr_t);
-
-void			pciio_slot_func_to_name(char *, pciio_slot_t, pciio_function_t);
-
-void                    pciio_provider_startup(vertex_hdl_t);
-void                    pciio_provider_shutdown(vertex_hdl_t);
-
-pciio_endian_t          pciio_endian_set(vertex_hdl_t, pciio_endian_t, pciio_endian_t);
-pciio_priority_t        pciio_priority_set(vertex_hdl_t, pciio_priority_t);
-vertex_hdl_t            pciio_intr_dev_get(pciio_intr_t);
-
-vertex_hdl_t            pciio_pio_dev_get(pciio_piomap_t);
-pciio_slot_t            pciio_pio_slot_get(pciio_piomap_t);
-pciio_space_t           pciio_pio_space_get(pciio_piomap_t);
-iopaddr_t               pciio_pio_pciaddr_get(pciio_piomap_t);
-ulong                   pciio_pio_mapsz_get(pciio_piomap_t);
-caddr_t                 pciio_pio_kvaddr_get(pciio_piomap_t);
-
-vertex_hdl_t            pciio_dma_dev_get(pciio_dmamap_t);
-pciio_slot_t            pciio_dma_slot_get(pciio_dmamap_t);
-
-pciio_info_t            pciio_info_chk(vertex_hdl_t);
-pciio_info_t            pciio_info_get(vertex_hdl_t);
-void                    pciio_info_set(vertex_hdl_t, pciio_info_t);
-vertex_hdl_t            pciio_info_dev_get(pciio_info_t);
-pciio_slot_t            pciio_info_slot_get(pciio_info_t);
-pciio_function_t        pciio_info_function_get(pciio_info_t);
-pciio_vendor_id_t       pciio_info_vendor_id_get(pciio_info_t);
-pciio_device_id_t       pciio_info_device_id_get(pciio_info_t);
-vertex_hdl_t            pciio_info_master_get(pciio_info_t);
-arbitrary_info_t        pciio_info_mfast_get(pciio_info_t);
-pciio_provider_t       *pciio_info_pops_get(pciio_info_t);
-error_handler_f	       *pciio_info_efunc_get(pciio_info_t);
-error_handler_arg_t    *pciio_info_einfo_get(pciio_info_t);
-pciio_space_t		pciio_info_bar_space_get(pciio_info_t, int);
-iopaddr_t		pciio_info_bar_base_get(pciio_info_t, int);
-size_t			pciio_info_bar_size_get(pciio_info_t, int);
-iopaddr_t		pciio_info_rom_base_get(pciio_info_t);
-size_t			pciio_info_rom_size_get(pciio_info_t);
-
-int                     pciio_attach(vertex_hdl_t);
-
-void                    pciio_provider_register(vertex_hdl_t, pciio_provider_t *pciio_fns);
-void                    pciio_provider_unregister(vertex_hdl_t);
-pciio_provider_t       *pciio_provider_fns_get(vertex_hdl_t);
-
-int                     pciio_driver_register(pciio_vendor_id_t, pciio_device_id_t, char *driver_prefix, unsigned);
-
-vertex_hdl_t            pciio_device_register(vertex_hdl_t, vertex_hdl_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-
-void			pciio_device_unregister(vertex_hdl_t);
-pciio_info_t		pciio_device_info_new(pciio_info_t, vertex_hdl_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-void			pciio_device_info_free(pciio_info_t);
-vertex_hdl_t		pciio_device_info_register(vertex_hdl_t, pciio_info_t);
-void			pciio_device_info_unregister(vertex_hdl_t, pciio_info_t);
-int                     pciio_device_attach(vertex_hdl_t, int);
-int			pciio_device_detach(vertex_hdl_t, int);
-void                    pciio_error_register(vertex_hdl_t, error_handler_f *, error_handler_arg_t);
-
-int                     pciio_reset(vertex_hdl_t);
-int                     pciio_write_gather_flush(vertex_hdl_t);
-int                     pciio_slot_inuse(vertex_hdl_t);
-
 /* =====================================================================
  *    Provider Function Location
  *
@@ -240,7 +47,7 @@
 
 #if !defined(DEV_FUNC)
 
-static pciio_provider_t *
+pciio_provider_t *
 pciio_to_provider_fns(vertex_hdl_t dev)
 {
     pciio_info_t            card_info;
@@ -261,13 +68,10 @@
 	}
     }
 
-    if (provider_fns == NULL)
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-	PRINT_PANIC("%v: provider_fns == NULL", dev);
-#else
-	PRINT_PANIC("0x%p: provider_fns == NULL", (void *)dev);
-#endif
-
+    if (provider_fns == NULL) {
+	char devname[MAXDEVNAME];
+	panic("%s: provider_fns == NULL", vertex_to_name(dev, devname, MAXDEVNAME));
+    }
     return provider_fns;
 
 }
@@ -396,8 +200,8 @@
 		     size_t byte_count,	/* Size of mapping */
 		     size_t align)
 {					/* Alignment needed */
-    if (align < NBPP)
-	align = NBPP;
+    if (align < PAGE_SIZE)
+	align = PAGE_SIZE;
     return DEV_FUNC(dev, piospace_alloc)
 	(dev, dev_desc, space, byte_count, align);
 }
@@ -753,50 +557,6 @@
 }
 
 /*
- * Specify endianness constraints.  The driver tells us what the device
- * does and how it would like to see things in memory.  We reply with
- * how things will actually appear in memory.
- */
-pciio_endian_t
-pciio_endian_set(vertex_hdl_t dev,
-		 pciio_endian_t device_end,
-		 pciio_endian_t desired_end)
-{
-    ASSERT((device_end == PCIDMA_ENDIAN_BIG) || (device_end == PCIDMA_ENDIAN_LITTLE));
-    ASSERT((desired_end == PCIDMA_ENDIAN_BIG) || (desired_end == PCIDMA_ENDIAN_LITTLE));
-
-#if DEBUG
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-    printk(KERN_ALERT  "%v: pciio_endian_set is going away.\n"
-	    "\tplease use PCIIO_BYTE_STREAM or PCIIO_WORD_VALUES in your\n"
-	    "\tpciio_dmamap_alloc and pciio_dmatrans calls instead.\n",
-	    dev);
-#else
-    printk(KERN_ALERT  "0x%x: pciio_endian_set is going away.\n"
-	    "\tplease use PCIIO_BYTE_STREAM or PCIIO_WORD_VALUES in your\n"
-	    "\tpciio_dmamap_alloc and pciio_dmatrans calls instead.\n",
-	    dev);
-#endif
-#endif
-
-    return DEV_FUNC(dev, endian_set)
-	(dev, device_end, desired_end);
-}
-
-/*
- * Specify PCI arbitration priority.
- */
-pciio_priority_t
-pciio_priority_set(vertex_hdl_t dev,
-		   pciio_priority_t device_prio)
-{
-    ASSERT((device_prio == PCI_PRIO_HIGH) || (device_prio == PCI_PRIO_LOW));
-
-    return DEV_FUNC(dev, priority_set)
-	(dev, device_prio);
-}
-
-/*
  * Read value of configuration register
  */
 uint64_t
@@ -865,71 +625,6 @@
     return DEV_FUNC(dev, reset) (dev);
 }
 
-/*
- * flush write gather buffers
- */
-int
-pciio_write_gather_flush(vertex_hdl_t dev)
-{
-    return DEV_FUNC(dev, write_gather_flush) (dev);
-}
-
-vertex_hdl_t
-pciio_intr_dev_get(pciio_intr_t pciio_intr)
-{
-    return (pciio_intr->pi_dev);
-}
-
-/****** Generic crosstalk pio interfaces ******/
-vertex_hdl_t
-pciio_pio_dev_get(pciio_piomap_t pciio_piomap)
-{
-    return (pciio_piomap->pp_dev);
-}
-
-pciio_slot_t
-pciio_pio_slot_get(pciio_piomap_t pciio_piomap)
-{
-    return (pciio_piomap->pp_slot);
-}
-
-pciio_space_t
-pciio_pio_space_get(pciio_piomap_t pciio_piomap)
-{
-    return (pciio_piomap->pp_space);
-}
-
-iopaddr_t
-pciio_pio_pciaddr_get(pciio_piomap_t pciio_piomap)
-{
-    return (pciio_piomap->pp_pciaddr);
-}
-
-ulong
-pciio_pio_mapsz_get(pciio_piomap_t pciio_piomap)
-{
-    return (pciio_piomap->pp_mapsz);
-}
-
-caddr_t
-pciio_pio_kvaddr_get(pciio_piomap_t pciio_piomap)
-{
-    return (pciio_piomap->pp_kvaddr);
-}
-
-/****** Generic crosstalk dma interfaces ******/
-vertex_hdl_t
-pciio_dma_dev_get(pciio_dmamap_t pciio_dmamap)
-{
-    return (pciio_dmamap->pd_dev);
-}
-
-pciio_slot_t
-pciio_dma_slot_get(pciio_dmamap_t pciio_dmamap)
-{
-    return (pciio_dmamap->pd_slot);
-}
-
 /****** Generic pci slot information interfaces ******/
 
 pciio_info_t
@@ -958,6 +653,25 @@
     return pciio_info;
 }
 
+/*
+ * Given a vertex_hdl_t, return the pciio_info_t from the host device - that
+ * is, the device plugged into the slot on the host bus.
+ */
+
+pciio_info_t
+pciio_hostinfo_get(vertex_hdl_t pciio)
+{
+	pciio_info_t	pciio_info;
+
+	pciio_info = pciio_info_get(pciio);
+	if (pciio_info && (pciio_info->c_vertex != pciio_info->c_hostvertex)) {
+		pciio_info = pciio_info_get(pciio_info->c_hostvertex);
+	}
+
+	return pciio_info;
+}
+
+
 void
 pciio_info_set(vertex_hdl_t pciio, pciio_info_t pciio_info)
 {
@@ -979,12 +693,34 @@
     return (pciio_info->c_vertex);
 }
 
+vertex_hdl_t
+pciio_info_hostdev_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_hostvertex);
+}
+
+/*ARGSUSED*/
+pciio_bus_t
+pciio_info_bus_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_bus);
+}
+
 pciio_slot_t
 pciio_info_slot_get(pciio_info_t pciio_info)
 {
     return (pciio_info->c_slot);
 }
 
+pciio_slot_t
+pciio_info_hostslot_get(pciio_info_t pciio_info)
+{
+    vertex_hdl_t	host_vhdl = pciio_info_hostdev_get(pciio_info);
+    pciio_info_t	host_info = pciio_info_get(host_vhdl);
+
+    return (pciio_info_slot_get(host_info));
+}
+
 pciio_function_t
 pciio_info_function_get(pciio_info_t pciio_info)
 {
@@ -1021,49 +757,30 @@
     return (pciio_info->c_pops);
 }
 
-error_handler_f	       *
-pciio_info_efunc_get(pciio_info_t pciio_info)
-{
-    return (pciio_info->c_efunc);
-}
-
-error_handler_arg_t    *
-pciio_info_einfo_get(pciio_info_t pciio_info)
-{
-    return (pciio_info->c_einfo);
-}
-
-pciio_space_t
-pciio_info_bar_space_get(pciio_info_t info, int win)
-{
-    return info->c_window[win].w_space;
-}
-
-iopaddr_t
-pciio_info_bar_base_get(pciio_info_t info, int win)
+int
+pciio_businfo_multi_master_get(pciio_businfo_t businfo)
 {
-    return info->c_window[win].w_base;
+    return businfo->bi_multi_master;
 }
 
-size_t
-pciio_info_bar_size_get(pciio_info_t info, int win)
+pciio_asic_type_t
+pciio_businfo_asic_type_get(pciio_businfo_t businfo)
 {
-    return info->c_window[win].w_size;
+    return businfo->bi_asic_type;
 }
 
-iopaddr_t
-pciio_info_rom_base_get(pciio_info_t info)
+pciio_bus_type_t
+pciio_businfo_bus_type_get(pciio_businfo_t businfo)
 {
-    return info->c_rbase;
+    return businfo->bi_bus_type;
 }
 
-size_t
-pciio_info_rom_size_get(pciio_info_t info)
+pciio_bus_speed_t
+pciio_businfo_bus_speed_get(pciio_businfo_t businfo)
 {
-    return info->c_rsize;
+    return businfo->bi_bus_speed;
 }
 
-
 /* =====================================================================
  *          GENERIC PCI INITIALIZATION FUNCTIONS
  */
@@ -1077,11 +794,8 @@
 pciio_attach(vertex_hdl_t pciio)
 {
 #if DEBUG && ATTACH_DEBUG
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-    printk("%v: pciio_attach\n", pciio);
-#else
-    printk("0x%x: pciio_attach\n", pciio);
-#endif
+    char devname[MAXDEVNAME];
+    printk("%s: pciio_attach\n", vertex_to_name(pciio, devname, MAXDEVNAME));
 #endif
     return 0;
 }
@@ -1119,37 +833,6 @@
     return (pciio_provider_t *) ainfo;
 }
 
-/*ARGSUSED4 */
-int
-pciio_driver_register(
-			 pciio_vendor_id_t vendor_id,
-			 pciio_device_id_t device_id,
-			 char *driver_prefix,
-			 unsigned flags)
-{
-	return(0);
-}
-
-vertex_hdl_t
-pciio_device_register(
-		vertex_hdl_t connectpt,	/* vertex for /hw/.../pciio/%d */
-		vertex_hdl_t master,	/* card's master ASIC (PCI provider) */
-		pciio_slot_t slot,	/* card's slot */
-		pciio_function_t func,	/* card's func */
-		pciio_vendor_id_t vendor_id,
-		pciio_device_id_t device_id)
-{
-    return pciio_device_info_register
-	(connectpt, pciio_device_info_new (NULL, master, slot, func,
-					   vendor_id, device_id));
-}
-
-void
-pciio_device_unregister(vertex_hdl_t pconn)
-{
-    DEV_FUNC(pconn,device_unregister)(pconn);
-}
-
 pciio_info_t
 pciio_device_info_new(
 		pciio_info_t pciio_info,
@@ -1173,6 +856,17 @@
     pciio_info->c_efunc = 0;
     pciio_info->c_einfo = 0;
 
+    /*
+     * PPB support fields
+     */
+
+    pciio_info->c_forw = NULL;
+    pciio_info->c_back = NULL;
+    pciio_info->c_ppb = NULL;
+    pciio_info->c_parent_ppb = NULL;
+    pciio_info->c_hostvertex = GRAPH_VERTEX_NONE;
+    pciio_info->c_type1 = 0;
+
     return pciio_info;
 }
 
@@ -1182,7 +876,7 @@
     /* NOTE : pciio_info is a structure within the pcibr_info
      *	      and not a pointer to memory allocated on the heap !!
      */
-    BZERO((char *)pciio_info,sizeof(pciio_info));
+    memset((char *)pciio_info, 0, sizeof(pciio_info));
 }
 
 vertex_hdl_t
@@ -1203,6 +897,7 @@
 	return pconn;
 
     pciio_info->c_vertex = pconn;
+    pciio_info->c_hostvertex = pconn;	/* ppb code modifies this */
     pciio_info_set(pconn, pciio_info);
 
     /*
@@ -1293,14 +988,26 @@
 {
 
 	struct resource *new_res;
-	int status = 0;
-
-	new_res = (struct resource *) kmalloc( sizeof(struct resource), KM_NOSLEEP);
+	int status;
 
-	status = allocate_resource( root_resource, new_res,
+	new_res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
+	if (!new_res)
+		return 0;
+
+	if (start > 0) {
+		status = allocate_resource( root_resource, new_res,
+			size, start /* Min start addr. */,
+			(start + size) - 1, 1,
+			NULL, NULL);
+	} else {
+		if (size > align)
+			align = size;
+		status = allocate_resource( root_resource, new_res,
 				    size, align /* Min start addr. */,
 				    root_resource->end, align,
 				    NULL, NULL);
+	}
+
 	if (status) {
 		kfree(new_res);
 		return((iopaddr_t) NULL);
@@ -1326,8 +1033,7 @@
 void
 pciio_device_win_free(pciio_win_alloc_t win_alloc)
 {
-
-	int status = 0;
+	int status;
 
 	if (win_alloc->wa_resource) {
 		status = release_resource(win_alloc->wa_resource);
@@ -1389,47 +1095,21 @@
 }
 
 int
-pciio_dma_enabled(vertex_hdl_t pconn_vhdl)
-{
-	return DEV_FUNC(pconn_vhdl, dma_enabled)(pconn_vhdl);
-}
-
-int
 pciio_info_type1_get(pciio_info_t pci_info)
 {
-	return(0);
+	return (pci_info->c_type1);
 }
 
-/*
- *  XXX: should probably be called __sn2_pci_rrb_alloc
- */
-/* used by qla1280 */
-int
-snia_pcibr_rrb_alloc(struct pci_dev *pci_dev,
-	int *count_vchan0,
-	int *count_vchan1)
+pciio_businfo_t
+pciio_businfo_get(vertex_hdl_t conn)
 {
-	vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
+	pciio_info_t		info;
+	extern pciio_businfo_t	pciio_ppb_businfo_get(vertex_hdl_t);
 
-	return pcibr_rrb_alloc(dev, count_vchan0, count_vchan1);
-}
-EXPORT_SYMBOL(snia_pcibr_rrb_alloc);
-
-/* 
- * XXX: interface should be more like
- *
- *	int __sn2_pci_enable_bwswap(struct pci_dev *dev);
- *	void __sn2_pci_disable_bswap(struct pci_dev *dev);
- */
-/* used by ioc4 ide */
-pciio_endian_t
-snia_pciio_endian_set(struct pci_dev *pci_dev,
-	pciio_endian_t device_end,
-	pciio_endian_t desired_end)
-{
-	vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
-	
-	return DEV_FUNC(dev, endian_set)
-		(dev, device_end, desired_end);
+	info = pciio_info_get(conn);
+	if (info->c_parent_ppb) {
+		return pciio_ppb_businfo_get(conn);
+	} else {
+		return DEV_FUNC(conn, businfo_get)(conn);
+	}
 }
-EXPORT_SYMBOL(snia_pciio_endian_set);
diff -Nru a/arch/ia64/sn/io/sn2/pciio_ppb.c b/arch/ia64/sn/io/sn2/pciio_ppb.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/sn2/pciio_ppb.c	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,1550 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * ###maule:  Need some work on this comment block yet ...
+ *
+ * Fairly generic PCI-PCI Bridge (PPB) driver module.
+ * 
+ * This driver implements provider-independent PPB support for Irix.
+ *
+ * Goals
+ * =====
+ * Transparently provide pciio services to PCI devices downstream of a PPB
+ * hierarchy.
+ *
+ * Implement as a provider-independent module.
+ *
+ * Design
+ * ======
+ * 
+ *
+ * For the most part, devices behind a PPB are used in the same way as devices
+ * directly connected to a host bus.  Drivers are written using the guidelines
+ * set forth in the device drivers programming guide.  The most notable
+ * exception is that some providers might not allow config space pio maps to
+ * devices residing on secondary busses because of shared address space when
+ * accessing these devices.
+ *
+ * Other restrictions might come into play if devices behind a PPB request
+ * resources or mappings which are not compatible with each other when you
+ * consider that all subordinate devices originate at a common host slot.  An
+ * example might be interrupt routing using DEVICE_ADMIN statements.
+ *
+ * All subordinate bus numbers in the system come from a shared pool that
+ * starts at 16.  Bus numbers < 16 are left to the providers.  A future modification to
+ * this code might move bus number allocation to be a per-provider function.
+ *
+ * While this code should be provider-independent, no attempt was made to 
+ * retrofit this to O2.  The O2 already had PPB support, and it did not seem 
+ * worth the risk to break customer code that might rely on how O2 works.
+ *
+ * No attempt was made to restrict the configuration for the well-known
+ * bridge/xbridge issue of RRB thrashing when there are multiple PCI masters
+ * doing reads from a single bridge.  The reasoning is that there is no easy way
+ * to tell if downstream PCI masters intend to do host DMA or not.  It is
+ * perfectly ok to allow multiple masters doing device-to-device traffic.
+ *
+ * Provider implementations must make the following accommodations for this
+ * driver to work correctly:
+ *
+ * 	When accessing config space (pciio_config_get()/pciio_config_set())
+ *	the provider should examine the pciio_info_t of the passed vhdl
+ *	to determine if TYPE1 addressing should be used.  This can be checked
+ *	by pciio_info_type1_get() returning non-zero.  This means that
+ *      the bus/slot/function to be used are encoded in the address
+ *	to be read/written, and the macros PCI_TYPE1_BUS,
+ *	PCI_TYPE1_SLOT, and PCI_TYPE1_FUNC should be used to extract
+ * 	the correct values.
+ *
+ *	In addition, if the provider stores non-zero bus numbers for the host
+ *	PCI bus in a device's pciio_info_t->c_bus (as bridge/xbridge do), the
+ * 	provider code must determine if the register to be read resides on the
+ *	host bus (actually PCI bus 0), or a subordinate bus (PCI bus != 0).
+ *	pcibr accomplishes this by checking the pciio_info c_vertex against
+ *	c_hostvertex.  If they are the same, the device is on the host bus, and
+ *	this is PCI bus 0.  If not, the device is on a subordinate bus, and the
+ *	bus number can be taken from c_bus.
+ *
+ *	Providers must decide if it is appropriate for pio maps to be 
+ *	constructed for config space of devices on subordinate busses.  For
+ *	bridge/xbridge, this should not be allowed because all devices on 
+ *	subordinate busses share the same address space (the bus/slot to use is
+ * 	programmed in a bridge/xbridge register).
+ *
+ */
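+
+/*
+ * Provider-side sketch (illustrative only; the exact macro arguments are
+ * an assumption, not taken from any one provider): a config accessor can
+ * honor the TYPE1 contract described above roughly as
+ *
+ *	if (pciio_info_type1_get(pciio_info_get(vhdl))) {
+ *		bus  = PCI_TYPE1_BUS(reg);
+ *		slot = PCI_TYPE1_SLOT(reg);
+ *		func = PCI_TYPE1_FUNC(reg);
+ *		... issue a type 1 configuration cycle ...
+ *	} else {
+ *		... normal access to the host bus (PCI bus 0) ...
+ *	}
+ */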
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/xtalk/xbow.h>  /* Must be before iograph.h to get MAX_PORT_NUM */
+#include <asm/sn/iograph.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/ioerror_handling.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/pciio_private.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/io.h>
+#include <asm/sn/pci/pci_bus_cvlink.h>
+#include <asm/sn/simulator.h>
+
+#ifndef NEW
+#define NEW(x)	((x) = kmem_zalloc(sizeof(*(x)), KM_SLEEP))
+#define DEL(x)	kmem_free((x), sizeof(*(x)))
+#endif
+
+#if DEBUG
+#define PPB_DEBUG	1
+#endif
+
+static int ppb_scan(vertex_hdl_t);
+static int do_ppb_scan(vertex_hdl_t slot_vhdl);
+static int ppb_function_init(pciio_info_t);
+static void ppb_function_print(pciio_info_t, char *);
+static int ppb_addr_space_assign(pciio_info_t);
+static int function_addr_space_assign(pciio_info_t);
+static int ppb_device_init(pciio_ppb_t, pciio_slot_t);
+static void ppb_link(pciio_info_t, pciio_ppb_t);
+static void ppb_bus_reserve(pciio_bus_t);
+static void ppb_bus_free(pciio_bus_t);
+static pciio_bus_t ppb_bus_alloc(void);
+static void ppb_attach_thread(pciio_info_t);
+static void ppb_attach_function(pciio_info_t);
+static uint64_t ppb_roundup_log2(uint64_t val);
+
+int pciio_intr_remap(pciio_info_t, int);	/* used in pciio.c */
+int ppb_showconfig = 0;				/* debug */
+
+typedef int (st_func_t)(void *);
+
+extern uint64_t pcibr_config_get_safe(vertex_hdl_t, unsigned, unsigned);
+
+static struct semaphore pciio_ppb_sema;
+
+#define ROUND(x, mult)				\
+	(((mult) == 0) ? (x) :			\
+	(((x) + ((mult)-1)) / (mult)) * (mult))
+
+#define ROUNDUP(var, mult)	((var) = ROUND((var), (mult)))
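+
+/*
+ * For example, ROUND(0x1234, 0x1000) == 0x2000, while ROUND(x, 0) leaves
+ * x unchanged, so a zero alignment is harmless.
+ */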
+
+/*
+ * Convenience macro for deciding if a device is a PPB or not.
+ *
+ * Note:  This check might not be 100% accurate, as I don't know if devices
+ * with a subclass of PCI_CFG_SUBCLASS_BRIDGE_HOST are *required* to follow
+ * the PCI Header Type 1 format.  We currently include that check to allow
+ * for the Broadcom BCM-1250 HyperTransport bridge to leverage this code for
+ * its initialization and bus scanning.  It is possible that this check will
+ * change in the future.  (maule, 10/21/2002)
+ */
+
+#define IS_PPB(class, subclass) \
+	((class) == PCI_CFG_CLASS_BRIDGE && \
+	 ((subclass) == PCI_CFG_SUBCLASS_BRIDGE_HOST || \
+	  (subclass) == PCI_CFG_SUBCLASS_BRIDGE_PCI))
+
+static int
+ppb_scan(vertex_hdl_t slot_vhdl)
+{
+	int		 rval;
+	pciio_info_t	 pci_info;
+	pciio_ppb_t	 ppb_info;
+	pciio_businfo_t  businfo;
+
+	/*
+ 	 * Make a preliminary scan of the bus, allocating subordinate bus
+	 * numbers, and collecting BAR sizing information.
+	 *
+	 * Need to single-thread around do_ppb_scan() so that bus number
+	 * allocations are done correctly.  Since we're not expecting a lot
+	 * of ppb scan contention, seems like that should be ok.
+	 */
+
+	down(&pciio_ppb_sema);
+	rval = do_ppb_scan(slot_vhdl);
+	up(&pciio_ppb_sema);
+
+	if (rval != 0) {
+		return rval;
+	}
+
+	/*
+	 * Check Master restrictions
+	 */
+
+	pci_info = pciio_info_get(slot_vhdl);
+	businfo = pciio_businfo_get(slot_vhdl);
+	ppb_info = pci_info->c_ppb;
+
+	if ((ppb_info->b_master_sub > 1) &&
+	    (PCIIO_GET_MULTI_MASTER(businfo) == 0)) {
+		KERN_MSG(K_WARN,
+			"pciio_ppb:  Host PCI slot 0x%p", (void *)slot_vhdl);
+		KERN_MSG(K_CONT,
+			"    does not support having multiple dma masters downstream.\n");
+		KERN_MSG(K_CONT,
+			"    No devices behind that slot are being initialized.  This behavior\n");
+		KERN_MSG(K_CONT,
+			"    can be overridden by setting the kernel mtune pciio_multimaster_override.\n");
+
+		return -1;
+	}
+
+	/*
+	 * Allocate address spaces
+	 */
+
+	if ((ppb_info = pci_info->c_ppb)) {
+		if (ppb_info->b_io.size) {
+			ppb_info->b_io.size =
+				ppb_roundup_log2(ppb_info->b_io.size);
+
+#if PPB_DEBUG
+			KERN_MSG(K_NOTE,
+				"ppb_scan 0x%x:  allocating 0x%x bytes "
+				"from PCIIO_SPACE_IO\n",
+				slot_vhdl, ppb_info->b_io.size);
+#endif
+			
+			ppb_info->b_io.base = pciio_piospace_alloc(slot_vhdl,
+							NULL,
+							PCIIO_SPACE_IO,
+							ppb_info->b_io.size,
+							ppb_info->b_io.size);
+			if (! ppb_info->b_io.base) {
+				KERN_MSG(K_WARN,
+					"Could not allocate %ld bytes of PCI "
+					"I/O space for PPB 0x%p\n",
+					ppb_info->b_io.size, (void *)slot_vhdl);
+				goto err;
+			}
+		}
+
+		if (ppb_info->b_mem.size) {
+			ppb_info->b_mem.size =
+				ppb_roundup_log2(ppb_info->b_mem.size);
+#if PPB_DEBUG
+			KERN_MSG(K_NOTE,
+				"ppb_scan 0x%x:  allocating 0x%x bytes "
+				"from PCIIO_SPACE_MEM32\n",
+				slot_vhdl, ppb_info->b_mem.size);
+#endif
+
+			ppb_info->b_mem.base = pciio_piospace_alloc(slot_vhdl,
+							NULL,
+							PCIIO_SPACE_MEM32,
+							ppb_info->b_mem.size,
+							ppb_info->b_mem.size);
+			if (! ppb_info->b_mem.base) {
+				KERN_MSG(K_WARN,
+					"Could not allocate %ld bytes of PCI "
+					"MEM space for PPB 0x%p\n",
+					ppb_info->b_mem.size, (void *)slot_vhdl);
+				goto err;
+			}
+		}
+
+		if (ppb_info->b_mempf.size) {
+			ppb_info->b_mempf.size =
+				ppb_roundup_log2(ppb_info->b_mempf.size);
+#if PPB_DEBUG
+			KERN_MSG(K_NOTE,
+				"ppb_scan 0x%x:  allocating 0x%x bytes "
+				"from PCIIO_SPACE_MEM\n",
+				slot_vhdl, ppb_info->b_mempf.size);
+#endif
+
+			ppb_info->b_mempf.base = pciio_piospace_alloc(slot_vhdl,
+							NULL,
+							PCIIO_SPACE_MEM32,
+							ppb_info->b_mempf.size,
+							ppb_info->b_mempf.size);
+			if (! ppb_info->b_mempf.base) {
+				KERN_MSG(K_WARN,
+					"Could not allocate %ld bytes of PCI "
+					"MEM space (prefetch) for PPB %p\n",
+					ppb_info->b_mempf.size, (void *)slot_vhdl);
+				goto err;
+			}
+		}
+	}
+
+	return 0;
+
+err:
+
+	if (ppb_info->b_io.base) {
+		pciio_piospace_free(slot_vhdl, PCIIO_SPACE_IO,
+				ppb_info->b_io.base, ppb_info->b_io.size);
+	}
+
+	if (ppb_info->b_mem.base) {
+		pciio_piospace_free(slot_vhdl, PCIIO_SPACE_MEM32,
+				ppb_info->b_mem.base, ppb_info->b_mem.size);
+	}
+
+	if (ppb_info->b_mempf.base) {
+		pciio_piospace_free(slot_vhdl, PCIIO_SPACE_MEM32,
+				ppb_info->b_mempf.base, ppb_info->b_mempf.size);
+	}
+
+	return -1;
+}
+
+static int
+do_ppb_scan(vertex_hdl_t slot_vhdl)
+{
+	uint16_t vid;
+	char ppb_name[16];
+	pciio_ppb_t ppb_tmp;
+
+	pciio_bus_t bus;
+	pciio_bus_t newbus;
+	pciio_slot_t localslot;
+	pciio_info_t slot_pci_info;
+	pciio_ppb_t ppb_info;
+	vertex_hdl_t ppb_vhdl;
+	pciio_info_t ppb_pci_info;
+
+	/*
+	 * Allocate next bus number from global pool.
+	 */
+
+	slot_pci_info = pciio_info_get(slot_vhdl);
+	bus = pciio_info_bus_get(slot_pci_info);
+	newbus = ppb_bus_alloc();
+
+	NEW(ppb_info);		/* zeros storage */
+
+	ppb_info->b_primary = bus;
+	pciio_config_set(slot_vhdl, PCI_CFG_PPB_BUS_PRI, 1, bus);
+	ppb_info->b_secondary = newbus;
+	pciio_config_set(slot_vhdl, PCI_CFG_PPB_BUS_SEC, 1, newbus);
+
+	/*
+	 * Link ppb_info to the pciio_info_t associated with slot_vhdl
+	 */
+
+	ppb_info->b_pciio = slot_pci_info;
+	slot_pci_info->c_ppb = ppb_info;
+
+	/*
+	 * Add a hwgraph component of the form "ppb_X" where X is the
+	 * secondary bus number.
+	 */
+
+	sprintf(ppb_name, "ppb_%d", ppb_info->b_secondary);
+	if (hwgraph_path_add(slot_vhdl,
+			     ppb_name, &ppb_vhdl) != GRAPH_SUCCESS) {
+		KERN_MSG(K_WARN,
+			"Could not add ppb name %s under vhdl 0x%p\n",
+			ppb_name, (void *)ppb_vhdl);
+		DEL(ppb_info);
+		ppb_bus_free(newbus);
+		return -1;
+	}
+	hwgraph_vertex_unref(ppb_vhdl);
+	ppb_info->b_vhdl = ppb_vhdl;
+
+	/*
+	 * Duplicate slot_vhdl's pciio_info_t on this vhdl, changing the
+	 * bus number.
+	 */
+
+	NEW(ppb_pci_info);
+	*ppb_pci_info = *slot_pci_info;
+	ppb_pci_info->c_bus = PCIIO_BUS_NONE;
+	ppb_pci_info->c_slot = PCIIO_SLOT_NONE;
+	ppb_pci_info->c_func = PCIIO_FUNC_NONE;
+	ppb_pci_info->c_vertex = ppb_vhdl;
+	ppb_pci_info->c_type1 = 1;
+	pciio_info_set(ppb_vhdl, ppb_pci_info);
+
+	/*
+	 * Adjust upstream subordinate bus numbers
+	 */
+
+	ppb_tmp = ppb_info;
+	do {
+		vertex_hdl_t tmp_vhdl;
+
+		ppb_tmp->b_subordinate = newbus;
+		tmp_vhdl = pciio_info_dev_get(ppb_tmp->b_pciio);
+
+		pciio_config_set(tmp_vhdl, PCI_CFG_PPB_BUS_SUB, 1, newbus);
+
+		ppb_tmp = ppb_tmp->b_pciio->c_parent_ppb;
+	} while (ppb_tmp != NULL);
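+
+	/*
+	 * Illustrative example (hypothetical bus number): with PPBs nested
+	 * two deep, allocating bus 18 for the innermost secondary walks this
+	 * chain and bumps the subordinate bus number of every PPB above it
+	 * to 18 as well.
+	 */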
+
+	/*
+	 * Scan all slots 
+	 */
+
+	for (localslot = 0; localslot < 32; localslot++) {
+		vid = pcibr_config_get_safe(ppb_vhdl,
+			PCI_TYPE1(newbus, localslot, 0, PCI_CFG_VENDOR_ID), 2);
+		if (vid == 0xffff) {
+			continue;
+		}
+
+		ppb_device_init(ppb_info, localslot);
+	}
+
+	/*
+	 * Round ppb sizes up to appropriate bounds
+	 */
+
+	ROUNDUP(ppb_info->b_io.size, PCI_PPB_IO_ALIGN);
+	ROUNDUP(ppb_info->b_mem.size, PCI_PPB_MEM_ALIGN);
+	ROUNDUP(ppb_info->b_mempf.size, PCI_PPB_MEMPF_ALIGN);
+
+#if PPB_DEBUG
+	KERN_MSG(K_NOTE, "vhdl 0x%x - b_io 0x%x b_mem 0x%x b_mempf 0x%x\n",
+		slot_vhdl, ppb_info->b_io.size,
+		ppb_info->b_mem.size, ppb_info->b_mempf.size);
+#endif
+
+	return 0;
+}
+
+static int
+ppb_device_init(pciio_ppb_t ppb, pciio_slot_t localslot)
+{
+	uint16_t		vid;
+	uint16_t		devid;
+	uint8_t			htype;
+	pciio_bus_t		bus;
+	pciio_function_t	func;
+	pciio_function_t	nfunc;
+	vertex_hdl_t		connectpt;
+	vertex_hdl_t		master;
+	vertex_hdl_t		ppb_vhdl;
+	pciio_info_t		func_pciio_info;
+
+	bus = ppb->b_secondary;
+
+	ppb_vhdl = ppb->b_vhdl;
+	connectpt = ppb_vhdl;
+	master = pciio_info_master_get(ppb->b_pciio);
+
+	vid = pciio_config_get(ppb_vhdl,
+			PCI_TYPE1(bus, localslot, 0, PCI_CFG_VENDOR_ID), 2);
+	devid = pciio_config_get(ppb_vhdl,
+			PCI_TYPE1(bus, localslot, 0, PCI_CFG_DEVICE_ID), 2);
+	htype = pciio_config_get(ppb_vhdl,
+			PCI_TYPE1(bus, localslot, 0, PCI_CFG_HEADER_TYPE), 1);
+
+	nfunc = (htype & 0x80) ? 8 : 1;
+	for (func = 0; func < nfunc; func++) {
+		vid = pcibr_config_get_safe(ppb_vhdl,
+			PCI_TYPE1(bus, localslot, func, PCI_CFG_VENDOR_ID), 2);
+		if (vid == 0xffff) {
+			continue;
+		}
+		devid = pciio_config_get(ppb_vhdl,
+			PCI_TYPE1(bus, localslot, func, PCI_CFG_DEVICE_ID), 2);
+
+		func_pciio_info = pciio_device_info_new(NULL,
+					master, localslot,
+					nfunc == 1 ? PCIIO_FUNC_NONE : func,
+					vid, devid);
+
+		(void)pciio_device_info_register(connectpt, func_pciio_info);
+
+		/*
+		 * ###maule:  Order here is important - assign c_hostvertex
+		 * after pciio_device_info_register() because the function
+		 * assigns c_hostvertex as c_vertex by default.  Need to clean
+		 * this up.
+		 */
+
+		func_pciio_info->c_hostvertex = ppb->b_pciio->c_hostvertex;
+		func_pciio_info->c_bus = bus;
+		ppb_link(func_pciio_info, ppb);
+
+		ppb_function_init(func_pciio_info);
+	}
+
+	return 0;
+}
+
+/*
+ * Preliminary initialization of a function's config registers.  Does not
+ * establish final BAR addresses, but determines BAR sizes.
+ *
+ * This code was derived from the pcibr code.
+ *
+ * ###Does not handle 64-bit BAR's.
+ *
+ * ###Could make this a generic device initialization called by providers.
+ */
+
+static int
+ppb_function_init(pciio_info_t func_pciio_info)
+{
+	uint8_t class;
+	uint8_t subclass;
+	pciio_slot_t slot;
+	pciio_function_t function;
+	pciio_vendor_id_t vendor;
+	pciio_device_id_t device;
+	uint32_t upper;
+	unsigned htype;
+	unsigned lt_time;
+	int nbars;
+	int win;
+	pciio_space_t space;
+	uint16_t cmd_reg;
+	vertex_hdl_t func_vhdl;
+	pciio_ppb_t parent_ppb;
+	iopaddr_t mask;
+	size_t size;
+
+	func_vhdl = pciio_info_dev_get(func_pciio_info);
+	slot = pciio_info_slot_get(func_pciio_info);
+	function = pciio_info_function_get(func_pciio_info);
+	vendor = pciio_info_vendor_id_get(func_pciio_info);
+	device = pciio_info_device_id_get(func_pciio_info);
+	htype = pciio_config_get(func_vhdl, PCI_CFG_HEADER_TYPE, 1);
+
+	htype &= 0x7f;
+	if (htype == 0x00) {	/* type 0 header */
+		nbars = PCI_CFG_BASE_ADDRS;
+	} else if (htype == 0x01) {	/* type 1 header */
+		nbars = PCI_CFG_PPB_BASE_ADDRS;
+	} else {		/* unknown/unsupported header */
+		KERN_MSG(K_WARN,
+			"ppb_function_init: pci slot %d func %d has unknown "
+			"header type 0x%x\n",
+			slot, function, htype);
+		nbars = 2;
+	}
+
+	/* 
+	 * If the latency timer has already been set, by prom or by the
+	 * card itself, use that value.  Otherwise look at the device's
+	 * 'min_gnt' and attempt to calculate a latency time. 
+	 *
+	 * NOTE: For now if the device is on the 'real time' arbitration
+	 * ring we don't set the latency timer.
+	 *
+	 * WAR: SGI's RAD devices target abort if you write a 
+	 * single byte into their config space.  So don't set the Latency
+	 * Timer for these devices
+	 */
+
+	lt_time = pciio_config_get(func_vhdl, PCI_CFG_LATENCY_TIMER, 1);
+
+	/*
+	 * ###maule:  what to do about BRIDGE_DEV_RT check - remove it? 
+	 * Probably since prom won't be setting this stuff up anyway.
+	 */
+
+	if ((lt_time == 0) && !(device == 0x5 /* RAD_DEV */)) {
+		unsigned min_gnt;
+		unsigned min_gnt_mult;
+
+		/* 'min_gnt' indicates how long of a burst period a device
+		 * needs in increments of 250ns.  But latency timer is in
+		 * PCI clock cycles, so a conversion is needed.
+		 */
+
+		min_gnt = pciio_config_get(func_vhdl, PCI_MIN_GNT, 1);
+
+#ifdef UNDEFmaule
+#ifdef SN1
+		/* XXX -FIXME- PIC: Add support for 133MHz, 100MHz,.... */
+		if ((pcibr_soft->bs_xbridge) &&
+		    ((bridge->b_wid_control & BRIDGE_CTRL_BUS_SPEED_MASK) ==
+		     BRIDGE_CTRL_BUS_SPEED_66))
+			min_gnt_mult = 16;	/* 250ns @ 66MHz, in clocks */
+		else
+#endif
+#endif
+			min_gnt_mult = 8;	/* 250ns @ 33MHz, in clocks */
+
+		if ((min_gnt != 0) && ((min_gnt * min_gnt_mult) < 256))
+			lt_time = (min_gnt * min_gnt_mult);
+		else
+			lt_time = 4 * min_gnt_mult;	/* 1 micro second */
+
+		pciio_config_set(func_vhdl, PCI_CFG_LATENCY_TIMER, 1, lt_time);
+	}
+
+	/*
+	 * Determine window sizes
+	 */
+
+	parent_ppb = func_pciio_info->c_parent_ppb;
+	cmd_reg = pciio_config_get(func_vhdl, PCI_CFG_COMMAND, 2);
+
+	for (win = 0; win < nbars; ++win) {
+		iopaddr_t base, code;
+		uchar_t bar_offset;
+
+		bar_offset = PCI_CFG_BASE_ADDR_0 + (win * 4);
+
+		/*
+		 * GET THE BASE & SIZE OF THIS WINDOW:
+		 *
+		 * The low two or four bits of the BASE register
+		 * determines which address space we are in; the
+		 * rest is a base address. BASE registers
+		 * determine windows that are power-of-two sized
+		 * and naturally aligned, so we can get the size
+		 * of a window by writing all-ones to the
+		 * register, reading it back, and seeing which
+		 * bits are used for decode; the least
+		 * significant nonzero bit is also the size of
+		 * the window.
+		 *
+		 * WARNING: someone may already have allocated
+		 * some PCI space to this window, and in fact
+		 * PIO may be in process at this very moment
+		 * from another processor (or even from this
+		 * one, if we get interrupted)! So, if the BASE
+		 * already has a nonzero address, be generous
+		 * and use the LSBit of that address as the
+		 * size; this could overstate the window size.
+		 * Usually, when one card is set up, all are set
+		 * up; so, since we don't bitch about
+		 * overlapping windows, we are ok.
+		 *
+		 * UNFORTUNATELY, some cards do not clear their
+		 * BASE registers on reset. I have two heuristics
+		 * that can detect such cards: first, if the
+		 * decode enable is turned off for the space
+		 * that the window uses, we can disregard the
+		 * initial value. second, if the address is
+		 * outside the range that we use, we can disregard
+		 * it as well.
+		 *
+		 * This is looking very PCI generic. Except for
+		 * knowing how many slots and where their config
+		 * spaces are, this window loop and the next one
+		 * could probably be shared with other PCI host
+		 * adapters. It would be interesting to see if
+		 * this could be pushed up into pciio, when we
+		 * start supporting more PCI providers.
+		 */
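+		/*
+		 * Worked example (illustrative): a 1MB memory BAR reads back
+		 * 0xfff00000 (plus the low code bits) after all-ones is
+		 * written; masking with -16 keeps the address bits and
+		 * (size & -size) isolates the least significant set bit,
+		 * 0x00100000, i.e. 1MB.
+		 */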
+		base = pciio_config_get(func_vhdl, bar_offset, 4);
+
+		if (base & PCI_BA_IO_SPACE) {
+			/* BASE is in I/O space. */
+			space = PCIIO_SPACE_IO;
+			mask = -4;
+			code = base & 3;
+			base = base & mask;
+
+			if (base == 0) {
+				;	/* not assigned */
+			} else if (!(cmd_reg & PCI_CMD_IO_SPACE)) {
+				base = 0;	/* decode not enabled */
+			}
+		} else {
+			/* BASE is in MEM space. */
+			space = PCIIO_SPACE_MEM;
+			mask = -16;
+			code = base & PCI_BA_MEM_CODEMASK;
+			base = base & mask;
+			if (base == 0) {
+				;	/* not assigned */
+			} else if (!(cmd_reg & PCI_CMD_MEM_SPACE)) {
+				base = 0;	/* decode not enabled */
+			} else if (base & 0xC0000000) {
+				base = 0;	/* outside permissible range */
+			} else if ((code & PCI_BA_MEM_LOCATION) == PCI_BA_MEM_64BIT) {
+				upper = pciio_config_get(func_vhdl,
+							 bar_offset + 4, 4);
+				if (upper != 0) {
+					base = 0;	/* outside range */
+				}
+			}
+		}
+
+		if (base != 0) {	/* estimate size */
+			size = base & -base;
+		} else {	/* calculate size */
+			pciio_config_set(func_vhdl, bar_offset, 4, 0xffffffff);
+			size = pciio_config_get(func_vhdl, bar_offset, 4);
+			size &= mask;	/* keep addr */
+			size &= -size;	/* keep lsbit */
+			if (size == 0)
+				continue;
+		}
+
+		/*
+		 * Set various window parameters.  w_base gets set in
+		 * function_addr_space_assign().
+		 */ 
+
+		func_pciio_info->c_window[win].w_space = space;
+		func_pciio_info->c_window[win].w_code = code;
+		func_pciio_info->c_window[win].w_size = size;
+
+#if PPB_DEBUG
+		KERN_MSG(K_NOTE,
+			"ppb_function_init 0x%x - win %d space %d "
+			"code %d size 0x%x", func_vhdl, win, space, code, size);
+#endif
+
+		/*
+		 * Accumulate window space needed on this bus and
+		 * store it with the parent PPB (if there is one).
+		 */
+
+		if (parent_ppb) {
+			pciio_ppbspace_t	*ppbspace;
+
+			if (space == PCIIO_SPACE_IO) {
+				ppbspace = &parent_ppb->b_io;
+			} else if (code & PCI_BA_PREFETCH) {
+				ppbspace = &parent_ppb->b_mempf;
+			} else {
+				ppbspace = &parent_ppb->b_mem;
+			}
+
+			/*
+			 * Add in size, rounding up to size alignment
+			 * first.
+			 */
+
+			ROUNDUP(ppbspace->size, size);
+			ppbspace->size += size;
+		}
+		if (code == PCI_BA_MEM_64BIT) {
+			win++;	/* skip upper half */
+			pciio_config_set(func_vhdl, bar_offset+4, 4, 0);
+		}
+	}
+
+	/*
+	 * Allocate space for the EXPANSION ROM
+	 */
+
+	pciio_config_set(func_vhdl, PCI_EXPANSION_ROM, 4, 0xfffff800);
+	mask = pciio_config_get(func_vhdl, PCI_EXPANSION_ROM, 4);
+	size = mask & -mask;
+
+	func_pciio_info->c_rsize = size;	/* set rbase in */
+						/* function_addr_space_assign */
+
+	if (size && parent_ppb) {
+		ROUNDUP(parent_ppb->b_mem.size, size);
+		parent_ppb->b_mem.size += size;
+	}
+
+	/*
+	 * Determine if this function is DMA Master capable.  Need to set the
+	 * BUS_MASTER bit and see if it stays set in order to make this
+	 * determination.
+	 */
+
+	if (! (cmd_reg & PCI_CMD_BUS_MASTER)) {
+		cmd_reg |= PCI_CMD_BUS_MASTER;
+		pciio_config_set(func_vhdl, PCI_CFG_COMMAND, 2, cmd_reg);
+		cmd_reg = pciio_config_get(func_vhdl, PCI_CFG_COMMAND, 2);
+	}
+
+	class = pciio_config_get(func_vhdl, PCI_CFG_BASE_CLASS, 1);
+	subclass = pciio_config_get(func_vhdl, PCI_CFG_SUB_CLASS, 1);
+
+	/*	
+	 * Account for secondary masters.  Don't count PPB's advertising
+	 * PCI_CMD_BUS_MASTER since they're just mastering DMA on behalf of
+	 * other cards.
+	 */
+
+	if ((cmd_reg & PCI_CMD_BUS_MASTER) && parent_ppb &&
+					!IS_PPB(class, subclass)) {
+		parent_ppb->b_master_sec++;
+		parent_ppb->b_master_sub++;
+	}
+
+	/*
+	 * If this function represents a ppb, dive down into it.
+	 */
+
+	if (IS_PPB(class, subclass)) {
+		do_ppb_scan(func_vhdl);
+
+		/*
+		 * Add in io/mem space required behind the ppb, after
+		 * first rounding up to the alignment required
+		 * by the first bar found on the secondary bus
+		 */
+
+		if (parent_ppb) {
+			pciio_ppb_t	ppb;
+
+			ppb = func_pciio_info->c_ppb;
+			ASSERT(ppb);
+			ASSERT(parent_ppb);
+
+			parent_ppb->b_io.size += ppb->b_io.size;
+			parent_ppb->b_mem.size += ppb->b_mem.size;
+			parent_ppb->b_mempf.size += ppb->b_mempf.size;
+
+			parent_ppb->b_master_sub += ppb->b_master_sub;
+		}
+	}
+
+	return (0);
+}
+
+static int
+ppb_addr_space_assign(pciio_info_t pciio_info)
+{
+	uint32_t	type;
+	uint64_t	base;
+	uint64_t	limit;
+	pciio_info_t	func_info;
+	pciio_ppb_t	ppb_info;
+	vertex_hdl_t	func_vhdl;
+
+	ppb_info = pciio_info->c_ppb;
+	ppb_info->b_io.next = ppb_info->b_io.base;
+	ppb_info->b_mem.next = ppb_info->b_mem.base;
+	ppb_info->b_mempf.next = ppb_info->b_mempf.base;
+
+	func_info = ppb_info->b_fns;
+	while (func_info) {
+		function_addr_space_assign(func_info);
+		func_info = func_info->c_forw;
+	}
+
+	/*
+	 * Init bridge base/limit registers
+	 */
+
+	func_vhdl = pciio_info_dev_get(pciio_info);
+
+	/*
+	 * Set up IO base/limit registers
+	 */
+
+	if (ppb_info->b_io.size) {
+		base = ppb_info->b_io.base;
+		limit = base + ppb_info->b_io.size - 1;
+	} else {
+		base = 0;
+		limit = 0;
+	}
+
+	pciio_config_set(func_vhdl, PCI_CFG_PPB_IOBASE, 1, 
+						(base & 0xf000) >> 8);
+	pciio_config_set(func_vhdl, PCI_CFG_PPB_IOLIM, 1,
+						(limit & 0xf000) >> 8);
+						 
+	type = pciio_config_get(func_vhdl, PCI_CFG_PPB_IOBASE, 1) & 0x0f;
+	if (type == PCI_PPB_IO_AD16) {	/* 16-bit I/O address decode */
+		if (base >> 16 || limit >> 16) {
+			KERN_MSG(K_WARN, "*** ppb IO base/limit conflict\n");
+		}
+	} else {		/* 32-bit I/O address decode */
+		pciio_config_set(func_vhdl, PCI_CFG_PPB_IOBASEHI, 2,
+						base >> 16);
+		pciio_config_set(func_vhdl, PCI_CFG_PPB_IOLIMHI, 2, 
+						limit >> 16);
+	}
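+
+	/*
+	 * Worked example (illustrative): an I/O window at base 0x1000 with
+	 * size 0x1000 writes 0x10 to both PPB_IOBASE and PPB_IOLIM; the PPB
+	 * then decodes 0x1000-0x1fff, since the bottom 12 bits are implied
+	 * 0 for the base and 0xfff for the limit.
+	 */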
+
+	/*
+	 * Set up MEM base/limit registers.
+	 */
+
+	if (ppb_info->b_mem.size) {
+		base = ppb_info->b_mem.base;
+		limit = base + ppb_info->b_mem.size - 1;
+	} else {
+		base = 0;
+		limit = 0;
+	}
+
+	pciio_config_set(func_vhdl, PCI_CFG_PPB_MEMBASE, 2, 
+						(base & 0xfff00000) >> 16);
+	pciio_config_set(func_vhdl, PCI_CFG_PPB_MEMLIM, 2, 
+						(limit & 0xfff00000) >> 16);
+
+	/*
+	 * Set up prefetchable MEM base/limit registers.
+	 */
+
+	if (ppb_info->b_mempf.size) {
+		base = ppb_info->b_mempf.base;
+		limit = base + ppb_info->b_mempf.size - 1;
+	} else {
+		base = 0;
+		limit = 0;
+	}
+
+	pciio_config_set(func_vhdl, PCI_CFG_PPB_MEMPFBASE, 2,
+						(base & 0xfff00000) >> 16);
+	pciio_config_set(func_vhdl, PCI_CFG_PPB_MEMPFLIM, 2,
+						(limit & 0xfff00000) >> 16);
+
+	type = pciio_config_get(func_vhdl, PCI_CFG_PPB_MEMPFBASE, 1) & 0xf;
+	if (type == PCI_PPB_MEMPF_AD32) {	/* 32-bit memory decoder */
+		if (base >> 32 || limit >> 32) {
+			KERN_MSG(K_WARN,
+				"*** ppb prefetch MEM base/limit conflict\n");
+		}
+	} else {		/* 64-bit memory decoder */
+		pciio_config_set(func_vhdl, PCI_CFG_PPB_MEMPFBASEHI, 4,
+						base >> 32);
+		pciio_config_set(func_vhdl, PCI_CFG_PPB_MEMPFLIMHI, 4,
+						limit >> 32);
+	}
+
+	return 0;
+}
+
+static int
+function_addr_space_assign(pciio_info_t func_info)
+{
+	int		i;
+	uint8_t		class;
+	uint8_t		subclass;
+	uint32_t	bar;
+	vertex_hdl_t	func_vhdl;
+	pciio_ppb_t	parent_ppb_info;
+	uint16_t	command_reg;
+
+	func_vhdl = pciio_info_dev_get(func_info);
+	class = pciio_config_get(func_vhdl, PCI_CFG_BASE_CLASS, 1);
+	subclass = pciio_config_get(func_vhdl, PCI_CFG_SUB_CLASS, 1);
+
+	parent_ppb_info = func_info->c_parent_ppb;
+
+	/*
+	 * Assign BAR's
+	 */
+
+	for (i = 0; i < PCI_CFG_BASE_ADDRS; i++) {
+		pciio_ppbspace_t	*ppbspace;
+		pciio_win_info_t	win;
+
+		win = &func_info->c_window[i];
+		if (! win->w_size) {
+			continue;
+		}
+
+		if (win->w_space == PCIIO_SPACE_IO) {
+			ppbspace = &parent_ppb_info->b_io;
+		} else if (win->w_code & PCI_BA_PREFETCH) {
+			ppbspace = &parent_ppb_info->b_mempf;
+		} else {
+			ppbspace = &parent_ppb_info->b_mem;
+		}
+
+		/*
+		 * Round up address to size multiple
+		 */
+
+		ROUNDUP(ppbspace->next, win->w_size);
+
+		bar = pciio_config_get(func_vhdl, PCI_CFG_BASE_ADDR(i), 4);
+
+		pciio_config_set(func_vhdl, PCI_CFG_BASE_ADDR(i),
+					4, ppbspace->next & 0xffffffff);
+
+		/*
+		 * If win represents a 64-bit bar set the upper 32 bits (if
+		 * needed) and advance i.
+		 *
+		 * The upper 32 bits was cleared in ppb_function_init(),
+		 * so no need to write them unless they're actually set.
+		 *
+	 	 * Note:  It appears that at least one card (Tigon 3 by 3com)
+		 * has trouble setting the upper 32 bits.  In at least one
+		 * version of this card, setting the upper 32 bits appears to
+		 * clear the lower 32 bits.
+		 */
+
+		func_info->c_window[i].w_base = ppbspace->next;
+
+		if (bar & PCI_BA_MEM_64BIT) {
+			i++;
+
+			if (ppbspace->next >> 32) {
+				pciio_config_set(func_vhdl,
+						PCI_CFG_BASE_ADDR(i),
+						4, ppbspace->next >> 32);
+			}
+		}
+
+		ppbspace->next += win->w_size;
+	}
+
+	/*
+	 * Assign expansion ROM
+	 */
+
+	if (func_info->c_rsize) {
+		ROUNDUP(parent_ppb_info->b_mem.next, func_info->c_rsize);
+		func_info->c_rbase = parent_ppb_info->b_mem.next;
+
+		/*
+		 * Set the base.  Don't enable the decoder though as some cards
+		 * implement only 1 memory decoder and that can interfere with
+		 * a mem BAR.
+		 */
+
+		pciio_config_set(func_vhdl, PCI_EXPANSION_ROM,
+					4, parent_ppb_info->b_mem.next);
+	}
+
+	/*
+	 * If function is a PPB, dive down into it
+	 */
+
+	if (IS_PPB(class, subclass)) {
+		pciio_ppb_t	ppb_info;
+
+		ppb_info = func_info->c_ppb;
+
+		ppb_info->b_io.base =
+			ROUND(parent_ppb_info->b_io.next, PCI_PPB_IO_ALIGN);
+		ppb_info->b_mem.base =
+			ROUND(parent_ppb_info->b_mem.next, PCI_PPB_MEM_ALIGN);
+		ppb_info->b_mempf.base =
+			ROUND(parent_ppb_info->b_mempf.next, PCI_PPB_MEMPF_ALIGN);
+
+		ppb_addr_space_assign(func_info);
+
+		/*
+		 * Adjust parent io/mem next parameters up
+		 */
+
+		parent_ppb_info->b_io.next = ppb_info->b_io.next;
+		ROUNDUP(parent_ppb_info->b_io.next, PCI_PPB_IO_ALIGN);
+
+		parent_ppb_info->b_mem.next = ppb_info->b_mem.next;
+		ROUNDUP(parent_ppb_info->b_mem.next, PCI_PPB_MEM_ALIGN);
+
+		parent_ppb_info->b_mempf.next = ppb_info->b_mempf.next;
+		ROUNDUP(parent_ppb_info->b_mempf.next, PCI_PPB_MEMPF_ALIGN);
+	}
+
+	/*
+	 * Enable IO & MEM decoders, and DMA MASTER bit
+	 *
+	 * ###maule:  Should only enable relevant bits?  How about other bits
+	 * like PCI_CMD_SPEC_CYCLES, et al.
+	 *
+	 * ###maule:  See pcibr_slot.c for special cases and incorporate them
+	 */
+
+	command_reg = pciio_config_get(func_vhdl, PCI_CFG_COMMAND, 2);
+	command_reg |=
+		(PCI_CMD_IO_SPACE | PCI_CMD_MEM_SPACE | PCI_CMD_BUS_MASTER);
+	pciio_config_set(func_vhdl, PCI_CFG_COMMAND, 2, command_reg);
+
+	return 0;
+}
+
+static void
+ppb_function_print(pciio_info_t fn_info, char *indent)
+{
+	uint8_t		htype;
+	uint8_t		bar;
+	uint8_t		nbars;
+	uint8_t		class;
+	uint8_t		subclass;
+	pciio_bus_t	bus;
+	pciio_slot_t	slot;
+	vertex_hdl_t	fn_vhdl;
+	pciio_function_t	function;
+
+	bus = pciio_info_bus_get(fn_info);
+	slot = pciio_info_slot_get(fn_info);
+	function = pciio_info_function_get(fn_info);
+	if (function == PCIIO_FUNC_NONE) {
+		function = 0;
+	}
+	fn_vhdl = pciio_info_dev_get(fn_info);
+
+	htype = pciio_config_get(fn_vhdl, PCI_CFG_HEADER_TYPE, 1);
+	nbars = ((htype & 0x7f) == 0) ? 6 : 2;
+
+	printk("\n%sDevice at bus/slot/function %d/%d/%d (vhdl 0x%p)\n",
+		indent, bus, slot, function, (void *)fn_vhdl);
+	printk("%s    vendor id 0x%lx device id 0x%lx class code 0x%lx\n",
+		indent,
+		pciio_config_get(fn_vhdl, PCI_CFG_VENDOR_ID, 2),
+		pciio_config_get(fn_vhdl, PCI_CFG_DEVICE_ID, 2),
+		pciio_config_get(fn_vhdl, PCI_CFG_CLASS_CODE, 3));
+	printk("%s    header type 0x%lx latency timer 0x%lx max latency 0x%lx\n",
+		indent,
+		pciio_config_get(fn_vhdl, PCI_CFG_HEADER_TYPE, 1),
+		pciio_config_get(fn_vhdl, PCI_CFG_LATENCY_TIMER, 1),
+		pciio_config_get(fn_vhdl, PCI_MAX_LAT, 1));
+
+	for (bar = 0; bar < nbars; bar++) {
+		pciio_win_info_t	win;
+		char			str[32], *cp;
+
+		win = &fn_info->c_window[bar];
+		if (! win->w_size) {
+			continue;
+		}
+
+		str[0] = '\0';
+		cp = str;
+
+		if (win->w_code & PCI_BA_IO_SPACE) {
+			strcat(cp, ",IO");
+		} else {
+			strcat(cp, ",MEM");
+
+			if (win->w_code & PCI_BA_PREFETCH) {
+				strcat(cp, ",PREFETCH");
+			}
+
+			if (win->w_code & PCI_BA_MEM_1MEG) {
+				strcat(cp, ",1MEG");
+			} else if (win->w_code & PCI_BA_MEM_64BIT) {
+				strcat(cp, ",64BIT");
+			} else {
+				strcat(cp, ",32BIT");
+			}
+		}
+
+		cp++;		/* skip leading , */
+
+		printk("%s    bar[%d] = 0x%lx [%s] size 0x%lx\n",
+			indent, bar,
+			win->w_base,
+			cp,
+			win->w_size);
+	}
+
+	if (fn_info->c_rsize) {
+		printk("%s    ROM   = 0x%lx size 0x%lx\n", 
+			indent, fn_info->c_rbase, fn_info->c_rsize);
+	}
+
+	class = pciio_config_get(fn_vhdl, PCI_CFG_BASE_CLASS, 1);
+	subclass = pciio_config_get(fn_vhdl, PCI_CFG_SUB_CLASS, 1);
+	if (IS_PPB(class, subclass)) {
+		char		new_indent[64];
+		pciio_ppb_t	ppb;
+		pciio_info_t	f;
+		uint64_t	basehi, baselow, base;
+		uint64_t	limhi, limlow, lim;
+
+		ppb = fn_info->c_ppb;
+		printk("%s    PPB primary/secondary/subordinate %d/%d/%d\n",
+			indent,
+			ppb->b_primary, ppb->b_secondary, ppb->b_subordinate);
+
+		/*
+		 * Compute I/O base/limit values
+		 */
+
+		basehi = pciio_config_get(fn_vhdl, PCI_CFG_PPB_IOBASEHI, 2);
+		baselow = pciio_config_get(fn_vhdl, PCI_CFG_PPB_IOBASE, 1);
+		base = (basehi << 16 | baselow << 8) & 0xfffff000;
+
+		limhi = pciio_config_get(fn_vhdl, PCI_CFG_PPB_IOLIMHI, 2);
+		limlow = pciio_config_get(fn_vhdl, PCI_CFG_PPB_IOLIM, 1);
+		lim = (limhi << 16 | limlow << 8) & 0xfffff000;
+		if (lim) {
+			lim |= ~0xfffff000;
+		}
+
+		printk("%s    IO     size/base/limit 0x%lx/0x%lx/0x%lx\n",
+			indent, ppb->b_io.size, base, lim);
+
+		/*
+		 * Compute MEM base/limit values
+		 */
+
+		baselow = pciio_config_get(fn_vhdl, PCI_CFG_PPB_MEMBASE, 2);
+		base = (baselow << 16) & 0xfff00000;
+
+		limlow = pciio_config_get(fn_vhdl, PCI_CFG_PPB_MEMLIM, 2);
+		lim = (limlow << 16) & 0xfff00000;
+		if (lim) {
+			lim |= ~0xfff00000;
+		}
+
+		printk("%s    MEM    size/base/limit 0x%lx/0x%lx/0x%lx\n",
+			indent, ppb->b_mem.size, base, lim);
+
+		/*
+		 * Compute prefetchable MEM base/limit values
+		 */
+
+		basehi = pciio_config_get(fn_vhdl, PCI_CFG_PPB_MEMPFBASEHI, 4);
+		baselow = pciio_config_get(fn_vhdl, PCI_CFG_PPB_MEMPFBASE, 2);
+		base = (basehi << 32 | baselow << 16) & 0xfffffffffff00000;
+
+		limhi = pciio_config_get(fn_vhdl, PCI_CFG_PPB_MEMPFLIMHI, 4);
+		limlow = pciio_config_get(fn_vhdl, PCI_CFG_PPB_MEMPFLIM, 2);
+		lim = (limhi << 32 | limlow << 16) & 0xfffffffffff00000;
+		if (lim) {
+			lim |= ~0xfffffffffff00000;
+		}
+
+		printk("%s    MEMPF  size/base/limit 0x%lx/0x%lx/0x%lx\n",
+			indent, ppb->b_mempf.size, base, lim);
+
+		sprintf(new_indent, "%s    ", indent);
+		if (ppb->b_fns == NULL) {
+			printk("%s    <no slots occupied>\n", indent);
+		} else {
+			for (f = ppb->b_fns; f; f = f->c_forw) {
+				ppb_function_print(f, new_indent);
+			}
+		}
+	}
+}
+
+
+static void
+ppb_link(pciio_info_t pciio_info, pciio_ppb_t ppb)
+{
+	pciio_info_t	tmp, prev;
+
+	pciio_info->c_parent_ppb = ppb;
+
+	/*
+	 * First function on the ppb list.  Init fields and 
+	 * return.
+	 */
+
+	if (ppb->b_fns == NULL) {
+		ppb->b_fns = pciio_info;
+		pciio_info->c_forw = NULL;
+		pciio_info->c_back = NULL;
+		return;
+	}
+
+	/*
+	 * Not the first function - scan the list to insert in
+	 * the proper place - sorted by slot/function
+	 */
+
+	prev = NULL;
+	for (tmp = ppb->b_fns; tmp != NULL; tmp = tmp->c_forw) {
+		if (pciio_info->c_slot < tmp->c_slot ||
+		    (pciio_info->c_slot == tmp->c_slot &&
+		     pciio_info->c_func < tmp->c_func)) {
+			break;
+		}
+
+		prev = tmp;
+	}
+
+	if (prev == NULL) {
+		/*
+		 * Insert at head of the list
+		 */
+
+		ppb->b_fns = pciio_info;
+		tmp->c_back = pciio_info;
+		pciio_info->c_forw = tmp;
+		pciio_info->c_back = NULL;
+	} else if (tmp == NULL) {
+		/*
+		 * Insert at tail of the list
+		 */
+
+		prev->c_forw = pciio_info;
+		pciio_info->c_forw = NULL;
+		pciio_info->c_back = prev;
+	} else {
+		/*
+		 * Insert in the middle
+		 */
+
+		pciio_info->c_forw = prev->c_forw;
+		pciio_info->c_forw->c_back = prev;
+		prev->c_forw = pciio_info;
+		pciio_info->c_back = prev;
+	}
+}
+
+
+/*
+ * Table to remap interrupt pins for devices residing behind a PPB.  The first
+ * index is the device slot%4 and the second index is the interrupt to remap.
+ * The result is the interrupt line which will be used on the primary side
+ * of the ppb.
+ * 
+ * This table taken from "PCI System Architecture" Fourth Edition, table 24-13.
+ */
+
+static int	ppb_intr_map[4][4] = {
+ { PCIIO_INTR_LINE_A, PCIIO_INTR_LINE_B, PCIIO_INTR_LINE_C, PCIIO_INTR_LINE_D },
+ { PCIIO_INTR_LINE_B, PCIIO_INTR_LINE_C, PCIIO_INTR_LINE_D, PCIIO_INTR_LINE_A },
+ { PCIIO_INTR_LINE_C, PCIIO_INTR_LINE_D, PCIIO_INTR_LINE_A, PCIIO_INTR_LINE_B },
+ { PCIIO_INTR_LINE_D, PCIIO_INTR_LINE_A, PCIIO_INTR_LINE_B, PCIIO_INTR_LINE_C },
+};
+
+/*
+ * Given a pciio_info_t and a mask of interrupt lines, remap to the lines that
+ * will be used on the host slot accounting for multiple ppb layers.
+ *
+ * External interface called from pciio.c
+ */
+
+int
+pciio_ppb_intr_map(pciio_info_t pci_info, int mask)
+{
+	int		newmask, tmpmask, int_line;
+	pciio_slot_t	slot;
+	pciio_ppb_t	ppb;
+
+	newmask = mask;
+	slot = pciio_info_slot_get(pci_info);
+
+	ppb = pci_info->c_parent_ppb;
+	while (ppb) {
+		tmpmask = newmask;
+		newmask = 0;
+
+		for (int_line = 0; int_line < 4; int_line++) {
+			if (tmpmask & PCIIO_INTR_LINE(int_line)) {
+				newmask |= ppb_intr_map[slot%4][int_line];
+			}
+		}
+
+		slot = pciio_info_slot_get(ppb->b_pciio);
+		ppb = ppb->b_pciio->c_parent_ppb;
+	}
+
+	return newmask;
+}
+
+/*
+ * Bus number allocation.  Currently, subordinate bus numbers start at 16 to
+ * give providers some flexibility in how they assign host bus numbers.  Bus
+ * 255 is reserved.  The remaining bus numbers are shared among all PCI
+ * secondary busses in the system (i.e., not per-provider).
+ *
+ * Bus numbers are currently never returned to the pool (except when there's
+ * an error enumerating a bus), as there is no facility for hot unplugging or
+ * detaching the ppb driver.
+ */
+
+static uint32_t	ppb_bus_map[8];		/* 256 bits */
+
+#define bus_map_index(x)	((x) / 32)
+#define bus_map_mask(x)		(1L << (31 - ((x) % 32)))
+
+static  pciio_bus_t
+ppb_bus_alloc(void)
+{
+	pciio_bus_t	bus;
+	uint32_t	index, mask;
+
+	for (index = 0; index < 8; index++) {
+		if (ppb_bus_map[index] < 0xffffffff) {
+			break;
+		}
+	}
+
+	if (index >= 8) {
+		return PCIIO_BUS_NONE;
+	}
+
+	bus = index * 32;
+	for (mask = 0x80000000; mask > 0; mask >>= 1) {
+		if (! (ppb_bus_map[index] & mask)) {
+			ppb_bus_map[index] |= mask;
+			return  bus;
+		}
+
+		bus++;
+	}
+
+	ASSERT(0);		/* should never get here */
+	return PCIIO_BUS_NONE;
+}
+
+static void
+ppb_bus_reserve(pciio_bus_t bus)
+{
+	uint32_t	index, mask;
+
+	index = bus_map_index(bus);
+	mask = bus_map_mask(bus);
+
+	ppb_bus_map[index] |= mask;
+}
+
+static void
+ppb_bus_free(pciio_bus_t bus)
+{
+	uint32_t	index, mask;
+
+	index = bus_map_index(bus);
+	mask = bus_map_mask(bus);
+
+	ppb_bus_map[index] &= ~mask;
+}
+
+/*
+ * Standard device driver entry points.  Only use init/attach currently.
+ */
+
+void 
+pciio_ppb_init(void)
+{
+	pciio_bus_t	bus;
+	int reserved_buses = 9;
+
+	sema_init(&pciio_ppb_sema, 1);
+
+	for (bus = 0; bus < reserved_buses*16; bus++) {
+		ppb_bus_reserve(bus);
+	}
+
+	ppb_bus_reserve(255);	/* reserved for PCIIO_BUS_NONE */
+}
+
+int
+pciio_ppb_attach(vertex_hdl_t conn)
+{
+	int		rval;
+	uchar_t		class;
+	uchar_t		subclass;
+	pciio_info_t	info = pciio_info_get(conn);
+#if defined(PCIIO_SLOT_NONE)
+	pciio_slot_t	slot = pciio_info_slot_get(info);
+#endif
+
+#if defined(PCIIO_SLOT_NONE)
+	if (slot == PCIIO_SLOT_NONE) {
+		return 0;
+	}
+#endif
+
+	class = pciio_config_get(conn, PCI_CFG_BASE_CLASS, 1);
+	subclass = pciio_config_get(conn, PCI_CFG_SUB_CLASS, 1);
+
+	if (!IS_PPB(class, subclass)) {
+		return 0;
+	}
+
+	rval = ppb_scan(conn);
+	if (rval != 0) {
+		return rval;
+	}
+
+	/*
+	 * assign address space
+	 */
+
+	ppb_addr_space_assign(info);
+
+	if (ppb_showconfig) {
+		ppb_function_print(info, "");
+	}
+
+	/*
+	 * Initiate driver attach routines.  This is done in a thread
+	 * to avoid possible complications with making recursive calls
+	 * into cdl.
+	 */
+	kernel_thread((st_func_t *)ppb_attach_thread, info, 0);
+
+	return 0;
+}
+
+void
+ppb_attach_thread(pciio_info_t info)
+{
+	ppb_attach_function(info);
+}
+
+void
+ppb_attach_function(pciio_info_t info)
+{
+	pciio_ppb_t	ppbinfo = info->c_ppb;
+	pciio_info_t	fn_info;
+	vertex_hdl_t	vhdl;
+
+	if (ppbinfo == NULL) {
+		return;
+	}
+
+	for (fn_info = ppbinfo->b_fns; fn_info; fn_info = fn_info->c_forw) {
+		if (fn_info->c_ppb) {
+			ppb_attach_function(fn_info);
+			continue;
+		}
+
+		vhdl = pciio_info_dev_get(fn_info);
+		pciio_device_attach(vhdl, 0);
+	}
+}
+
+int
+pciio_ppb_detach(vertex_hdl_t conn)
+{
+	uchar_t	class;
+	uchar_t	subclass;
+
+	class = pciio_config_get(conn, PCI_CFG_BASE_CLASS, 1);
+	subclass = pciio_config_get(conn, PCI_CFG_SUB_CLASS, 1);
+
+	/*
+	 * Technically, the ppb driver does not allow itself to be
+	 * detached.  However, for hot plug/unplug support, we need to
+	 * be prepared to be called because we registered ourselves as
+	 * a wildcarded driver.
+	 *
+	 * So, if we're called with a device that is not a PPB, return 0,
+	 * otherwise, return 1 indicating to the caller that we cannot
+	 * be detached.
+	 */
+	return IS_PPB(class, subclass);
+}
+
+
+static uint64_t
+ppb_roundup_log2(uint64_t val)
+{
+	uint64_t	bit;
+
+	if (val == 0 || (val & (1LL << 63))) 
+		return 0;
+
+	for (bit = 1; bit && bit < val; bit <<= 1)
+		;
+
+	return bit;
+}
+
+pciio_businfo_t
+pciio_ppb_businfo_get(vertex_hdl_t conn)
+{
+	pciio_info_t	 info = pciio_info_get(conn);
+	pciio_businfo_t businfo;
+
+	if (info == NULL || info->c_ppb == NULL) {
+		return NULL;
+	}
+
+	businfo = &info->c_ppb->b_businfo;
+	businfo->bi_multi_master = 1;
+	businfo->bi_asic_type = PCIIO_ASIC_TYPE_UNKNOWN;
+	businfo->bi_bus_type = PCIIO_BUS_TYPE_UNKNOWN;
+	businfo->bi_bus_speed = PCIIO_BUS_SPEED_UNKNOWN;
+
+	return businfo;
+}
diff -Nru a/arch/ia64/sn/io/sn2/pic.c b/arch/ia64/sn/io/sn2/pic.c
--- a/arch/ia64/sn/io/sn2/pic.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/pic.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -7,65 +6,58 @@
  * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
  */
 
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/sn/sgi.h>
+#include <linux/interrupt.h>
 #include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
+#include <asm/sn/hcl_util.h>
 #include <asm/sn/pci/pciio.h>
 #include <asm/sn/pci/pcibr.h>
 #include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/io.h>
+#include <asm/sn/pci/pic.h>
 #include <asm/sn/sn_private.h>
 
+extern struct file_operations pcibr_fops;
+extern pcibr_list_p	pcibr_list;
 
-#define PCI_BUS_NO_1 1
+static int		pic_attach2(vertex_hdl_t, pci_bridge_t *, vertex_hdl_t,
+ 				int, pcibr_soft_t *);
 
-extern int pcibr_attach2(vertex_hdl_t, bridge_t *, vertex_hdl_t, int, pcibr_soft_t *);
-extern void pcibr_driver_reg_callback(vertex_hdl_t, int, int, int);
-extern void pcibr_driver_unreg_callback(vertex_hdl_t, int, int, int);
+extern int		isIO9(nasid_t);
+extern char	       *dev_to_name(vertex_hdl_t dev, char *buf, uint buflen);
+extern int		pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
+extern pcibr_hints_t	pcibr_hints_get(vertex_hdl_t, int);
+extern unsigned		pcibr_intr_bits(pciio_info_t info,
+				pciio_intr_line_t lines, int nslots);
+extern void		pcibr_setwidint(xtalk_intr_t);
+extern int		pcibr_error_handler_wrapper(error_handler_arg_t, int,
+				ioerror_mode_t, ioerror_t *);
+extern void		pcibr_error_intr_handler(intr_arg_t);
+extern void		pcibr_directmap_init(pcibr_soft_t);
+extern int		pcibr_slot_info_init(vertex_hdl_t,pciio_slot_t);
+extern int		pcibr_slot_addr_space_init(vertex_hdl_t,pciio_slot_t);
+extern int		pcibr_slot_device_init(vertex_hdl_t, pciio_slot_t);
+extern int		pcibr_slot_pcix_rbar_init(pcibr_soft_t, pciio_slot_t);
+extern int		pcibr_slot_guest_info_init(vertex_hdl_t,pciio_slot_t);
+extern int		pcibr_slot_call_device_attach(vertex_hdl_t,
+				pciio_slot_t, int);
+extern void		pcibr_rrb_alloc_init(pcibr_soft_t, int, int, int);
+extern int		pcibr_pcix_rbars_calc(pcibr_soft_t);
+extern pcibr_info_t	pcibr_device_info_new(pcibr_soft_t, pciio_slot_t,
+				pciio_function_t, pciio_vendor_id_t,
+				pciio_device_id_t);
+extern int		pcibr_initial_rrb(vertex_hdl_t, pciio_slot_t, 
+				pciio_slot_t);
+extern void		xwidget_error_register(vertex_hdl_t, error_handler_f *,
+				error_handler_arg_t);
+extern void		pcibr_clearwidint(pci_bridge_t *);
 
 
-/*
- * copy inventory_t from conn_v to peer_conn_v
- */
-int
-pic_bus1_inventory_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v)
-{
-	inventory_t *pinv, *peer_pinv;
-
-	if (hwgraph_info_get_LBL(conn_v, INFO_LBL_INVENT,
-				(arbitrary_info_t *)&pinv) == GRAPH_SUCCESS)
- {
-		NEW(peer_pinv);
-		memcpy(peer_pinv, pinv, sizeof(inventory_t));
-		if (hwgraph_info_add_LBL(peer_conn_v, INFO_LBL_INVENT,
-			    (arbitrary_info_t)peer_pinv) != GRAPH_SUCCESS) {
-			DEL(peer_pinv);
-			return 0;
-		}
-		return 1;
-	}
-
-	printk("pic_bus1_inventory_dup: cannot get INFO_LBL_INVENT from 0x%lx\n ", (uint64_t)conn_v);
-	return 0;
-}
 
 /*
  * copy xwidget_info_t from conn_v to peer_conn_v
  */
-int
+static int
 pic_bus1_widget_info_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v,
 							cnodeid_t xbow_peer)
 {
@@ -83,15 +75,15 @@
 
 	if (hwgraph_info_get_LBL(conn_v, INFO_LBL_XWIDGET,
 			(arbitrary_info_t *)&widget_info) == GRAPH_SUCCESS) {
-		NEW(peer_widget_info);
-		peer_widget_info->w_vertex = peer_conn_v;
-		peer_widget_info->w_id = widget_info->w_id;
-		peer_widget_info->w_master = peer_hubv;
-		peer_widget_info->w_masterid = peer_hub_info->h_widgetid;
+    		NEW(peer_widget_info);
+    		peer_widget_info->w_vertex = peer_conn_v;
+    		peer_widget_info->w_id = widget_info->w_id;
+    		peer_widget_info->w_master = peer_hubv;
+    		peer_widget_info->w_masterid = peer_hub_info->h_widgetid;
 		/* structure copy */
-		peer_widget_info->w_hwid = widget_info->w_hwid;
-		peer_widget_info->w_efunc = 0;
-		peer_widget_info->w_einfo = 0;
+    		peer_widget_info->w_hwid = widget_info->w_hwid;
+    		peer_widget_info->w_efunc = 0;
+    		peer_widget_info->w_einfo = 0;
 		peer_widget_info->w_name = kmalloc(strlen(peer_path) + 1, GFP_KERNEL);
 		strcpy(peer_widget_info->w_name, peer_path);
 
@@ -100,14 +92,13 @@
 				DEL(peer_widget_info);
 				return 0;
 		}
-
 		xwidget_info_set(peer_conn_v, peer_widget_info);
 
 		return 1;
 	}
 
-	printk("pic_bus1_widget_info_dup: "
-			"cannot get INFO_LBL_XWIDGET from 0x%lx\n", (uint64_t)conn_v);
+	KERN_MSG(K_WARN, "pic_bus1_widget_info_dup: INFO_LBL_XWIDGET failed "
+			 "for conn_v=0x%lx\n", (uint64_t)conn_v);
 	return 0;
 }
 
@@ -119,7 +110,7 @@
  * If not successful, return zero and both buses will attach to the
  * vertex passed into pic_attach().
  */
-vertex_hdl_t
+static vertex_hdl_t
 pic_bus1_redist(nasid_t nasid, vertex_hdl_t conn_v)
 {
 	cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
@@ -127,7 +118,7 @@
 	char pathname[256], peer_path[256], tmpbuf[256];
 	char *p;
 	int rc;
-	vertex_hdl_t peer_conn_v;
+	vertex_hdl_t peer_conn_v, hubv;
 	int pos;
 	slabid_t slab;
 
@@ -136,7 +127,7 @@
 		/* pcibr widget hw/module/001c11/slab/0/Pbrick/xtalk/12 */
 		/* sprintf(pathname, "%v", conn_v); */
 		xbow_peer = NASID_TO_COMPACT_NODEID(NODEPDA(cnode)->xbow_peer);
-		pos = hwgfs_generate_path(conn_v, tmpbuf, 256);
+		pos = hwgraph_generate_path(conn_v, tmpbuf, 256);
 		strcpy(pathname, &tmpbuf[pos]);
 		p = pathname + strlen("hw/module/001c01/slab/0/");
 
@@ -150,36 +141,37 @@
 		 */
 		rc = hwgraph_traverse(hwgraph_root, peer_path, &peer_conn_v);
 		if (GRAPH_SUCCESS == rc)
-			printk("pic_attach: found unexpected vertex: 0x%lx\n",
-								(uint64_t)peer_conn_v);
+			KERN_MSG(K_WARN, "pic_bus1_redist(): found an "
+				"unexpected vertex 0x%lx\n", (uint64_t)peer_conn_v);
 		else if (GRAPH_NOT_FOUND != rc) {
-			printk("pic_attach: hwgraph_traverse unexpectedly"
-					" returned 0x%x\n", rc);
+			KERN_MSG(K_WARN, "pic_bus1_redist(): hwgraph_traverse "
+				"unexpectedly returned 0x%x\n", rc);
 		} else {
 			/* try to add the widget vertex to the peer Cbrick */
-			rc = hwgraph_path_add(hwgraph_root, peer_path, &peer_conn_v);
-
+			rc = hwgraph_path_add(hwgraph_root, peer_path,
+								&peer_conn_v);
 			if (GRAPH_SUCCESS != rc)
-			    printk("pic_attach: hwgraph_path_add"
-						" failed with 0x%x\n", rc);
+			    KERN_MSG(K_WARN, "pic_bus1_redist(): "
+				    "hwgraph_path_add failed with 0x%x\n", rc);
 			else {
 			    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
-					"pic_bus1_redist: added vertex %v\n", peer_conn_v)); 
+		    			"pic_bus1_redist: added vertex 0x%lx\n",
+					peer_conn_v));
 
 			    /* Now hang appropiate stuff off of the new
-			     * vertex.	We bail out if we cannot add something.
+			     * vertex.  We bail out if we cannot add something.
 			     * In that case, we don't remove the newly added
 			     * vertex but that should be safe and we don't
 			     * really expect the additions to fail anyway.
 			     */
-#if 0
-			    if (!pic_bus1_inventory_dup(conn_v, peer_conn_v))
-					return 0;
-			    pic_bus1_device_desc_dup(conn_v, peer_conn_v);
-#endif
 			    if (!pic_bus1_widget_info_dup(conn_v, peer_conn_v, xbow_peer))
 					return 0;
 
+			    hubv = cnodeid_to_vertex(xbow_peer);
+			    ASSERT(hubv != GRAPH_VERTEX_NONE);
+			    device_master_set(peer_conn_v, hubv);
+			    xtalk_provider_register(hubv, &hub_provider);
+			    xtalk_provider_startup(hubv);
 			    return peer_conn_v;
 			}
 		}
@@ -188,29 +180,35 @@
 }
 
 
+/*
+ * PIC has two buses under a single widget.  pic_attach() calls pic_attach2()
+ * to attach each of those buses.
+ */
 int
 pic_attach(vertex_hdl_t conn_v)
 {
 	int		rc;
-	bridge_t	*bridge0, *bridge1 = (bridge_t *)0;
-	vertex_hdl_t	pcibr_vhdl0, pcibr_vhdl1 = (vertex_hdl_t)0;
-	pcibr_soft_t	bus0_soft, bus1_soft = (pcibr_soft_t)0;
-	vertex_hdl_t  conn_v0, conn_v1, peer_conn_v;
+	pci_bridge_t	*bridge0, *bridge1;
+	vertex_hdl_t	pcibr_vhdl0, pcibr_vhdl1;
+	pcibr_soft_t	bus0_soft, bus1_soft;
+	vertex_hdl_t	conn_v0, conn_v1, peer_conn_v;
+	int		bricktype;
+	int             iobrick_type_get_nasid(nasid_t nasid);
 
 	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v, "pic_attach()\n"));
 
-	bridge0 = (bridge_t *) xtalk_piotrans_addr(conn_v, NULL,
-	                        0, sizeof(bridge_t), 0);
-	bridge1 = (bridge_t *)((char *)bridge0 + PIC_BUS1_OFFSET);
+	bridge0 = pcibr_bridge_ptr_get(conn_v, 0);
+	bridge1 = pcibr_bridge_ptr_get(conn_v, 1);
 
 	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
-		    "pic_attach: bridge0=0x%x, bridge1=0x%x\n", 
+		    "pic_attach: bridge0=0x%lx, bridge1=0x%lx\n", 
 		    bridge0, bridge1));
 
 	conn_v0 = conn_v1 = conn_v;
 
 	/* If dual-ported then split the two PIC buses across both Cbricks */
-	if ((peer_conn_v = (pic_bus1_redist(NASID_GET(bridge0), conn_v))))
+	peer_conn_v = pic_bus1_redist(NASID_GET(bridge0), conn_v);
+	if (peer_conn_v)
 		conn_v1 = peer_conn_v;
 
 	/*
@@ -225,13 +223,21 @@
 	 * the Bridge registers themselves.
 	 */
 	/* FIXME: what should the hwgraph path look like ? */
-	rc = hwgraph_path_add(conn_v0, EDGE_LBL_PCIX_0, &pcibr_vhdl0);
-	ASSERT(rc == GRAPH_SUCCESS);
-	rc = hwgraph_path_add(conn_v1, EDGE_LBL_PCIX_1, &pcibr_vhdl1);
-	ASSERT(rc == GRAPH_SUCCESS);
+	bricktype = iobrick_type_get_nasid(NASID_GET(bridge0));
+	if ( bricktype == MODULE_CGBRICK ) {
+		rc = hwgraph_path_add(conn_v0, EDGE_LBL_AGP_0, &pcibr_vhdl0);
+		ASSERT(rc == GRAPH_SUCCESS);
+		rc = hwgraph_path_add(conn_v1, EDGE_LBL_AGP_1, &pcibr_vhdl1);
+		ASSERT(rc == GRAPH_SUCCESS);
+	} else {
+		rc = hwgraph_path_add(conn_v0, EDGE_LBL_PCIX_0, &pcibr_vhdl0);
+		ASSERT(rc == GRAPH_SUCCESS);
+		rc = hwgraph_path_add(conn_v1, EDGE_LBL_PCIX_1, &pcibr_vhdl1);
+		ASSERT(rc == GRAPH_SUCCESS);
+	}
 
 	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
-		    "pic_attach: pcibr_vhdl0=%v, pcibr_vhdl1=%v\n",
+		    "pic_attach: pcibr_vhdl0=0x%lx, pcibr_vhdl1=0x%lx\n",
 		    pcibr_vhdl0, pcibr_vhdl1));
 
 	/* register pci provider array */
@@ -241,20 +247,515 @@
 	pciio_provider_startup(pcibr_vhdl0);
 	pciio_provider_startup(pcibr_vhdl1);
 
-	pcibr_attach2(conn_v0, bridge0, pcibr_vhdl0, 0, &bus0_soft);
-	pcibr_attach2(conn_v1, bridge1, pcibr_vhdl1, 1, &bus1_soft);
+	pic_attach2(conn_v0, bridge0, pcibr_vhdl0, 0, &bus0_soft);
+	pic_attach2(conn_v1, bridge1, pcibr_vhdl1, 1, &bus1_soft);
 
 	/* save a pointer to the PIC's other bus's soft struct */
         bus0_soft->bs_peers_soft = bus1_soft;
         bus1_soft->bs_peers_soft = bus0_soft;
 
 	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
-		    "pic_attach: bus0_soft=0x%x, bus1_soft=0x%x\n",
+		    "pic_attach: bus0_soft=0x%lx, bus1_soft=0x%lx\n",
 		    bus0_soft, bus1_soft));
 
 	return 0;
 }
 
+
+/*
+ * PIC has two buses under a single widget.  pic_attach() calls pic_attach2()
+ * to attach each of those buses.
+ */
+static int
+pic_attach2(vertex_hdl_t xconn_vhdl, pci_bridge_t *bridge,
+	      vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
+{
+    vertex_hdl_t	    ctlr_vhdl;
+    pcibr_soft_t	    pcibr_soft;
+    pcibr_info_t	    pcibr_info;
+    xwidget_info_t	    info;
+    xtalk_intr_t	    xtalk_intr;
+    pcibr_list_p	    self;
+    int			    entry, slot, ibit, i;
+    vertex_hdl_t	    noslot_conn;
+    char		    devnm[MAXDEVNAME], *s;
+    pcibr_hints_t	    pcibr_hints;
+    picreg_t		    id;
+    picreg_t		    int_enable;
+    picreg_t		    pic_ctrl_reg;
+
+    int			    iobrick_type_get_nasid(nasid_t nasid);
+    int			    iobrick_module_get_nasid(nasid_t nasid);
+    int			    irq;
+    int			    cpu;
+
+    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
+		"pic_attach2: bridge=0x%lx, busnum=%d\n", bridge, busnum));
+
+    ctlr_vhdl = NULL;
+    ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0,
+		0, 0, 0,
+		S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+		(struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
+    ASSERT(ctlr_vhdl != NULL);
+
+    id = pcireg_id_get(bridge);
+    hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV,
+                         (arbitrary_info_t)XWIDGET_PART_REV_NUM(id));
+
+    /*
+     * Get the hint structure; if some NIC callback marked this vertex as
+     * "hands-off" then we just return here, before doing anything else.
+     */
+    pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);
+
+    if (pcibr_hints && pcibr_hints->ph_hands_off)
+        return -1;
+
+    /* allocate soft structure to hang off the vertex.  Link the new soft
+     * structure to the pcibr_list linked list
+     */
+    NEW(pcibr_soft);
+    NEW(self);
+    self->bl_soft = pcibr_soft;
+    self->bl_vhdl = pcibr_vhdl;
+    self->bl_next = pcibr_list;
+    pcibr_list = self;
+
+    if (ret_softp)
+        *ret_softp = pcibr_soft;
+
+    memset(pcibr_soft, 0, sizeof *pcibr_soft);
+    pcibr_soft_set(pcibr_vhdl, pcibr_soft);
+
+    s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
+    pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
+    strcpy(pcibr_soft->bs_name, s);
+
+    pcibr_soft->bs_conn = xconn_vhdl;
+    pcibr_soft->bs_vhdl = pcibr_vhdl;
+    pcibr_soft->bs_base = (void *)bridge;
+    pcibr_soft->bs_rev_num = XWIDGET_PART_REV_NUM(id);
+    pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *)pcibr_intr_bits;
+    pcibr_soft->bsi_err_intr = 0;
+    pcibr_soft->bs_min_slot = 0;
+    pcibr_soft->bs_max_slot = 3;
+    pcibr_soft->bs_busnum = busnum;
+    pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
+    pcibr_soft->bs_int_ate_size = PIC_INTERNAL_ATES;
+    pcibr_soft->bs_bridge_mode = (pcireg_speed_get(pcibr_soft) << 1) |
+                                  pcireg_mode_get(pcibr_soft);
+
+    info = xwidget_info_get(xconn_vhdl);
+    pcibr_soft->bs_xid = xwidget_info_id_get(info);
+    pcibr_soft->bs_master = xwidget_info_master_get(info);
+    pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);
+
+    strcpy(pcibr_soft->bs_asic_name, "PIC");
+
+    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
+                "pic_attach2: pcibr_soft=0x%lx, mode=0x%x\n",
+                pcibr_soft, pcibr_soft->bs_bridge_mode));
+
+    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
+                "pic_attach2: %s ASIC: rev %s (code=0x%x)\n",
+                pcibr_soft->bs_asic_name,
+                (IS_PIC_PART_REV_A(pcibr_soft->bs_rev_num)) ? "A" :
+                (IS_PIC_PART_REV_B(pcibr_soft->bs_rev_num)) ? "B" :
+                (IS_PIC_PART_REV_C(pcibr_soft->bs_rev_num)) ? "C" :
+                "unknown", pcibr_soft->bs_rev_num));
+
+    /* PV854845: Must clear write request buffer to avoid parity errors */
+    for (i=0; i < PIC_WR_REQ_BUFSIZE; i++) {
+        ((pic_t *)bridge)->p_wr_req_lower[i] = 0;
+        ((pic_t *)bridge)->p_wr_req_upper[i] = 0;
+        ((pic_t *)bridge)->p_wr_req_parity[i] = 0;
+    }
+
+    pcibr_soft->bs_nasid = NASID_GET(bridge);
+
+    pcibr_soft->bs_bricktype = iobrick_type_get_nasid(pcibr_soft->bs_nasid);
+    if (pcibr_soft->bs_bricktype < 0)
+        KERN_MSG(K_WARN, "%s: bricktype was unknown by L1 (ret val = 0x%x)\n",
+                pcibr_soft->bs_name, pcibr_soft->bs_bricktype);
+
+    pcibr_soft->bs_moduleid = iobrick_module_get_nasid(pcibr_soft->bs_nasid);
+
+    if (pcibr_soft->bs_bricktype > 0) {
+        switch (pcibr_soft->bs_bricktype) {
+	case MODULE_PXBRICK:
+	case MODULE_IXBRICK:
+	case MODULE_OPUSBRICK:
+            pcibr_soft->bs_first_slot = 0;
+            pcibr_soft->bs_last_slot = 1;
+            pcibr_soft->bs_last_reset = 1;
+
+            /* Bus 1 of IXBrick has an IO9, so there are 4 devices, not 2 */
+	    if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) 
+		    && isIO9(pcibr_soft->bs_nasid)) {
+                pcibr_soft->bs_last_slot = 3;
+                pcibr_soft->bs_last_reset = 3;
+            }
+            break;
+
+        case MODULE_CGBRICK:
+            pcibr_soft->bs_first_slot = 0;
+            pcibr_soft->bs_last_slot = 0;
+            pcibr_soft->bs_last_reset = 0;
+            break;
+
+        default:
+	    KERN_MSG(K_WARN, "%s: Unknown bricktype: 0x%x\n",
+                    pcibr_soft->bs_name, pcibr_soft->bs_bricktype);
+            break;
+        }
+
+        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
+                    "pic_attach2: bricktype=%d, slots %d-%d\n",
+                    pcibr_soft->bs_bricktype,
+                    pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot));
+    }
+
+    /*
+     * Initialize bridge and bus locks
+     */
+    spin_lock_init(&pcibr_soft->bs_lock);
+#ifdef PCI_HOTPLUG
+    mrinit(pcibr_soft->bs_bus_lock, "bus_lock");
+#endif
+
+{ /*habeck: mv to common routine in pcibr_hints.c? */
+    /*
+     * If we have one, process the hints structure.
+     */
+    if (pcibr_hints) {
+        unsigned	rrb_fixed;
+        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
+                    "pic_attach2: pcibr_hints=0x%lx\n", pcibr_hints));
+
+        rrb_fixed = pcibr_hints->ph_rrb_fixed;
+
+        pcibr_soft->bs_rrb_fixed = rrb_fixed;
+
+        if (pcibr_hints->ph_intr_bits)
+            pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
+
+
+        for (slot = pcibr_soft->bs_min_slot;
+                                slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
+            int hslot = pcibr_hints->ph_host_slot[slot] - 1;
+
+            if (hslot < 0) {
+                pcibr_soft->bs_slot[slot].host_slot = slot;
+            } else {
+                pcibr_soft->bs_slot[slot].has_host = 1;
+                pcibr_soft->bs_slot[slot].host_slot = hslot;
+            }
+        }
+    }
+} /*habeck: endif. mv to common routine*/
+
+    /*
+     * Set-up initial values for state fields
+     */
+    for (slot = pcibr_soft->bs_min_slot;
+                                slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
+        pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
+        pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
+        pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
+        pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
+        pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
+    }
+
+    for (ibit = 0; ibit < 8; ++ibit) {
+        pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
+        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
+        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
+        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
+        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
+        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
+        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
+    }
+
+
+    /*
+     * connect up our error handler.  PIC has 2 busses (thus resulting in 2
+     * pcibr_soft structs under 1 widget), so only register a xwidget error
+     * pcibr_soft structs under 1 widget), so only register an xwidget error
+     * is a wrapper routine we register that will call the real error handler
+     * pcibr_error_handler() with the correct pcibr_soft struct.
+     */
+    if (busnum == 0) {
+        xwidget_error_register(xconn_vhdl,
+                                pcibr_error_handler_wrapper, pcibr_soft);
+    }
+
+    /*
+     * Clear all pending interrupts.  Assume all interrupts are from slot 3
+     * until otherwise set up.
+     */
+    pcireg_intr_reset_set(pcibr_soft, PCIBR_IRR_ALL_CLR);
+    pcireg_intr_device_set(pcibr_soft, 0x006db6db);
+
+    /* Setup the mapping register used for direct mapping */
+    pcibr_directmap_init(pcibr_soft);
+
+    /*
+     * Initialize the PIC's control register.
+     */
+    pic_ctrl_reg = pcireg_control_get(pcibr_soft);
+
+    /* Bridge's Requester ID: bus = busnum, dev = 0, func = 0 */
+    pic_ctrl_reg &= ~PIC_CTRL_BUS_NUM_MASK;
+    pic_ctrl_reg |= PIC_CTRL_BUS_NUM(busnum);
+    pic_ctrl_reg &= ~PIC_CTRL_DEV_NUM_MASK;
+    pic_ctrl_reg &= ~PIC_CTRL_FUN_NUM_MASK;
+
+    pic_ctrl_reg &= ~PIC_CTRL_NO_SNOOP;
+    pic_ctrl_reg &= ~PIC_CTRL_RELAX_ORDER;
+
+    /* enable parity checking on PIC's internal RAM */
+    pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
+    pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;
+
+    /* PIC BRINGUP WAR (PV# 862253): don't enable write request parity */
+    if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
+        pic_ctrl_reg |= PIC_CTRL_PAR_EN_REQ;
+    }
+
+    pic_ctrl_reg |= PIC_CTRL_PAGE_SIZE;
+
+    pcireg_control_set(pcibr_soft, pic_ctrl_reg);
+
+    /* Initialize internal mapping entries (ie. the ATEs) */
+    for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++)
+	pcireg_int_ate_set(pcibr_soft, entry, 0);
+
+    pcibr_soft->bs_int_ate_resource.start = 0;
+    pcibr_soft->bs_int_ate_resource.end = pcibr_soft->bs_int_ate_size - 1;
+    pcibr_soft->bs_allocated_ate_res = (void *) kmalloc(pcibr_soft->bs_int_ate_size * sizeof(unsigned long), GFP_KERNEL);
+    memset(pcibr_soft->bs_allocated_ate_res, 0x0, pcibr_soft->bs_int_ate_size * sizeof(unsigned long));
+
+    /* Setup the PIC's error interrupt handler. */
+    xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);
+
+    ASSERT(xtalk_intr != NULL);
+
+    irq = ((hub_intr_t)xtalk_intr)->i_bit;
+    cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;
+
+    intr_unreserve_level(cpu, irq);
+    ((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
+    xtalk_intr->xi_vector = SGI_PCIBR_ERROR;
+
+    pcibr_soft->bsi_err_intr = xtalk_intr;
+
+    /*
+     * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
+     * in order to work around some addressing limitations.  In order
+     * for that fire wall to work properly, we need to make sure we
+     * start from a known clean state.
+     */
+    pcibr_clearwidint(bridge);
+
+    xtalk_intr_connect(xtalk_intr,
+		       (intr_func_t) pcibr_error_intr_handler,
+		       (intr_arg_t) pcibr_soft,
+		       (xtalk_intr_setfunc_t) pcibr_setwidint,
+		       (void *) bridge);
+
+    request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, SA_SHIRQ, 
+			"PCIBR error", (intr_arg_t) pcibr_soft);
+
+    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
+		"pcibr_setwidint: target_id=0x%lx, int_addr=0x%lx\n",
+		pcireg_intr_dst_target_id_get(pcibr_soft),
+		pcireg_intr_dst_addr_get(pcibr_soft)));
+
+    /* now we can start handling error interrupts */
+    int_enable = pcireg_intr_enable_get(pcibr_soft);
+    int_enable |= PCIBR_ISR_ERRORS;
+
+    /* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
+     * locked out to be freed up sooner (by timing out) so that the
+     * read tnums are never completely used up.
+     */
+    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
+	int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
+	int_enable &= ~PIC_ISR_XREAD_REQ_TIMEOUT;
+
+	pcireg_req_timeout_set(pcibr_soft, 0x750);
+    }
+
+    pcireg_intr_enable_set(pcibr_soft, int_enable);
+    pcireg_intr_mode_set(pcibr_soft, 0); /* don't send 'clear interrupt' pkts */
+    pcireg_tflush_get(pcibr_soft);       /* wait until Bridge PIO complete */
+
+    /*
+     * PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
+     * RRB0, RRB8, RRB1, and RRB9.  Assign them to DEVICE[2|3]--VCHAN3
+     * so they are not used.  This works since there is currently no
+     * API to enable VCHAN3.
+     */
+    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
+	pcireg_rrb_bit_set(pcibr_soft, 0, 0x000f000f);	/* even rrb reg */
+	pcireg_rrb_bit_set(pcibr_soft, 1, 0x000f000f);	/* odd rrb reg */
+    }
+
+    /* PIC only supports 64-bit direct mapping in PCI-X mode.  Since
+     * all PCI-X devices that initiate memory transactions must be
+     * capable of generating 64-bit addresses, we force 64-bit DMAs.
+     */
+    pcibr_soft->bs_dma_flags = 0;
+    if (IS_PCIX(pcibr_soft)) {
+	pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
+    }
+
+    {
+
+    iopaddr_t		    prom_base_addr = pcibr_soft->bs_xid << 24;
+    int			    prom_base_size = 0x1000000;
+    int			    status;
+    struct resource	    *res;
+
+    /* Allocate resource maps based on bus page size; for I/O and memory
+     * space, free all pages except those in the base area and in the
+     * range set by the PROM.
+     *
+     * PROM creates BAR addresses in this format: 0x0ws00000 where w is
+     * the widget number and s is the device register offset for the slot.
+     */
+
+    /* Setup the Bus's PCI IO Root Resource. */
+    pcibr_soft->bs_io_win_root_resource.start = PCIBR_BUS_IO_BASE;
+    pcibr_soft->bs_io_win_root_resource.end = 0xffffffff;
+    res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
+    if (!res)
+	panic("PCIBR:Unable to allocate resource structure\n");
+
+    /* Block off the range used by PROM. */
+    res->start = prom_base_addr;
+    res->end = prom_base_addr + (prom_base_size - 1);
+    status = request_resource(&pcibr_soft->bs_io_win_root_resource, res);
+    if (status)
+	panic("PCIBR:Unable to request_resource()\n");
+
+    /* Setup the Small Window Root Resource */
+    pcibr_soft->bs_swin_root_resource.start = PAGE_SIZE;
+    pcibr_soft->bs_swin_root_resource.end = 0x000FFFFF;
+
+    /* Setup the Bus's PCI Memory Root Resource */
+    pcibr_soft->bs_mem_win_root_resource.start = 0x200000;
+    pcibr_soft->bs_mem_win_root_resource.end = 0xffffffff;
+    res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
+    if (!res)
+	panic("PCIBR:Unable to allocate resource structure\n");
+
+    /* Block off the range used by PROM. */
+    res->start = prom_base_addr;
+    res->end = prom_base_addr + (prom_base_size - 1);
+    status = request_resource(&pcibr_soft->bs_mem_win_root_resource, res);
+    if (status)
+	panic("PCIBR:Unable to request_resource()\n");
+
+    }
+
+
+    /* build "no-slot" connection point */
+    pcibr_info = pcibr_device_info_new(pcibr_soft, PCIIO_SLOT_NONE,
+		 PCIIO_FUNC_NONE, PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
+    noslot_conn = pciio_device_info_register(pcibr_vhdl, &pcibr_info->f_c);
+
+    /* Store no slot connection point info for tearing it down during detach. */
+    pcibr_soft->bs_noslot_conn = noslot_conn;
+    pcibr_soft->bs_noslot_info = pcibr_info;
+
+    for (slot = pcibr_soft->bs_min_slot;
+				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
+	/* Find out what is out there */
+	(void)pcibr_slot_info_init(pcibr_vhdl, slot);
+    }
+
+    for (slot = pcibr_soft->bs_min_slot;
+				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
+	/* Set up the address space for this slot in the PCI land */
+	(void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);
+    }
+
+    for (slot = pcibr_soft->bs_min_slot;
+				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
+	/* Setup the device register */
+	(void)pcibr_slot_device_init(pcibr_vhdl, slot);
+    }
+
+    if (IS_PCIX(pcibr_soft)) {
+	pcibr_soft->bs_pcix_rbar_inuse = 0;
+	pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
+	pcibr_soft->bs_pcix_rbar_percent_allowed =
+					pcibr_pcix_rbars_calc(pcibr_soft);
+
+	for (slot = pcibr_soft->bs_min_slot;
+				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
+	    /* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
+	    (void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
+	}
+    }
+
+    for (slot = pcibr_soft->bs_min_slot;
+				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
+	/* Setup host/guest relations */
+	(void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);
+    }
+
+    /* Handle initial RRB management */
+    pcibr_initial_rrb(pcibr_vhdl,
+		      pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot);
+
+   /* Before any drivers get called that may want to re-allocate RRB's,
+    * let's get some special cases pre-allocated. Drivers may override
+    * these pre-allocations, but by doing pre-allocations now we're
+    * assured not to step all over what the driver intended.
+    */
+    if (pcibr_soft->bs_bricktype > 0) {
+	switch (pcibr_soft->bs_bricktype) {
+	case MODULE_PXBRICK:
+	case MODULE_IXBRICK:
+	case MODULE_OPUSBRICK:
+		/*
+		 * If IO9 in bus 1, allocate RRBs to all the IO9 devices
+		 */
+		if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) &&
+		    (pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9) &&
+		    (pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
+			pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
+			pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
+			pcibr_rrb_alloc_init(pcibr_soft, 2, VCHAN0, 4);
+			pcibr_rrb_alloc_init(pcibr_soft, 3, VCHAN0, 4);
+		} else {
+			pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
+			pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
+		}
+		break;
+
+	case MODULE_CGBRICK:
+		pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 8);
+		break;
+	} /* switch */
+    }
+
+
+    for (slot = pcibr_soft->bs_min_slot;
+				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
+	/* Call the device attach */
+	(void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
+    }
+
+    pciio_device_attach(noslot_conn, 0);
+
+    return 0;
+}
+
+
 /*
  * pci provider functions
  *
@@ -289,15 +790,15 @@
     (pciio_provider_startup_f *) pcibr_provider_startup,
     (pciio_provider_shutdown_f *) pcibr_provider_shutdown,
     (pciio_reset_f *) pcibr_reset,
-    (pciio_write_gather_flush_f *) pcibr_write_gather_flush,
     (pciio_endian_set_f *) pcibr_endian_set,
-    (pciio_priority_set_f *) pcibr_priority_set,
     (pciio_config_get_f *) pcibr_config_get,
     (pciio_config_set_f *) pcibr_config_set,
-    (pciio_error_devenable_f *) 0,
-    (pciio_error_extract_f *) 0,
+
+    (pciio_error_devenable_f *) pcibr_error_devenable,
+    (pciio_error_extract_f *) pcibr_error_extract,
+
     (pciio_driver_reg_callback_f *) pcibr_driver_reg_callback,
     (pciio_driver_unreg_callback_f *) pcibr_driver_unreg_callback,
     (pciio_device_unregister_f 	*) pcibr_device_unregister,
-    (pciio_dma_enabled_f		*) pcibr_dma_enabled,
+    (pciio_businfo_get_f *) pcibr_businfo_get,
 };
diff -Nru a/arch/ia64/sn/io/sn2/shub.c b/arch/ia64/sn/io/sn2/shub.c
--- a/arch/ia64/sn/io/sn2/shub.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/shub.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -7,18 +6,18 @@
  * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-#ident  "$Revision: 1.167 $"
-
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
 #include <asm/smp.h>
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
 #include <asm/system.h>
 #include <asm/sn/sgi.h>
+#include <asm/uaccess.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/io.h>
@@ -30,51 +29,25 @@
 #include <asm/sn/xtalk/xtalk.h>
 #include <asm/sn/pci/pcibr_private.h>
 #include <asm/sn/intr.h>
+#include <asm/sn/sn2/shub_mmr.h>
 #include <asm/sn/sn2/shub_mmr_t.h>
 #include <asm/sal.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/sndrv.h>
 #include <asm/sn/sn2/shubio.h>
-#include <asm/sn/sn2/shub_mmr.h>
-
-/*
- * Shub WAR for Xbridge Little Endian problem:
- *	Xbridge has to run in BIG ENDIAN even with Shub.
- */
-
-
-/*
- * io_sh_swapper: Turn on Shub byte swapping.
- *	All data destined to and from Shub to XIO are byte-swapped.
- */
-void
-io_sh_swapper(nasid_t nasid, int onoff)
-{
-    ii_iwc_u_t      ii_iwc;
-
-    ii_iwc.ii_iwc_regval = REMOTE_HUB_L(nasid, IIO_IWC);
 
-    ii_iwc.ii_iwc_fld_s.i_dma_byte_swap = onoff;
-    REMOTE_HUB_S(nasid, IIO_IWC, ii_iwc.ii_iwc_regval);
-    ii_iwc.ii_iwc_regval = REMOTE_HUB_L(nasid, IIO_IWC);
-
-}
+#define SHUB_NUM_ECF_REGISTERS 8
 
 /*
- * io_get_sh_swapper: Return current Swap mode.
- *	1 = Swap on, 0 = Swap off.
+ * A backport of the 2.5 scheduler is used by many vendors of 2.4-based
+ * distributions.
+ * We can only guess its presence by the lack of the SCHED_YIELD flag.
+ * If the heuristic doesn't work, change this define by hand.
  */
-int
-io_get_sh_swapper(nasid_t nasid)
-{
-    ii_iwc_u_t      ii_iwc;
+#ifndef SCHED_YIELD
+#define __HAVE_NEW_SCHEDULER    1
+#endif
 
-    ii_iwc.ii_iwc_regval = REMOTE_HUB_L(nasid, IIO_IWC);
-    return(ii_iwc.ii_iwc_fld_s.i_dma_byte_swap);
-
-}
-
-#define SHUB_NUM_ECF_REGISTERS 8
 
 static uint32_t	shub_perf_counts[SHUB_NUM_ECF_REGISTERS];
 
@@ -204,6 +177,8 @@
 	int		nasid;
 
         cnode = (cnodeid_t)file->f_dentry->d_fsdata;
+        if (cnode < 0 || cnode >= numnodes)
+                return -ENODEV;
 
         switch (cmd) {
 	case SNDRV_SHUB_CONFIGURE:
@@ -245,7 +220,7 @@
 }
 
 struct file_operations shub_mon_fops = {
-	        .ioctl          = shubstats_ioctl,
+	        .ioctl         = shubstats_ioctl,
 };
 
 /*
@@ -414,9 +389,8 @@
 }
 
 int
-sn_linkstats_get(char *page)
+sn_linkstats_get(struct seq_file *p, void *v)
 {
-	int			n = 0;
 	int			cnode;
 	int			nlport;
 	struct s_linkstats	*lsp;
@@ -432,10 +406,10 @@
 	spin_lock(&sn_linkstats_lock);
 	secs = (jiffies - sn_linkstats_starttime) / HZ;
 
-	n += sprintf(page, "# SGI Numalink stats v1 : %lu samples, %lu o/flows, update %lu msecs\n",
+	seq_printf(p, "# SGI Numalink stats v1 : %lu samples, %lu o/flows, update %lu msecs\n",
 		sn_linkstats_samples, sn_linkstats_overflows, sn_linkstats_update_msecs);
 
-	n += sprintf(page+n, "%-37s %8s %8s %8s %8s\n",
+	seq_printf(p, "%-37s %8s %8s %8s %8s\n",
 		"# Numalink", "sn errs", "cb errs", "cb/min", "retries");
 
 	for (lsp=sn_linkstats, cnode=0; cnode < numnodes; cnode++, lsp++) {
@@ -447,11 +421,7 @@
 			snsum += lsp->hs_ni_sn_errors[nlport];
 			retrysum += lsp->hs_ni_retry_errors[nlport];
 
-			/* avoid buffer overrun (should be using seq_read API) */
-			if (numnodes > 64)
-				continue;
-
-			n += sprintf(page + n, "/%s/link/%d  %8lu %8lu %8s %8lu\n",
+			seq_printf(p, "/%s/link/%d  %8lu %8lu %8s %8lu\n",
 			    npda->hwg_node_name, nlport+1, lsp->hs_ni_sn_errors[nlport],
 			    lsp->hs_ni_cb_errors[nlport], 
 			    rate_per_minute(lsp->hs_ni_cb_errors[nlport], secs),
@@ -460,7 +430,7 @@
 
 		/* one II port on each SHub (may not be connected) */
 		if (lsp->hs_ii_up) {
-		    n += sprintf(page + n, "/%s/xtalk   %8lu %8lu %8s %8lu\n",
+		    seq_printf(p, "/%s/xtalk   %8lu %8lu %8s %8lu\n",
 			npda->hwg_node_name, lsp->hs_ii_sn_errors,
 			lsp->hs_ii_cb_errors, rate_per_minute(lsp->hs_ii_cb_errors, secs),
 			lsp->hs_ii_retry_errors);
@@ -471,17 +441,17 @@
 		}
 	}
 
-	n += sprintf(page + n, "%-37s %8lu %8lu %8s %8lu\n",
+	seq_printf(p, "%-37s %8lu %8lu %8s %8lu\n",
 		"System wide NL totals", snsum, cbsum, 
 		rate_per_minute(cbsum, secs), retrysum);
 
-	n += sprintf(page + n, "%-37s %8lu %8lu %8s %8lu\n",
+	seq_printf(p, "%-37s %8lu %8lu %8s %8lu\n",
 		"System wide II totals", snsum_ii, cbsum_ii, 
 		rate_per_minute(cbsum_ii, secs), retrysum_ii);
 
 	spin_unlock(&sn_linkstats_lock);
 
-	return n;
+	return 0;
 }
 
 static int __init
diff -Nru a/arch/ia64/sn/io/sn2/shub_intr.c b/arch/ia64/sn/io/sn2/shub_intr.c
--- a/arch/ia64/sn/io/sn2/shub_intr.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/shub_intr.c	Thu Nov  6 13:42:35 2003
@@ -18,7 +18,6 @@
 #include <asm/sn/io.h>
 #include <asm/sn/sn_private.h>
 #include <asm/sn/addrs.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/hcl_util.h>
 #include <asm/sn/intr.h>
@@ -26,6 +25,8 @@
 #include <asm/sn/klconfig.h>
 #include <asm/sn/sn2/shub_mmr.h>
 #include <asm/sn/sn_cpuid.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/pci/pcibr_private.h>
 
 /* ARGSUSED */
 void
@@ -36,11 +37,15 @@
 xwidgetnum_t
 hub_widget_id(nasid_t nasid)
 {
-        hubii_wcr_t     ii_wcr; /* the control status register */
-        
-        ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid,IIO_WCR);
-        
-        return ii_wcr.wcr_fields_s.wcr_widget_id;
+
+	if (!(nasid & 1)) {
+        	hubii_wcr_t     ii_wcr; /* the control status register */
+        	ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid,IIO_WCR);
+        	return ii_wcr.wcr_fields_s.wcr_widget_id;
+	} else {
+		/* ICE does not have widget id. */
+		return(-1);
+	}
 }
 
 static hub_intr_t
@@ -49,7 +54,7 @@
 		vertex_hdl_t owner_dev,
 		int uncond_nothread)
 {
-	cpuid_t		cpu = 0;
+	cpuid_t		cpu;
 	int		vector;
 	hub_intr_t	intr_hdl;
 	cnodeid_t	cnode;
@@ -58,9 +63,9 @@
 	iopaddr_t	xtalk_addr;
 	struct xtalk_intr_s	*xtalk_info;
 	xwidget_info_t	xwidget_info;
-	ilvl_t		intr_swlevel = 0;
 
 	cpu = intr_heuristic(dev, -1, &vector);
+
 	if (cpu == CPU_NONE) {
 		printk("Unable to allocate interrupt for 0x%p\n", (void *)owner_dev);
 		return(0);
@@ -77,8 +82,9 @@
 		xtalk_addr = SH_II_INT0 | ((unsigned long)nasid << 36) | (1UL << 47);
 	}
 
-	intr_hdl = snia_kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, cnode);
+	intr_hdl = kmalloc(sizeof(struct hub_intr_s), GFP_KERNEL);
 	ASSERT_ALWAYS(intr_hdl);
+	memset(intr_hdl, 0, sizeof(struct hub_intr_s));
 
 	xtalk_info = &intr_hdl->i_xtalk_info;
 	xtalk_info->xi_dev = dev;
@@ -90,12 +96,11 @@
 		xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);
 	}
 
-	intr_hdl->i_swlevel = intr_swlevel;
 	intr_hdl->i_cpuid = cpu;
 	intr_hdl->i_bit = vector;
 	intr_hdl->i_flags |= HUB_INTR_IS_ALLOCED;
 
-	return(intr_hdl);
+	return intr_hdl;
 }
 
 hub_intr_t
@@ -184,4 +189,73 @@
 	rv = intr_disconnect_level(cpu, bit);
 	ASSERT(rv == 0);
 	intr_hdl->i_flags &= ~HUB_INTR_IS_CONNECTED;
+}
+
+/* 
+ * Redirect an interrupt to another cpu.
+ */
+
+void
+sn_shub_redirect_intr(pcibr_intr_t intr, unsigned long cpu)
+{
+	unsigned long bit;
+	int cpuphys, slice;
+	nasid_t nasid;
+	unsigned long xtalk_addr;
+	pci_bridge_t	*bridge = intr->bi_soft->bs_base;
+	int		irq;
+	int		i;
+	int		old_cpu;
+	int		new_cpu;
+
+	cpuphys = cpu_physical_id(cpu);
+	slice = cpu_physical_id_to_slice(cpuphys);
+	nasid = cpu_physical_id_to_nasid(cpuphys);
+
+	for (i = CPUS_PER_NODE - 1; i >= 0; i--) {
+		new_cpu = nasid_slice_to_cpuid(nasid, i);
+		if (new_cpu == NR_CPUS) {
+			continue;
+		}
+
+		if (!cpu_online(new_cpu)) {
+			continue;
+		}
+		break;
+	}
+
+	if (enable_shub_wars_1_1() && slice != i) {
+		printk("smp_affinity WARNING: SHUB 1.1 present: cannot target cpu %d, targeting cpu %d instead.\n",(int)cpu, new_cpu);
+		cpu = new_cpu;
+		slice = i;
+	}
+
+	if (slice) {    
+		xtalk_addr = SH_II_INT1 | ((unsigned long)nasid << 36) | (1UL << 47);
+	} else {
+		xtalk_addr = SH_II_INT0 | ((unsigned long)nasid << 36) | (1UL << 47);
+	}
+
+	for (bit = 0; bit < 8; bit++) {
+		if (intr->bi_ibits & (1 << bit) ) {
+			/* Disable interrupts. */
+			pcireg_intr_enable_bit_clr(bridge, bit);
+			/* Reset Host address (Interrupt destination) */
+			pcireg_intr_addr_addr_set(bridge, bit, xtalk_addr);
+			/* Enable interrupt */
+			pcireg_intr_enable_bit_set(bridge, bit);
+			/* Force an interrupt, just in case. */
+			pcireg_force_intr_set(bridge, bit);
+		}
+	}
+	irq = intr->bi_irq;
+	old_cpu = intr->bi_cpu;
+	if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
+		pdacpu(cpu)->sn_first_irq = irq;
+	}
+	if (pdacpu(cpu)->sn_last_irq < irq) {
+		pdacpu(cpu)->sn_last_irq = irq;
+	}
+	pdacpu(old_cpu)->sn_num_irqs--;
+	pdacpu(cpu)->sn_num_irqs++;
+	intr->bi_cpu = (int)cpu;
 }
diff -Nru a/arch/ia64/sn/io/sn2/shuberror.c b/arch/ia64/sn/io/sn2/shuberror.c
--- a/arch/ia64/sn/io/sn2/shuberror.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/shuberror.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id: shuberror.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -14,10 +13,10 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/smp.h>
+#include <asm/delay.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/io.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/sn_private.h>
@@ -173,7 +172,6 @@
     hubinfo_t		hinfo; 
     ii_wstat_u_t	wstat;
     hubreg_t		idsr;
-    ii_ilcsr_u_t	ilcsr;
 
 
     /* two levels of casting avoids compiler warning.!! */
@@ -228,11 +226,9 @@
 	
 	}
 	/*
-	 * Note: we may never be able to print this, if the II talking
-	 * to Xbow which hosts the console is dead. 
+	 * Only print the II_ECRAZY message if there is an attached xbow.
 	 */
-	ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(hinfo->h_nasid, IIO_ILCSR);
-	if (ilcsr.ii_ilcsr_fld_s.i_llp_en == 1) {	/* Link is enabled */
+	if (NODEPDA(hinfo->h_cnodeid)->xbow_vhdl != 0) {
 	    printk("Hub %d, cnode %d to Xtalk Link failed (II_ECRAZY) Reason: %s", 
 		hinfo->h_nasid, hinfo->h_cnodeid, reason);
 	}
@@ -299,7 +295,7 @@
 	* Wait till hub indicates it's done.
 	*/
 	while (REMOTE_HUB_L(hinfo->h_nasid, IIO_ICDR) & IIO_ICDR_PND)
-		us_delay(1);
+		udelay(1);
 
 }
 
diff -Nru a/arch/ia64/sn/io/sn2/shubio.c b/arch/ia64/sn/io/sn2/shubio.c
--- a/arch/ia64/sn/io/sn2/shubio.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/shubio.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id: shubio.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -14,7 +13,6 @@
 #include <asm/sn/sgi.h>
 #include <asm/sn/io.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/sn_private.h>
@@ -310,8 +308,7 @@
 			if (!is_widget_pio_enabled(ioerror)) {
 				if (error_state_get(hub_v) == 
 				    ERROR_STATE_ACTION)
-					ioerror_dump("No outbound widget"
-						     " access - ", 
+					snia_ioerror_dump("No outbound widget access - ", 
 						     error_code, mode, ioerror);
 				return(IOERROR_HANDLED);
 			}
@@ -352,8 +349,7 @@
 			if (!is_widget_pio_enabled(ioerror)) {
 				if (error_state_get(hub_v) == 
 				    ERROR_STATE_ACTION)
-					ioerror_dump("No outbound widget"
-						     " access - ", 
+					snia_ioerror_dump("No outbound widget access - ", 
 						     error_code, mode, ioerror);
 				return(IOERROR_HANDLED);
 			}
@@ -478,7 +474,7 @@
 error_state_set(vertex_hdl_t v,error_state_t new_state)
 {
         error_state_t   old_state;
-        boolean_t       replace = B_TRUE;
+        int       replace = 1;
 
         /* Check if we have a valid hwgraph vertex */
         if ( v == (vertex_hdl_t)0 )
@@ -497,7 +493,7 @@
          * for this vertex.
          */
         if (v_error_state_get(v,old_state) != GRAPH_SUCCESS)
-                replace = B_FALSE;
+                replace = 0;
 
         if (v_error_state_set(v,new_state,replace) != GRAPH_SUCCESS) {
                 return(ERROR_RETURN_CODE_CANNOT_SET_STATE);
diff -Nru a/arch/ia64/sn/io/sn2/tio.c b/arch/ia64/sn/io/sn2/tio.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/sn2/tio.c	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,743 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/pio.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/sn2/tio.h>
+
+extern xtalk_provider_t tio_provider;
+extern void tio_intr_init(vertex_hdl_t hubv);
+
+static int force_fire_and_forget = 1;
+static int ignore_conveyor_override;
+
+/*
+ * Terminology:
+ *   xtalk: generic "iobus" implementation.  The specific, HW instantiation of
+ *      xtalk is, currently, crosstalk and coretalk.  This is the coretalk provider.
+ *   widget:  xtalk bridge or device.  On crosstalk, an example would be xbridge or PIC.
+ *      On coretalk, examples would be corelets or SSP.
+ */
+
+/* 
+ * Implementation of TIO iobus operations.
+ *
+ * TIO provides a coretalk "iobus".  These routines
+ * provide a platform-specific implementation of generic xtalk.
+ *
+ * Called from corresponding xtalk_* routines.
+ */
+
+
+/* PIO MANAGEMENT */
+/* For mapping system virtual address space to xtalk space on a specified widget */
+
+/*
+ * Setup pio structures needed for a particular TIO.
+ */
+static void
+tio_pio_init(vertex_hdl_t hubv)
+{
+	xwidgetnum_t widget;
+	hubinfo_t hubinfo;
+	nasid_t nasid;
+	int bigwin;
+	hub_piomap_t hub_piomap;
+
+	hubinfo_get(hubv, &hubinfo);
+	nasid = hubinfo->h_nasid;
+
+	/* Initialize small window piomaps for this hub */
+	for (widget=0; widget <= TIO_WIDGET_ID_MAX; widget++) {
+		hub_piomap = hubinfo_swin_piomap_get(hubinfo, (int)widget);
+		hub_piomap->hpio_xtalk_info.xp_target = widget;
+		hub_piomap->hpio_xtalk_info.xp_xtalk_addr = 0;
+		hub_piomap->hpio_xtalk_info.xp_mapsz = TIO_SWIN_SIZE;
+		hub_piomap->hpio_xtalk_info.xp_kvaddr = (caddr_t)TIO_SWIN_BASE(nasid, widget);
+		hub_piomap->hpio_hub = hubv;
+		hub_piomap->hpio_flags = HUB_PIOMAP_IS_VALID;
+	}
+
+	/* Initialize big window piomaps for this hub */
+	for (bigwin=0; bigwin < TIO_NUM_BIG_WINDOW; bigwin++) {
+		hub_piomap = hubinfo_bwin_piomap_get(hubinfo, bigwin);
+		hub_piomap->hpio_xtalk_info.xp_mapsz = TIO_BWIN_SIZE;
+		hub_piomap->hpio_hub = hubv;
+		hub_piomap->hpio_holdcnt = 0;
+		hub_piomap->hpio_flags = HUB_PIOMAP_IS_BIGWINDOW;
+		TIO_ITTE_DISABLE(nasid, bigwin);
+	}
+
+	spin_lock_init(&hubinfo->h_bwlock);
+	/*
+	 * If this lock can be acquired from interrupts or bh's, add SV_INTS or
+	 * SV_BHS, respectively, to the flags here.
+	 */
+	init_waitqueue_head(&hubinfo->h_bwwait);
+}
+
+/* 
+ * Create a caddr_t-to-xtalk_addr mapping.
+ *
+ * Use a small window if possible (that's the usual case), but
+ * manage big windows if needed.  Big window mappings can be
+ * either FIXED or UNFIXED -- we keep at least 1 big window available
+ * for UNFIXED mappings.
+ *
+ * Returns an opaque pointer-sized type which can be passed to
+ * other hub_pio_* routines on success, or NULL if the request
+ * cannot be satisfied.
+ */
+/* ARGSUSED */
+hub_piomap_t
+tio_piomap_alloc(vertex_hdl_t dev,	/* set up mapping for this device */
+		device_desc_t dev_desc,	/* device descriptor */
+		iopaddr_t xtalk_addr,	/* map for this xtalk_addr range */
+		size_t byte_count,
+		size_t byte_count_max, 	/* maximum size of a mapping */
+		unsigned flags)		/* defined in sys/pio.h */
+{
+	xwidget_info_t widget_info = xwidget_info_get(dev);
+	xwidgetnum_t widget = xwidget_info_id_get(widget_info);
+	vertex_hdl_t hubv = xwidget_info_master_get(widget_info);
+	hubinfo_t hubinfo;
+	hub_piomap_t bw_piomap;
+	int bigwin, free_bw_index;
+	nasid_t nasid;
+	volatile hubreg_t junk;
+	caddr_t kvaddr;
+#ifdef PIOMAP_UNC_ACC_SPACE
+	uint64_t addr;
+#endif
+
+	/* sanity check */
+	if (byte_count_max > byte_count)
+		return(NULL);
+
+	hubinfo_get(hubv, &hubinfo);
+
+	/* If xtalk_addr range is mapped by a small window, we don't have 
+	 * to do much 
+	 */
+	if (xtalk_addr + byte_count <= TIO_SWIN_SIZE) {
+		hub_piomap_t piomap;
+
+		piomap = hubinfo_swin_piomap_get(hubinfo, (int)widget);
+#ifdef PIOMAP_UNC_ACC_SPACE
+		if (flags & PIOMAP_UNC_ACC) {
+			addr = (uint64_t)piomap->hpio_xtalk_info.xp_kvaddr;
+			addr |= PIOMAP_UNC_ACC_SPACE;
+			piomap->hpio_xtalk_info.xp_kvaddr = (caddr_t)addr;
+		}
+#endif
+		return piomap;
+	}
+
+	/* We need to use a big window mapping.  */
+
+	/*
+	 * TBD: Allow requests that would consume multiple big windows --
+	 * split the request up and use multiple mapping entries.
+	 * For now, reject requests that span big windows.
+	 */
+	if ((xtalk_addr % TIO_BWIN_SIZE) + byte_count > TIO_BWIN_SIZE)
+		return(NULL);
+
+
+	/* Round xtalk address down for big window alignment */
+	xtalk_addr = xtalk_addr & ~(TIO_BWIN_SIZE-1);
+
+	/*
+	 * Check to see if an existing big window mapping will suffice.
+	 */
+tryagain:
+	free_bw_index = -1;
+	spin_lock(&hubinfo->h_bwlock);
+	for (bigwin=0; bigwin < TIO_NUM_BIG_WINDOW; bigwin++) {
+		bw_piomap = hubinfo_bwin_piomap_get(hubinfo, bigwin);
+
+		/* If mapping is not valid, skip it */
+		if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_VALID)) {
+			free_bw_index = bigwin;
+			continue;
+		}
+
+		/* 
+		 * If mapping is UNFIXED, skip it.  We don't allow sharing
+		 * of UNFIXED mappings, because this would allow starvation.
+		 */
+		if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_FIXED))
+			continue;
+
+		if ( xtalk_addr == bw_piomap->hpio_xtalk_info.xp_xtalk_addr &&
+		     widget == bw_piomap->hpio_xtalk_info.xp_target) {
+			bw_piomap->hpio_holdcnt++;
+			spin_unlock(&hubinfo->h_bwlock);
+			return(bw_piomap);
+		}
+	}
+
+	/*
+	 * None of the existing big window mappings will work for us --
+	 * we need to establish a new mapping.
+	 */
+
+	/* Ensure that we don't consume all big windows with FIXED mappings */
+	if (flags & PIOMAP_FIXED) {
+		if (hubinfo->h_num_big_window_fixed < TIO_NUM_BIG_WINDOW-1) {
+			ASSERT(free_bw_index >= 0);
+			hubinfo->h_num_big_window_fixed++;
+		} else {
+			bw_piomap = NULL;
+			goto done;
+		}
+	} else /* PIOMAP_UNFIXED */ {
+		if (free_bw_index < 0) {
+			if (flags & PIOMAP_NOSLEEP) {
+				bw_piomap = NULL;
+				goto done;
+			} else {
+				DECLARE_WAITQUEUE(wait, current);
+				spin_unlock(&hubinfo->h_bwlock);
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				add_wait_queue_exclusive(&hubinfo->h_bwwait, &wait);
+				schedule();
+				remove_wait_queue(&hubinfo->h_bwwait, &wait);
+				goto tryagain;
+			}
+		}
+	}
+
+
+	/* OK!  Allocate big window free_bw_index for this mapping. */
+ 	/* 
+	 * The code below does a PIO write to setup an ITTE entry.
+	 * We need to prevent other CPUs from seeing our updated memory 
+	 * shadow of the ITTE (in the piomap) until the ITTE entry is 
+	 * actually set up; otherwise, another CPU might attempt a PIO 
+	 * prematurely.  
+	 *
+	 * Also, the only way we can know that an entry has been received 
+	 * by the hub and can be used by future PIO reads/writes is by 
+	 * reading back the ITTE entry after writing it.
+	 *
+	 * For these two reasons, we PIO read back the ITTE entry after
+	 * we write it.
+	 */
+
+	nasid = hubinfo->h_nasid;
+	TIO_ITTE_PUT(nasid, free_bw_index, widget, xtalk_addr, 1);	
+	junk = HUB_L(TIO_ITTE_GET(nasid, free_bw_index));
+
+	bw_piomap = hubinfo_bwin_piomap_get(hubinfo, free_bw_index);
+	bw_piomap->hpio_xtalk_info.xp_dev = dev;
+	bw_piomap->hpio_xtalk_info.xp_target = widget;
+	bw_piomap->hpio_xtalk_info.xp_xtalk_addr = xtalk_addr;
+	kvaddr = (caddr_t)NODE_BWIN_BASE(nasid, free_bw_index);
+#ifdef PIOMAP_UNC_ACC_SPACE
+	if (flags & PIOMAP_UNC_ACC) {
+		addr = (uint64_t)kvaddr;
+		addr |= PIOMAP_UNC_ACC_SPACE;
+		kvaddr = (caddr_t)addr;
+	}
+#endif
+	bw_piomap->hpio_xtalk_info.xp_kvaddr = kvaddr;
+	bw_piomap->hpio_holdcnt++;
+	bw_piomap->hpio_bigwin_num = free_bw_index;
+
+	if (flags & PIOMAP_FIXED)
+		bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID | HUB_PIOMAP_IS_FIXED;
+	else
+		bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID;
+
+done:
+	spin_unlock(&hubinfo->h_bwlock);
+	return(bw_piomap);
+}
+
+/*
+ * tio_piomap_free destroys a caddr_t-to-xtalk pio mapping and frees
+ * any associated mapping resources.  
+ *
+ * If this piomap was handled with a small window, or if it was handled
+ * in a big window that's still in use by someone else, then there's 
+ * nothing to do.  On the other hand, if this mapping was handled 
+ * with a big window, AND if we were the final user of that mapping, 
+ * then destroy the mapping.
+ */
+void
+tio_piomap_free(hub_piomap_t hub_piomap)
+{
+	vertex_hdl_t hubv;
+	hubinfo_t hubinfo;
+	nasid_t nasid;
+
+	/* 
+	 * Small windows are permanently mapped to corresponding widgets,
+	 * so there are no resources to free.
+	 */
+	if (!(hub_piomap->hpio_flags & HUB_PIOMAP_IS_BIGWINDOW))
+		return;
+
+	ASSERT(hub_piomap->hpio_flags & HUB_PIOMAP_IS_VALID);
+	ASSERT(hub_piomap->hpio_holdcnt > 0);
+
+	hubv = hub_piomap->hpio_hub;
+	hubinfo_get(hubv, &hubinfo);
+	nasid = hubinfo->h_nasid;
+
+	spin_lock(&hubinfo->h_bwlock);
+
+	/*
+	 * If this is the last hold on this mapping, free it.
+	 */
+	if (--hub_piomap->hpio_holdcnt == 0) {
+		TIO_ITTE_DISABLE(nasid, hub_piomap->hpio_bigwin_num );
+
+		if (hub_piomap->hpio_flags & HUB_PIOMAP_IS_FIXED) {
+			hub_piomap->hpio_flags &= ~(HUB_PIOMAP_IS_VALID | HUB_PIOMAP_IS_FIXED);
+			hubinfo->h_num_big_window_fixed--;
+			ASSERT(hubinfo->h_num_big_window_fixed >= 0);
+		} else
+			hub_piomap->hpio_flags &= ~HUB_PIOMAP_IS_VALID;
+
+		wake_up(&hubinfo->h_bwwait);
+	}
+
+	spin_unlock(&hubinfo->h_bwlock);
+}
+
+/*
+ * Establish a mapping to a given xtalk address range using the resources
+ * allocated earlier.
+ */
+caddr_t
+tio_piomap_addr(hub_piomap_t hub_piomap,	/* mapping resources */
+		iopaddr_t xtalk_addr,		/* map for this xtalk address */
+		size_t byte_count)		/* map this many bytes */
+{
+	/* Verify that range can be mapped using the specified piomap */
+	if (xtalk_addr < hub_piomap->hpio_xtalk_info.xp_xtalk_addr)
+		return(0);
+
+	if (xtalk_addr + byte_count > 
+		( hub_piomap->hpio_xtalk_info.xp_xtalk_addr + 
+			hub_piomap->hpio_xtalk_info.xp_mapsz))
+		return(0);
+
+	if (hub_piomap->hpio_flags & HUB_PIOMAP_IS_VALID)
+		return(hub_piomap->hpio_xtalk_info.xp_kvaddr + 
+			(xtalk_addr % hub_piomap->hpio_xtalk_info.xp_mapsz));
+	else
+		return(0);
+}
+
+
+/*
+ * Driver indicates that it's done with PIO's from an earlier piomap_addr.
+ */
+/* ARGSUSED */
+void
+tio_piomap_done(hub_piomap_t hub_piomap)	/* done with these mapping resources */
+{
+	/* Nothing to do */
+}
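+
+/*
+ * Illustrative sketch (not part of the driver): how a generic xtalk caller
+ * might walk the PIO mapping lifecycle implemented above.  The "dev",
+ * "xtalk_addr" and "nbytes" values are hypothetical placeholders.
+ *
+ *	hub_piomap_t map;
+ *	caddr_t kv;
+ *
+ *	map = tio_piomap_alloc(dev, (device_desc_t)0, xtalk_addr,
+ *			       nbytes, nbytes, PIOMAP_FIXED);
+ *	if (map != NULL) {
+ *		kv = tio_piomap_addr(map, xtalk_addr, nbytes);
+ *		... do PIO reads/writes through kv ...
+ *		tio_piomap_done(map);
+ *		tio_piomap_free(map);
+ *	}
+ */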
+
+
+/*
+ * For translations that require no mapping resources, supply a kernel virtual
+ * address that maps to the specified xtalk address range.
+ */
+/* ARGSUSED */
+caddr_t
+tio_piotrans_addr(	vertex_hdl_t dev,	/* translate to this device */
+			device_desc_t dev_desc,	/* device descriptor */
+			iopaddr_t xtalk_addr,	/* Crosstalk address */
+			size_t byte_count,	/* map this many bytes */
+			unsigned flags)		/* (currently unused) */
+{
+	xwidget_info_t widget_info = xwidget_info_get(dev);
+	xwidgetnum_t widget = xwidget_info_id_get(widget_info);
+	vertex_hdl_t hubv = xwidget_info_master_get(widget_info);
+	hub_piomap_t hub_piomap;
+	hubinfo_t hubinfo;
+	caddr_t addr;
+
+	hubinfo_get(hubv, &hubinfo);
+
+	if (xtalk_addr + byte_count <= TIO_SWIN_SIZE) {
+		hub_piomap = hubinfo_swin_piomap_get(hubinfo, (int)widget);
+		addr = hub_piomap_addr(hub_piomap, xtalk_addr, byte_count);
+#ifdef PIOMAP_UNC_ACC_SPACE
+		if (flags & PIOMAP_UNC_ACC) {
+			uint64_t iaddr;
+			iaddr = (uint64_t)addr;
+			iaddr |= PIOMAP_UNC_ACC_SPACE;
+			addr = (caddr_t)iaddr;
+		}
+#endif
+		return(addr);
+	} else
+		return(0);
+}
+
+
+/* DMA MANAGEMENT */
+/* Mapping from crosstalk space to system physical space */
+
+
+/*
+ * Allocate resources needed to set up DMA mappings up to a specified size
+ * on a specified adapter.
+ * 
+ * We don't actually use the adapter ID for anything.  It's just the adapter
+ * that the lower level driver plans to use for DMA.
+ */
+/* ARGSUSED */
+hub_dmamap_t
+tio_dmamap_alloc(	vertex_hdl_t dev,	/* set up mappings for this device */
+			device_desc_t dev_desc,	/* device descriptor */
+			size_t byte_count_max, 	/* max size of a mapping */
+			unsigned flags)		/* defined in dma.h */
+{
+	hub_dmamap_t dmamap;
+	xwidget_info_t widget_info = xwidget_info_get(dev);
+	xwidgetnum_t widget = xwidget_info_id_get(widget_info);
+	vertex_hdl_t hubv = xwidget_info_master_get(widget_info);
+
+	dmamap = kmalloc(sizeof(struct hub_dmamap_s), GFP_ATOMIC);
+	if (!dmamap)
+		return NULL;
+	dmamap->hdma_xtalk_info.xd_dev = dev;
+	dmamap->hdma_xtalk_info.xd_target = widget;
+	dmamap->hdma_hub = hubv;
+	dmamap->hdma_flags = HUB_DMAMAP_IS_VALID;
+ 	if (flags & XTALK_FIXED)
+		dmamap->hdma_flags |= HUB_DMAMAP_IS_FIXED;
+
+	return(dmamap);
+}
+
+/*
+ * Destroy a DMA mapping from crosstalk space to system address space.
+ * There is no actual mapping hardware to destroy, but we at least mark
+ * the dmamap INVALID and free the space that it took.
+ */
+void
+tio_dmamap_free(hub_dmamap_t hub_dmamap)
+{
+	hub_dmamap->hdma_flags &= ~HUB_DMAMAP_IS_VALID;
+	kfree(hub_dmamap);
+}
+
+/*
+ * Establish a DMA mapping using the resources allocated in a previous dmamap_alloc.
+ * Return an appropriate crosstalk address range that maps to the specified physical 
+ * address range.
+ */
+/* ARGSUSED */
+extern iopaddr_t
+tio_dmamap_addr(	hub_dmamap_t dmamap,	/* use these mapping resources */
+			paddr_t paddr,		/* map for this address */
+			size_t byte_count)	/* map this many bytes */
+{
+	vertex_hdl_t vhdl;
+
+	ASSERT(dmamap->hdma_flags & HUB_DMAMAP_IS_VALID);
+
+	if (dmamap->hdma_flags & HUB_DMAMAP_USED) {
+	    /* If the map is FIXED, re-use is OK. */
+	    if (!(dmamap->hdma_flags & HUB_DMAMAP_IS_FIXED)) {
+		char name[MAXDEVNAME];
+		vhdl = dmamap->hdma_xtalk_info.xd_dev;
+		printk(KERN_WARNING  "%s: hub_dmamap_addr re-uses dmamap.\n",
+						vertex_to_name(vhdl, name, MAXDEVNAME));
+	    }
+	} else {
+		dmamap->hdma_flags |= HUB_DMAMAP_USED;
+	}
+
+	/* There isn't actually any DMA mapping hardware on the hub. */
+#ifdef CONFIG_IA64_SGI_SN2
+        return( (PHYS_TO_DMA(paddr)) );
+#else
+        /* no translation needed */
+        return(paddr);
+#endif
+}
+
+/*
+ * Establish a DMA mapping using the resources allocated in a previous dmamap_alloc.
+ * Return an appropriate crosstalk address list that maps to the specified physical 
+ * address list.
+ */
+/* ARGSUSED */
+alenlist_t
+tio_dmamap_list(hub_dmamap_t hub_dmamap,	/* use these mapping resources */
+		alenlist_t palenlist,		/* map this area of memory */
+		unsigned flags)
+{
+	vertex_hdl_t vhdl;
+
+	ASSERT(hub_dmamap->hdma_flags & HUB_DMAMAP_IS_VALID);
+
+	if (hub_dmamap->hdma_flags & HUB_DMAMAP_USED) {
+	    /* If the map is FIXED, re-use is OK. */
+	    if (!(hub_dmamap->hdma_flags & HUB_DMAMAP_IS_FIXED)) {
+		char name[MAXDEVNAME];
+		vhdl = hub_dmamap->hdma_xtalk_info.xd_dev;
+		printk(KERN_WARNING  "%s: hub_dmamap_list re-uses dmamap\n",
+					vertex_to_name(vhdl, name, MAXDEVNAME));
+	    }
+	} else {
+		hub_dmamap->hdma_flags |= HUB_DMAMAP_USED;
+	}
+
+	/* There isn't actually any DMA mapping hardware on the hub.  */
+	return(palenlist);
+}
+
+/*
+ * Driver indicates that it has completed whatever DMA it may have started
+ * after an earlier dmamap_addr or dmamap_list call.
+ */
+void
+tio_dmamap_done(hub_dmamap_t hub_dmamap)	/* done with these mapping resources */
+{
+	vertex_hdl_t vhdl;
+
+	if (hub_dmamap->hdma_flags & HUB_DMAMAP_USED) {
+		hub_dmamap->hdma_flags &= ~HUB_DMAMAP_USED;
+	} else {
+	    /* If the map is FIXED, re-done is OK. */
+	    if (!(hub_dmamap->hdma_flags & HUB_DMAMAP_IS_FIXED)) {
+		char name[MAXDEVNAME];
+		vhdl = hub_dmamap->hdma_xtalk_info.xd_dev;
+		printk(KERN_WARNING  "%s: hub_dmamap_done already done with dmamap\n",
+						vertex_to_name(vhdl, name, MAXDEVNAME));
+	    }
+	}
+}
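+
+/*
+ * Illustrative sketch (not part of the driver): a typical DMA map lifecycle
+ * as seen from a generic xtalk caller.  Since the hub has no DMA mapping
+ * hardware, dmamap_addr just returns the DMA-translated physical address.
+ * The "dev", "paddr" and "nbytes" values are hypothetical placeholders.
+ *
+ *	hub_dmamap_t dmamap;
+ *	iopaddr_t xio_addr;
+ *
+ *	dmamap = tio_dmamap_alloc(dev, (device_desc_t)0, nbytes, XTALK_FIXED);
+ *	if (dmamap != NULL) {
+ *		xio_addr = tio_dmamap_addr(dmamap, paddr, nbytes);
+ *		... program the device with xio_addr and run the DMA ...
+ *		tio_dmamap_done(dmamap);
+ *		tio_dmamap_free(dmamap);
+ *	}
+ */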
+
+/*
+ * Translate a single system physical address into a crosstalk address.
+ */
+/* ARGSUSED */
+iopaddr_t
+tio_dmatrans_addr(	vertex_hdl_t dev,	/* translate for this device */
+			device_desc_t dev_desc,	/* device descriptor */
+			paddr_t paddr,		/* system physical address */
+			size_t byte_count,	/* length */
+			unsigned flags)		/* defined in dma.h */
+{
+#ifdef CONFIG_IA64_SGI_SN2
+	return( (PHYS_TO_DMA(paddr)) );
+#else
+	/* no translation needed */
+	return(paddr);
+#endif
+}
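+
+/*
+ * Illustrative sketch (not part of the driver): a one-shot translation for
+ * callers that need no persistent mapping state.  "dev", "paddr" and
+ * "nbytes" are hypothetical placeholders.
+ *
+ *	iopaddr_t xio_addr;
+ *
+ *	xio_addr = tio_dmatrans_addr(dev, (device_desc_t)0, paddr, nbytes, 0);
+ */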
+
+/*
+ * Translate a list of IP27 addresses and lengths into a list of crosstalk 
+ * addresses and lengths.  No actual hardware mapping takes place; the hub 
+ * has no DMA mapping registers -- crosstalk addresses map directly.
+ */
+/* ARGSUSED */
+alenlist_t
+tio_dmatrans_list(	vertex_hdl_t dev,	/* translate for this device */
+			device_desc_t dev_desc,	/* device descriptor */
+			alenlist_t palenlist,	/* system address/length list */
+			unsigned flags)		/* defined in dma.h */
+{
+	BUG();
+	/* no translation needed */
+	return(palenlist);
+}
+
+/*ARGSUSED*/
+void
+tio_dmamap_drain(	hub_dmamap_t map)
+{
+    /* XXX- flush caches, if cache coherency WAR is needed */
+}
+
+/*ARGSUSED*/
+void
+tio_dmaaddr_drain(	vertex_hdl_t vhdl,
+			paddr_t addr,
+			size_t bytes)
+{
+    /* XXX- flush caches, if cache coherency WAR is needed */
+}
+
+/*ARGSUSED*/
+void
+tio_dmalist_drain(	vertex_hdl_t vhdl,
+			alenlist_t list)
+{
+    /* XXX- flush caches, if cache coherency WAR is needed */
+}
+
+
+
+/* CONFIGURATION MANAGEMENT */
+
+/*
+ * Perform initializations that allow this hub to start crosstalk support.
+ */
+void
+tio_provider_startup(vertex_hdl_t hubv)
+{
+	tio_pio_init(hubv);
+	tio_intr_init(hubv);
+}
+
+/*
+ * Shutdown crosstalk support from a hub.
+ */
+void
+tio_provider_shutdown(vertex_hdl_t hub)
+{
+	/* TBD */
+	xtalk_provider_unregister(hub);
+}
+
+/*
+ * Check that an address is in the real small window widget 0 space
+ * or else in the big window we're using to emulate small window 0
+ * in the kernel.
+ */
+int
+tio_check_is_widget0(void *addr)
+{
+	nasid_t nasid = NASID_GET(addr);
+
+	if (((__psunsigned_t)addr >= RAW_TIO_SWIN_BASE(nasid, 0)) &&
+	    ((__psunsigned_t)addr < RAW_TIO_SWIN_BASE(nasid, 1)))
+		return 1;
+	return 0;
+}
+
+
+
+/*
+ * tio_setup_prb(nasid, prbnum, credits, conveyor)
+ *
+ * 	Put a PRB into fire-and-forget mode if conveyor isn't set.  Otherwise,
+ * 	put it into conveyor belt mode with the specified number of credits.
+ */
+void
+tio_setup_prb(nasid_t nasid, int prbnum, int credits, int conveyor)
+{
+	iprb_t prb;
+	int prb_offset;
+/* ZZZZ FIXME */
+	return;
+
+	if (force_fire_and_forget && !ignore_conveyor_override)
+	    if (conveyor == HUB_PIO_CONVEYOR)
+		conveyor = HUB_PIO_FIRE_N_FORGET;
+
+	/*
+	 * Get the current register value.
+	 */
+	prb_offset = IIO_IOPRB(prbnum);
+	prb.iprb_regval = REMOTE_HUB_L(nasid, prb_offset);
+
+	/*
+	 * Clear out some fields.
+	 */
+	prb.iprb_ovflow = 1;
+	prb.iprb_bnakctr = 0;
+	prb.iprb_anakctr = 0;
+
+	/*
+	 * Enable or disable fire-and-forget mode.
+	 */
+	prb.iprb_ff = ((conveyor == HUB_PIO_CONVEYOR) ? 0 : 1);
+
+	/*
+	 * Set the appropriate number of PIO credits for the widget.
+	 */
+	prb.iprb_xtalkctr = credits;
+
+	/*
+	 * Store the new value to the register.
+	 */
+	REMOTE_HUB_S(nasid, prb_offset, prb.iprb_regval);
+}
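+
+/*
+ * Illustrative sketch (not part of the driver): the intended use of
+ * tio_setup_prb() once the FIXME above is resolved, mirroring the hub
+ * version.  nasid and prbnum are hypothetical placeholders.
+ *
+ *	tio_setup_prb(nasid, prbnum, 3, HUB_PIO_CONVEYOR);	 conveyor belt mode, 3 credits
+ *	tio_setup_prb(nasid, prbnum, 3, HUB_PIO_FIRE_N_FORGET);   fire-and-forget mode
+ */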
+
+/* Interface to allow special drivers to set hub specific
+ * device flags.
+ * Return 0 on failure, 1 on success.
+ */
+int
+tio_widget_flags_set(nasid_t		nasid,
+		     xwidgetnum_t	widget_num,
+		     hub_widget_flags_t	flags)
+{
+
+/* ZZZZZ FIXME */
+	return 0;
+#if not_yet
+	ASSERT((flags & HUB_WIDGET_FLAGS) == flags);
+
+	if (flags & HUB_PIO_CONVEYOR) {
+		hub_setup_prb(nasid,widget_num,
+			      3,HUB_PIO_CONVEYOR); /* set the PRB in conveyor 
+						    * belt mode with 3 credits
+						    */
+	} else if (flags & HUB_PIO_FIRE_N_FORGET) {
+		hub_setup_prb(nasid,widget_num,
+			      3,HUB_PIO_FIRE_N_FORGET); /* set the PRB in fire
+							 *  and forget mode 
+							 */
+	}
+
+	return 1;
+#endif
+}
+
+/*
+ * A pointer to this structure hangs off of every hub hwgraph vertex.
+ * The generic xtalk layer may indirect through it to get to this specific
+ * crosstalk bus provider.
+ */
+xtalk_provider_t tio_provider = {
+	(xtalk_piomap_alloc_f *)	tio_piomap_alloc,
+	(xtalk_piomap_free_f *)		tio_piomap_free,
+	(xtalk_piomap_addr_f *)		tio_piomap_addr,
+	(xtalk_piomap_done_f *)		tio_piomap_done,
+	(xtalk_piotrans_addr_f *)	tio_piotrans_addr,
+
+	(xtalk_dmamap_alloc_f *)	tio_dmamap_alloc,
+	(xtalk_dmamap_free_f *)		tio_dmamap_free,
+	(xtalk_dmamap_addr_f *)		tio_dmamap_addr,
+	(xtalk_dmamap_list_f *)		tio_dmamap_list,
+	(xtalk_dmamap_done_f *)		tio_dmamap_done,
+	(xtalk_dmatrans_addr_f *)	tio_dmatrans_addr,
+	(xtalk_dmatrans_list_f *)	tio_dmatrans_list,
+	(xtalk_dmamap_drain_f *)	tio_dmamap_drain,
+	(xtalk_dmaaddr_drain_f *)	tio_dmaaddr_drain,
+	(xtalk_dmalist_drain_f *)	tio_dmalist_drain,
+
+	(xtalk_intr_alloc_f *)		tio_intr_alloc,
+	(xtalk_intr_alloc_f *)		tio_intr_alloc_nothd,
+	(xtalk_intr_free_f *)		tio_intr_free,
+	(xtalk_intr_connect_f *)	tio_intr_connect,
+	(xtalk_intr_disconnect_f *)	tio_intr_disconnect,
+	(xtalk_provider_startup_f *)	tio_provider_startup,
+	(xtalk_provider_shutdown_f *)	tio_provider_shutdown,
+};
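+
+/*
+ * Illustrative note (an assumption, not part of the driver): this table is
+ * presumably registered against the TIO vertex during initialization and
+ * unregistered by tio_provider_shutdown() above, e.g.:
+ *
+ *	xtalk_provider_register(tio_vhdl, &tio_provider);
+ *	xtalk_provider_startup(tio_vhdl);
+ *
+ * after which the generic xtalk layer dispatches piomap/dmamap/intr requests
+ * for widgets mastered by this TIO through the function pointers above.
+ */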
diff -Nru a/arch/ia64/sn/io/sn2/tio_intr.c b/arch/ia64/sn/io/sn2/tio_intr.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/sn2/tio_intr.c	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,166 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/sn2/tio.h>
+#include <asm/sn/sn2/iceio.h>
+
+/* ARGSUSED */
+void
+tio_intr_init(vertex_hdl_t hubv)
+{
+}
+
+
+static hub_intr_t
+do_tio_intr_alloc(vertex_hdl_t dev,
+		device_desc_t dev_desc,
+		vertex_hdl_t owner_dev,
+		int uncond_nothread)
+{
+	cpuid_t		cpu;
+	int		vector;
+	hub_intr_t	intr_hdl;
+	cnodeid_t	cnode;
+	int		cpuphys, slice;
+	int		nasid;
+	iopaddr_t	xtalk_addr;
+	struct xtalk_intr_s	*xtalk_info;
+	xwidget_info_t	xwidget_info;
+
+	cpu = tio_intr_heuristic(dev, -1, &vector);
+
+	if (cpu == CPU_NONE) {
+		printk(KERN_WARNING "Unable to allocate interrupt for 0x%p\n", (void *)owner_dev);
+		return(0);
+	}
+
+	cpuphys = cpu_physical_id(cpu);
+	slice = cpu_physical_id_to_slice(cpuphys);
+	nasid = cpu_physical_id_to_nasid(cpuphys);
+	cnode = cpuid_to_cnodeid(cpu);
+
+	if (slice) {
+		xtalk_addr = TIO_INT1 | ((unsigned long)nasid << 38) | (1UL << 35);
+	} else {
+		xtalk_addr = TIO_INT0 | ((unsigned long)nasid << 38) | (1UL << 35);
+	}
+
+	intr_hdl = kmalloc(sizeof(struct hub_intr_s), GFP_KERNEL);
+	ASSERT_ALWAYS(intr_hdl);
+	memset(intr_hdl, 0, sizeof(struct hub_intr_s));
+
+	xtalk_info = &intr_hdl->i_xtalk_info;
+	xtalk_info->xi_dev = dev;
+	xtalk_info->xi_vector = vector;
+	xtalk_info->xi_addr = xtalk_addr;
+
+	xwidget_info = xwidget_info_get(dev);
+	if (xwidget_info) {
+		xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);
+	}
+
+	intr_hdl->i_cpuid = cpu;
+	intr_hdl->i_bit = vector;
+	intr_hdl->i_flags |= HUB_INTR_IS_ALLOCED;
+
+	return intr_hdl;
+}
+
+hub_intr_t
+tio_intr_alloc(vertex_hdl_t dev,
+		device_desc_t dev_desc,
+		vertex_hdl_t owner_dev)
+{
+	return(do_tio_intr_alloc(dev, dev_desc, owner_dev, 0));
+}
+
+hub_intr_t
+tio_intr_alloc_nothd(vertex_hdl_t dev,
+		device_desc_t dev_desc,
+		vertex_hdl_t owner_dev)
+{
+	return(do_tio_intr_alloc(dev, dev_desc, owner_dev, 1));
+}
+
+void
+tio_intr_free(hub_intr_t intr_hdl)
+{
+	cpuid_t		cpu = intr_hdl->i_cpuid;
+	int		vector = intr_hdl->i_bit;
+	xtalk_intr_t	xtalk_info;
+
+	if (intr_hdl->i_flags & HUB_INTR_IS_CONNECTED) {
+		xtalk_info = &intr_hdl->i_xtalk_info;
+		xtalk_info->xi_dev = 0;
+		xtalk_info->xi_vector = 0;
+		xtalk_info->xi_addr = 0;
+		tio_intr_disconnect(intr_hdl);
+	}
+
+	if (intr_hdl->i_flags & HUB_INTR_IS_ALLOCED) {
+		kfree(intr_hdl);
+	}
+	intr_unreserve_level(cpu, vector);
+}
+
+int
+tio_intr_connect(hub_intr_t intr_hdl,
+		intr_func_t intr_func,          /* xtalk intr handler */
+		void *intr_arg,                 /* arg to intr handler */
+		xtalk_intr_setfunc_t setfunc,
+		void *setfunc_arg)
+{
+	int		rv;
+	cpuid_t		cpu = intr_hdl->i_cpuid;
+	int 		vector = intr_hdl->i_bit;
+
+	ASSERT(intr_hdl->i_flags & HUB_INTR_IS_ALLOCED);
+
+	rv = intr_connect_level(cpu, vector);
+	if (rv < 0) {
+		return rv;
+	}
+
+	intr_hdl->i_xtalk_info.xi_setfunc = setfunc;
+	intr_hdl->i_xtalk_info.xi_sfarg = setfunc_arg;
+
+	if (setfunc) {
+		(*setfunc)((xtalk_intr_t)intr_hdl);
+	}
+
+	intr_hdl->i_flags |= HUB_INTR_IS_CONNECTED;
+
+	return 0;
+}
+
+/*
+ * Disassociate handler with the specified interrupt.
+ */
+void
+tio_intr_disconnect(hub_intr_t intr_hdl)
+{
+	/*REFERENCED*/
+	int rv;
+	cpuid_t cpu = intr_hdl->i_cpuid;
+	int bit = intr_hdl->i_bit;
+	xtalk_intr_setfunc_t setfunc;
+
+	setfunc = intr_hdl->i_xtalk_info.xi_setfunc;
+
+	/* TBD: send disconnected interrupts somewhere harmless */
+	if (setfunc) (*setfunc)((xtalk_intr_t)intr_hdl);
+
+	rv = intr_disconnect_level(cpu, bit);
+	ASSERT(rv == 0);
+	intr_hdl->i_flags &= ~HUB_INTR_IS_CONNECTED;
+}
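+
+/*
+ * Illustrative sketch (not part of the driver): the interrupt lifecycle as
+ * driven by the generic xtalk layer.  The handler, argument and setfunc
+ * names are hypothetical placeholders.
+ *
+ *	hub_intr_t intr;
+ *
+ *	intr = tio_intr_alloc(dev, (device_desc_t)0, owner_dev);
+ *	if (intr != NULL) {
+ *		if (tio_intr_connect(intr, my_handler, my_arg,
+ *				     my_setfunc, my_setfunc_arg) == 0) {
+ *			... interrupts are delivered to the chosen CPU/vector ...
+ *			tio_intr_disconnect(intr);
+ *		}
+ *		tio_intr_free(intr);
+ *	}
+ */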
diff -Nru a/arch/ia64/sn/io/sn2/tiocp.c b/arch/ia64/sn/io/sn2/tiocp.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/sn2/tiocp.c	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,963 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/pci/pcibr_private.h>
+#include <asm/sn/pci/pci_defs.h>
+#include <asm/sn/pci/tiocp.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_private.h>
+
+extern struct file_operations pcibr_fops;
+extern pcibr_list_p pcibr_list;
+
+extern int isIO9(nasid_t);
+extern char *dev_to_name(vertex_hdl_t dev, char *buf, uint buflen);
+extern int pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
+extern pcibr_hints_t pcibr_hints_get(vertex_hdl_t, int);
+extern unsigned pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines,
+				int nslots);
+extern void pcibr_setwidint(xtalk_intr_t);
+extern int pcibr_error_handler(error_handler_arg_t, int, ioerror_mode_t,
+			       ioerror_t *);
+extern void pcibr_error_intr_handler(intr_arg_t);
+extern void pcibr_directmap_init(pcibr_soft_t);
+extern int pcibr_slot_info_init(vertex_hdl_t, pciio_slot_t);
+extern int pcibr_slot_addr_space_init(vertex_hdl_t, pciio_slot_t);
+extern int pcibr_slot_device_init(vertex_hdl_t, pciio_slot_t);
+extern int pcibr_slot_pcix_rbar_init(pcibr_soft_t, pciio_slot_t);
+extern int pcibr_slot_guest_info_init(vertex_hdl_t, pciio_slot_t);
+extern int pcibr_slot_call_device_attach(vertex_hdl_t, pciio_slot_t, int);
+extern void pcibr_rrb_alloc_init(pcibr_soft_t, int, int, int);
+extern int pcibr_pcix_rbars_calc(pcibr_soft_t);
+extern pcibr_info_t pcibr_device_info_new(pcibr_soft_t, pciio_slot_t,
+					  pciio_function_t, pciio_vendor_id_t,
+					  pciio_device_id_t);
+extern int pcibr_initial_rrb(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
+extern void xwidget_error_register(vertex_hdl_t, error_handler_f *,
+				   error_handler_arg_t);
+
+int
+tiocp_attach(vertex_hdl_t conn_vhdl)
+{
+	graph_error_t rc;
+	pci_bridge_t *bridge;
+	vertex_hdl_t pcibr_vhdl;
+	vertex_hdl_t ctlr_vhdl;
+	pcibr_soft_t pcibr_soft;
+	pcibr_info_t pcibr_info;
+	xwidget_info_t info;
+	xtalk_intr_t xtalk_intr;
+	int irq, cpu;
+	int corelet;
+	pcibr_list_p self;
+	int entry, slot, ibit, i;
+	vertex_hdl_t noslot_conn;
+	char devnm[MAXDEVNAME], *s;
+	pcibr_hints_t pcibr_hints;
+	tiocp_reg_t id;
+	tiocp_reg_t int_enable;
+	tiocp_reg_t tiocp_ctrl_reg;
+
+	int iobrick_type_get_nasid(nasid_t nasid);
+	int iobrick_module_get_nasid(nasid_t nasid);
+
+	iopaddr_t prom_base_addr;
+	int prom_base_size = 0x1000000;
+	int status;
+	struct resource *res;
+
+	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_vhdl, "tiocp_attach()\n"));
+
+	bridge = pcibr_bridge_ptr_get(conn_vhdl, 0);
+	corelet = TIO_SWIN_WIDGETNUM((uint64_t)bridge);
+
+	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_vhdl,
+			    "tiocp_attach: bridge=0x%lx, corelet=0x%x\n", 
+			    bridge, corelet));
+	/*
+	 * Create the vertex for the PCI buses, which we will also use to 
+	 * hold the pcibr_soft and which will be the "master" vertex for
+	 * all the pciio connection points we will hang off it.
+	 */
+	rc = hwgraph_path_add(conn_vhdl, EDGE_LBL_PCIX_0, &pcibr_vhdl);
+	ASSERT(rc == GRAPH_SUCCESS);
+
+	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_vhdl,
+			    "tiocp_attach: pcibr_vhdl=0x%lx\n", pcibr_vhdl));
+
+	/* register pci provider array */
+	pciio_provider_register(pcibr_vhdl, &pci_tiocp_provider);
+	pciio_provider_startup(pcibr_vhdl);
+
+	ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0,
+				     0, 0, 0,
+				     S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0,
+				     0, (struct file_operations *) &pcibr_fops,
+				     (void *) pcibr_vhdl);
+	ASSERT(ctlr_vhdl != NULL);
+
+	id = pcireg_id_get(bridge);
+	hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV,
+			     (arbitrary_info_t) XWIDGET_PART_REV_NUM(id));
+
+	/*
+	 * Get the hint structure; if some NIC callback marked this vertex as
+	 * "hands-off" then we just return here, before doing anything else.
+	 */
+	pcibr_hints = pcibr_hints_get(conn_vhdl, 0);
+
+	if (pcibr_hints && pcibr_hints->ph_hands_off) {
+		return -1;
+	}
+
+	/* allocate soft structure to hang off the vertex.  Link the new soft
+	 * structure to the pcibr_list linked list
+	 */
+	pcibr_soft = kmalloc(sizeof(*pcibr_soft), GFP_KERNEL);
+	if (!pcibr_soft)
+		return -ENOMEM;
+	self = kmalloc(sizeof(*self), GFP_KERNEL);
+	if (!self) {
+		kfree(pcibr_soft);
+		return -ENOMEM;
+	}
+	self->bl_soft = pcibr_soft;
+	self->bl_vhdl = pcibr_vhdl;
+	self->bl_next = pcibr_list;
+	pcibr_list = self;
+
+	memset(pcibr_soft, 0, sizeof *pcibr_soft);
+	pcibr_soft_set(pcibr_vhdl, pcibr_soft);
+
+	s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
+	pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
+	if (!pcibr_soft->bs_name)
+		return -ENOMEM;
+	strcpy(pcibr_soft->bs_name, s);
+
+	pcibr_soft->bs_conn = conn_vhdl;
+	pcibr_soft->bs_vhdl = pcibr_vhdl;
+	pcibr_soft->bs_base = (void *) bridge;
+	pcibr_soft->bs_rev_num = XWIDGET_PART_REV_NUM(id);
+	pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *) pcibr_intr_bits;
+	pcibr_soft->bsi_err_intr = 0;
+	pcibr_soft->bs_min_slot = 0;
+	pcibr_soft->bs_max_slot = 3;
+	pcibr_soft->bs_busnum = 0;
+	pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_TIOCP;
+	pcibr_soft->bs_int_ate_size = TIOCP_INTERNAL_ATES;
+#if 0	/* habeck:   hack until medusa can read all 256 bytes of cfg space */
+	pcibr_soft->bs_bridge_mode =
+	    (pcireg_speed_get(pcibr_soft) << 1) | pcireg_mode_get(pcibr_soft);
+#else
+	pcibr_soft->bs_bridge_mode = PCIBR_BRIDGEMODE_PCI_66;
+#endif
+
+	info = xwidget_info_get(conn_vhdl);
+	pcibr_soft->bs_xid = xwidget_info_id_get(info);
+	pcibr_soft->bs_master = xwidget_info_master_get(info);
+	pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);
+
+	strcpy(pcibr_soft->bs_asic_name, "TIOCP");
+
+	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
+			    "tiocp_attach: pcibr_soft=0x%lx, mode=0x%x\n",
+			    pcibr_soft, pcibr_soft->bs_bridge_mode));
+
+	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
+			    "tiocp_attach: %s ASIC: rev %s (code=0x%x)\n",
+			    pcibr_soft->bs_asic_name,
+			    (IS_TIOCP_PART_REV_A(pcibr_soft->bs_rev_num)) ? "A"
+			    : (IS_TIOCP_PART_REV_B(pcibr_soft->bs_rev_num)) ?
+			    "B" : "unknown", pcibr_soft->bs_rev_num));
+
+	/* PV854845: Must clear write request buffer to avoid parity errors */
+	for (i = 0; i < TIOCP_WR_REQ_BUFSIZE; i++) {
+		((tiocp_t *) bridge)->cp_wr_req_lower[i] = 0;
+		((tiocp_t *) bridge)->cp_wr_req_upper[i] = 0;
+		((tiocp_t *) bridge)->cp_wr_req_parity[i] = 0;
+	}
+
+	pcibr_soft->bs_nasid = NASID_GET(bridge);
+
+	pcibr_soft->bs_bricktype = iobrick_type_get_nasid(pcibr_soft->bs_nasid);
+	if (pcibr_soft->bs_bricktype < 0) {
+		KERN_MSG(K_WARN,
+			 "%s: bricktype was unknown by L1 (ret val = 0x%x)\n",
+			 pcibr_soft->bs_name, pcibr_soft->bs_bricktype);
+	}
+
+	pcibr_soft->bs_moduleid = iobrick_module_get_nasid(pcibr_soft->bs_nasid);
+
+	if (pcibr_soft->bs_bricktype > 0) {
+		switch (pcibr_soft->bs_bricktype) {
+		case MODULE_IABRICK:
+		case MODULE_PABRICK:
+			pcibr_soft->bs_first_slot = 0;
+			/* 
+			 * CP0 has 1 slot (the slot is BaseIO enabled)
+			 * CP1 has 2 slots.
+			 */
+			if (corelet == 0) {
+				if ((pcibr_widget_to_bus(pcibr_vhdl) == 1)
+				    && isIO9(pcibr_soft->bs_nasid)) {
+					pcibr_soft->bs_last_slot = 3;
+					pcibr_soft->bs_last_reset = 3;
+				} else {
+					pcibr_soft->bs_last_slot = 0;
+					pcibr_soft->bs_last_reset = 0;
+				}
+			} else {    /* corelet 1 */
+				pcibr_soft->bs_last_slot = 1;
+				pcibr_soft->bs_last_reset = 1;
+			}
+			break;
+
+		case MODULE_BUBRICK:	/* TIO bringup brick */
+			pcibr_soft->bs_first_slot = 0;
+			/* 
+			 * CP1 has 2 slots (slot 1 is BaseIO enabled)
+			 * CP0 has 2 slots.
+			 */
+			if (corelet == 1) {
+				if ((pcibr_widget_to_bus(pcibr_vhdl) == 1)
+				    && isIO9(pcibr_soft->bs_nasid)) {
+					pcibr_soft->bs_last_slot = 3;
+					pcibr_soft->bs_last_reset = 3;
+				} else {
+					pcibr_soft->bs_last_slot = 1;
+					pcibr_soft->bs_last_reset = 1;
+				}
+			} else {    /* corelet 0 */
+				pcibr_soft->bs_last_slot = 1;
+				pcibr_soft->bs_last_reset = 1;
+			}
+			break;
+
+		default:
+			KERN_MSG(K_WARN, "%s: Unknown bricktype: 0x%x\n",
+				 pcibr_soft->bs_name, pcibr_soft->bs_bricktype);
+			break;
+		}
+
+		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
+				    "tiocp_attach: bricktype=%d, brickbus=%d, "
+				    "slots %d-%d\n", pcibr_soft->bs_bricktype,
+				    pcibr_widget_to_bus(pcibr_vhdl),
+				    pcibr_soft->bs_first_slot,
+				    pcibr_soft->bs_last_slot));
+	}
+
+	/*
+	 * Initialize bridge and bus locks
+	 */
+	spin_lock_init(&pcibr_soft->bs_lock);
+#ifdef PCI_HOTPLUG
+	mrinit(pcibr_soft->bs_bus_lock, "bus_lock");
+#endif
+
+	/*
+	 * If we have one, process the hints structure.
+	 */
+	if (pcibr_hints) {
+		unsigned rrb_fixed;
+		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
+				    "tiocp_attach: pcibr_hints=0x%lx\n",
+				    pcibr_hints));
+
+		rrb_fixed = pcibr_hints->ph_rrb_fixed;
+
+		pcibr_soft->bs_rrb_fixed = rrb_fixed;
+
+		if (pcibr_hints->ph_intr_bits) {
+			pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
+		}
+
+		for (slot = pcibr_soft->bs_min_slot;
+		     slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
+			int hslot = pcibr_hints->ph_host_slot[slot] - 1;
+
+			if (hslot < 0) {
+				pcibr_soft->bs_slot[slot].host_slot = slot;
+			} else {
+				pcibr_soft->bs_slot[slot].has_host = 1;
+				pcibr_soft->bs_slot[slot].host_slot = hslot;
+			}
+		}
+	}
+
+	/*
+	 * Set-up initial values for state fields
+	 */
+	for (slot = pcibr_soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(pcibr_soft);
+	     ++slot) {
+		pcibr_soft->bs_slot[slot].bss_devio.bssd_space =
+		    PCIIO_SPACE_NONE;
+		pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
+		pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
+		pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
+		pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
+	}
+
+	for (ibit = 0; ibit < 8; ++ibit) {
+		pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
+		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft =
+		    pcibr_soft;
+		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
+		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
+		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
+		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
+		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
+	}
+
+	xwidget_error_register(conn_vhdl, pcibr_error_handler, pcibr_soft);
+
+	/*
+	 * Clear all pending interrupts.  Assume all interrupts are from slot 
+	 * 3 until otherwise set up.
+	 */
+	pcireg_intr_reset_set(pcibr_soft, PCIBR_IRR_ALL_CLR);
+	pcireg_intr_device_set(pcibr_soft, 0x006db6db);
+
+	/* Setup the mapping register used for direct mapping */
+	pcibr_directmap_init(pcibr_soft);
+
+	/*
+	 * Initialize the TIOCP's control register.
+	 */
+	tiocp_ctrl_reg = pcireg_control_get(pcibr_soft);
+
+	/* Bridge's Requester ID: bus = busnum, dev = 0, func = 0 */
+	tiocp_ctrl_reg &= ~TIOCP_CTRL_BUS_NUM_MASK;
+	tiocp_ctrl_reg |= TIOCP_CTRL_BUS_NUM(0);
+	tiocp_ctrl_reg &= ~TIOCP_CTRL_DEV_NUM_MASK;
+	tiocp_ctrl_reg &= ~TIOCP_CTRL_FUN_NUM_MASK;
+
+	tiocp_ctrl_reg &= ~TIOCP_CTRL_NO_SNOOP;
+	tiocp_ctrl_reg &= ~TIOCP_CTRL_RELAX_ORDER;
+
+	/* enable parity checking on TIOCP's internal RAM */
+	tiocp_ctrl_reg |= TIOCP_CTRL_PAR_EN_RESP;
+	tiocp_ctrl_reg |= TIOCP_CTRL_PAR_EN_ATE;
+	tiocp_ctrl_reg |= TIOCP_CTRL_PAR_EN_REQ;
+
+	tiocp_ctrl_reg |= TIOCP_CTRL_PAGE_SIZE;
+
+	pcireg_control_set(pcibr_soft, tiocp_ctrl_reg);
+
+	/* Initialize internal mapping entries (ie. the ATEs) */
+	for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++) {
+		pcireg_int_ate_set(pcibr_soft, entry, 0);
+	}
+
+	pcibr_soft->bs_int_ate_resource.start = 0;
+	pcibr_soft->bs_int_ate_resource.end = pcibr_soft->bs_int_ate_size - 1;
+	pcibr_soft->bs_allocated_ate_res =
+	    (void *) kmalloc(pcibr_soft->bs_int_ate_size *
+			     sizeof (unsigned long), GFP_KERNEL);
+	if (!pcibr_soft->bs_allocated_ate_res)
+		return -ENOMEM;
+	memset(pcibr_soft->bs_allocated_ate_res, 0x0,
+	       pcibr_soft->bs_int_ate_size * sizeof (unsigned long));
+
+	/* Set up the TIOCP's error interrupt handler. */
+	xtalk_intr = xtalk_intr_alloc(conn_vhdl, (device_desc_t) 0, pcibr_vhdl);
+	ASSERT(xtalk_intr != NULL);
+
+	irq = ((hub_intr_t) xtalk_intr)->i_bit;
+	cpu = ((hub_intr_t) xtalk_intr)->i_cpuid;
+	intr_unreserve_level(cpu, irq);
+	((hub_intr_t) xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
+	xtalk_intr->xi_vector = SGI_PCIBR_ERROR;
+
+	pcibr_soft->bsi_err_intr = xtalk_intr;
+
+	xtalk_intr_connect(xtalk_intr, (intr_func_t) pcibr_error_intr_handler,
+			   (intr_arg_t) pcibr_soft,
+			   (xtalk_intr_setfunc_t) pcibr_setwidint,
+			   (void *) bridge);
+
+	request_irq(SGI_PCIBR_ERROR, (void *) pcibr_error_intr_handler,
+		    SA_SHIRQ, "PCIBR error", (intr_arg_t) pcibr_soft);
+
+	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
+			    "pcibr_setwidint: target_id=0x%lx, addr=0x%lx\n",
+			    pcireg_intr_dst_target_id_get(pcibr_soft),
+			    pcireg_intr_dst_addr_get(pcibr_soft)));
+
+	/* now we can start handling error interrupts */
+	int_enable = pcireg_intr_enable_get(pcibr_soft);
+	int_enable |= PCIBR_ISR_ERRORS;
+
+	pcireg_intr_enable_set(pcibr_soft, int_enable);
+	pcireg_intr_mode_set(pcibr_soft, 0);	/* no 'clear interrupt' pkts */
+	pcireg_tflush_get(pcibr_soft);	/* wait until Bridge PIO complete */
+
+	/* Allocate resource maps based on bus page size; for I/O and memory
+	 * space, free all pages except those in the base area and in the
+	 * range set by the PROM.
+	 *
+	 * PROM creates BAR addresses in this format: 0x0ws00000 where w is 
+	 * the widget/corelet number and s is the device register offset for 
+	 * the slot.
+	 */
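+
+	/*
+	 * Illustrative example (assumed reading of the format above): with
+	 * corelet/widget 0xc and slot 2, PROM-assigned BARs fall around
+	 * 0x0c200000, and the code below blocks off the whole
+	 * 0x0c000000-0x0cffffff range (bs_xid << 24, size 0x1000000) so
+	 * those assignments are preserved.
+	 */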
+
+	/* Setup the Bus's PCI IO Root Resource. */
+	pcibr_soft->bs_io_win_root_resource.start = 0;
+	pcibr_soft->bs_io_win_root_resource.end = 0xffffffff;
+	res = (struct resource *) kmalloc(sizeof (struct resource), GFP_KERNEL);
+	if (!res) {
+		panic("PCIBR:Unable to allocate resource structure\n");
+	}
+
+	/* Block off the range used by PROM. */
+	prom_base_addr = pcibr_soft->bs_xid << 24;
+	res->start = prom_base_addr;
+	res->end = prom_base_addr + (prom_base_size - 1);
+	status = request_resource(&pcibr_soft->bs_io_win_root_resource, res);
+	if (status) {
+		panic("PCIBR:Unable to request_resource()\n");
+	}
+
+	/* Setup the Small Window Root Resource */
+	pcibr_soft->bs_swin_root_resource.start = PAGE_SIZE;
+	pcibr_soft->bs_swin_root_resource.end = 0x000FFFFF;
+
+	/* Setup the Bus's PCI Memory Root Resource */
+	pcibr_soft->bs_mem_win_root_resource.start = 0;
+	pcibr_soft->bs_mem_win_root_resource.end = 0xffffffff;
+	res = (struct resource *) kmalloc(sizeof (struct resource), GFP_KERNEL);
+	if (!res) {
+		panic("PCIBR:Unable to allocate resource structure\n");
+	}
+
+	/* Block off the range used by PROM. */
+	res->start = prom_base_addr;
+	res->end = prom_base_addr + (prom_base_size - 1);
+	status = request_resource(&pcibr_soft->bs_mem_win_root_resource, res);
+	if (status) {
+		panic("PCIBR:Unable to request_resource()\n");
+	}
+
+	/* build "no-slot" connection point */
+	pcibr_info =
+	    pcibr_device_info_new(pcibr_soft, PCIIO_SLOT_NONE, PCIIO_FUNC_NONE,
+				  PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
+	noslot_conn = pciio_device_info_register(pcibr_vhdl, &pcibr_info->f_c);
+
+	/* Store no slot connection point info */
+	pcibr_soft->bs_noslot_conn = noslot_conn;
+	pcibr_soft->bs_noslot_info = pcibr_info;
+
+#if 0
+	/* If the bridge has been reset then there is no need to reset
+	 * the individual PCI slots.
+	 */
+	for (slot = pcibr_soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(pcibr_soft);
+	     ++slot) {
+		/* Reset all the slots */
+		(void) pcibr_slot_reset(pcibr_vhdl, slot);
+	}
+#endif
+
+	for (slot = pcibr_soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(pcibr_soft);
+	     ++slot) {
+		/* Find out what is out there */
+		(void) pcibr_slot_info_init(pcibr_vhdl, slot);
+	}
+
+	for (slot = pcibr_soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(pcibr_soft);
+	     ++slot) {
+		/* Set up the address space for this slot in the PCI land */
+		(void) pcibr_slot_addr_space_init(pcibr_vhdl, slot);
+	}
+
+	for (slot = pcibr_soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(pcibr_soft);
+	     ++slot) {
+		/* Setup the device register */
+		(void) pcibr_slot_device_init(pcibr_vhdl, slot);
+	}
+
+	if (IS_PCIX(pcibr_soft)) {
+		pcibr_soft->bs_pcix_rbar_inuse = 0;
+		pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
+		pcibr_soft->bs_pcix_rbar_percent_allowed =
+		    pcibr_pcix_rbars_calc(pcibr_soft);
+
+		for (slot = pcibr_soft->bs_min_slot;
+		     slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
+			/* Setup the PCI-X Read Buffer Attribute Registers */
+			(void) pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
+		}
+	}
+
+	for (slot = pcibr_soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(pcibr_soft);
+	     ++slot) {
+		/* Setup host/guest relations */
+		(void) pcibr_slot_guest_info_init(pcibr_vhdl, slot);
+	}
+
+	/* Handle initial RRB management */
+	pcibr_initial_rrb(pcibr_vhdl, pcibr_soft->bs_first_slot,
+			  pcibr_soft->bs_last_slot);
+
+	/* Before any drivers get called that may want to re-allocate RRB's,
+	 * let's get some special cases pre-allocated. Drivers may override
+	 * these pre-allocations, but by doing pre-allocations now we're
+	 * assured not to step all over what the driver intended.
+	 */
+	if (pcibr_soft->bs_bricktype > 0) {
+		switch (pcibr_soft->bs_bricktype) {
+		case MODULE_IABRICK:
+		case MODULE_PABRICK:
+		case MODULE_BUBRICK:
+			/*
+			 * If IO9 or IO10 is in bus 1, allocate RRBs to all 
+			 * the IO9 or IO10 devices
+			 */
+			if ((pcibr_widget_to_bus(pcibr_vhdl) == 1)
+			    && isIO9(pcibr_soft->bs_nasid)) {
+				pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
+				pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
+				pcibr_rrb_alloc_init(pcibr_soft, 2, VCHAN0, 4);
+				pcibr_rrb_alloc_init(pcibr_soft, 3, VCHAN0, 4);
+			} else {
+				pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
+				pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
+			}
+			break;
+		}		/* switch */
+	}
+
+	for (slot = pcibr_soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(pcibr_soft);
+	     ++slot) {
+		/* Call the device attach */
+		(void) pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
+	}
+
+	pciio_device_attach(noslot_conn, 0);
+
+	return 0;
+}
+
+/*
+ * pci provider functions
+ *
+ * mostly in pcibr.c but if any are needed here then
+ * this might be a way to get them here.
+ */
+pciio_provider_t pci_tiocp_provider = {
+	(pciio_piomap_alloc_f *) pcibr_piomap_alloc,
+	(pciio_piomap_free_f *) pcibr_piomap_free,
+	(pciio_piomap_addr_f *) pcibr_piomap_addr,
+	(pciio_piomap_done_f *) pcibr_piomap_done,
+	(pciio_piotrans_addr_f *) pcibr_piotrans_addr,
+	(pciio_piospace_alloc_f *) pcibr_piospace_alloc,
+	(pciio_piospace_free_f *) pcibr_piospace_free,
+
+	(pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
+	(pciio_dmamap_free_f *) pcibr_dmamap_free,
+	(pciio_dmamap_addr_f *) pcibr_dmamap_addr,
+	(pciio_dmamap_done_f *) pcibr_dmamap_done,
+	(pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
+	(pciio_dmamap_drain_f *) pcibr_dmamap_drain,
+	(pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
+	(pciio_dmalist_drain_f *) pcibr_dmalist_drain,
+
+	(pciio_intr_alloc_f *) pcibr_intr_alloc,
+	(pciio_intr_free_f *) pcibr_intr_free,
+	(pciio_intr_connect_f *) pcibr_intr_connect,
+	(pciio_intr_disconnect_f *) pcibr_intr_disconnect,
+	(pciio_intr_cpu_get_f *) pcibr_intr_cpu_get,
+
+	(pciio_provider_startup_f *) pcibr_provider_startup,
+	(pciio_provider_shutdown_f *) pcibr_provider_shutdown,
+	(pciio_reset_f *) pcibr_reset,
+	(pciio_endian_set_f *) pcibr_endian_set,
+	(pciio_config_get_f *) pcibr_config_get,
+	(pciio_config_set_f *) pcibr_config_set,
+
+	(pciio_error_devenable_f *) pcibr_error_devenable,
+	(pciio_error_extract_f *) pcibr_error_extract,
+
+	(pciio_driver_reg_callback_f *) pcibr_driver_reg_callback,
+	(pciio_driver_unreg_callback_f *) pcibr_driver_unreg_callback,
+	(pciio_device_unregister_f *) pcibr_device_unregister,
+	(pciio_businfo_get_f *) pcibr_businfo_get,
+};
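+
+/*
+ * Note: tiocp_attach() above registers this table with the generic pciio
+ * layer via pciio_provider_register(pcibr_vhdl, &pci_tiocp_provider), after
+ * which pciio requests for devices on this bus are dispatched to the
+ * pcibr_* routines named here.
+ */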
+
+#ifdef CONFIG_KDB
+
+xb_reg_desc_t tiocp_offsets[] = {
+    { "TIO_CP_IDENTIFICATION",
+	0x0, RO, "Identification" },
+    { "TIO_CP_STATUS",
+	0x08, RO, "Status" },
+    { "TIO_CP_BUS_SIDE_ERROR_ADDR_UPPER",
+	0x010, RO, "Upper Address Holding Register Bus Side Errors" },
+    { "TIO_CP_BUS_SIDE_ERROR_ADDR_LOWER",
+	0x018, RO, "Lower Address Holding Register Bus Side Errors" },
+    { "TIO_CP_CONTROL",
+	0x020, RW, "Control" },
+    { "TIO_CP_REQUEST_TIMEOUT",
+	0x028, RW, "PCI Request Time-out Value Register" },
+    { "TIO_CP_INTERRUPT_DEST_UPPER",
+	0x030, RW, "Interrupt Destination Upper Address Register" },
+    { "TIO_CP_INTERRUPT_DEST_LOWER",
+	0x038, RW, "Interrupt Destination Lower Address Register" },
+    { "TIO_CP_BUS_SIDE_ERROR_CMD_WRD",
+	0x040, RO, "Command Word Holding Register Bus Side Errors" },
+    { "TIO_CP_TARGET_FLUSH",
+	0x050, RW, "PCI/PCI-X Target Flush Register" },
+    { "TIO_CP_LINK_SIDE_ERROR_CMD_WRD",
+	0x058, RO, "Command Word Holding Register Link Side Errors" },
+    { "TIO_CP_RSP_BUFFER_UPPER",
+	0x060, RO, "PCI Response Buffer Error Upper Address Holding Reg" },
+    { "TIO_CP_RSP_BUFFER_LOWER",
+	0x068, RO, "PCI Response Buffer Error Lower Address Holding Reg" },
+    { "TIO_CP_DEBUG_PORT_CONTROL",
+	0x070, RW, "Debug Port Control Register" },
+    { "TIO_CP_LINK_SIDE_ERROR_ADDR_HOLD",
+	0x078, RO, "Address Holding Register Link Side Errors" },
+    { "TIO_CP_PCI_DIRECT_MAPPING",
+	0x080, RW, "PCI Direct Mapping Register" },
+    { "TIO_CP_PAGE_MAP_FAULT_ADD",
+	0x090, RO, "PCI Page Map Fault Address Register" },
+    { "TIO_CP_ARBITRATION_PRIORITY",
+	0x0a0, RW, "Arbitration Priority Register" },
+    { "TIO_CP_INTERNAL_RAM_PARITY_ERROR",
+	0x0b0, RO, "Internal Ram Parity Error Register" },
+    { "TIO_CP_TIMEOUT",
+	0x0c0, RW, "Time-out Register" },
+    { "TIO_CP_TYPE1_CONFIGURATION",
+	0x0c8, RW, "PCI/PCI-X Type 1 Configuration Register" },
+    { "TIO_CP_BUS_ERROR_ADDR_UPPER",
+	0x0d0, RO, "PCI Bus Error Upper Address Holding Register" },
+    { "TIO_CP_BUS_ERROR_ADDR_LOWER",
+	0x0d8, RO, "PCI Bus Error Lower Address Holding Register" },
+    { "TIO_CP_INT_STATUS",
+	0x0100, RO, "INT_STATUS Register" },
+    { "TIO_CP_INTERRUPT_ENABLE",
+	0x0108, RW, "Interrupt Enable Register" },
+    { "TIO_CP_RESET_INTERRUPT",
+	0x0110, WO, "Reset Interrupt Register" },
+    { "TIO_CP_INTERRUPT_MODE",
+	0x0118, RW, "Interrupt Mode Register" },
+    { "TIO_CP_INTERRUPT_DEVICE",
+	0x0120, RW, "Interrupt Device Select Register" },
+    { "TIO_CP_HOST_ERROR_INTERRUPT",
+	0x0128, RW, "Host Error Interrupt Field Register" },
+    { "TIO_CP_INTERRUPT_0_HOST_ADDRESS",
+	0x0130, RW, "Interrupt (0) Host Address Register" },
+    { "TIO_CP_INTERRUPT_1_HOST_ADDRESS",
+	0x0138, RW, "Interrupt (1) Host Address Register" },
+    { "TIO_CP_INTERRUPT_2_HOST_ADDRESS",
+	0x0140, RW, "Interrupt (2) Host Address Register" },
+    { "TIO_CP_INTERRUPT_3_HOST_ADDRESS",
+	0x0148, RW, "Interrupt (3) Host Address Register" },
+    { "TIO_CP_INTERRUPT_4_HOST_ADDRESS",
+	0x0150, RW, "Interrupt (4) Host Address Register" },
+    { "TIO_CP_INTERRUPT_5_HOST_ADDRESS",
+	0x0158, RW, "Interrupt (5) Host Address Register" },
+    { "TIO_CP_INTERRUPT_6_HOST_ADDRESS",
+	0x0160, RW, "Interrupt (6) Host Address Register" },
+    { "TIO_CP_INTERRUPT_7_HOST_ADDRESS",
+	0x0168, RW, "Interrupt (7) Host Address Register" },
+    { "TIO_CP_ERROR_INTERRUPT_VIEW",
+	0x0170, RO, "Error Interrupt View Register" },
+    { "TIO_CP_MULTIPLE_INTERRUPT",
+	0x0178, RO, "Multiple Interrupt Register" },
+    { "TIO_CP_FORCE_ALWAYS_INTERRUPT_0",
+	0x0180, WO, "Force Always Interrupt (0) Register" },
+    { "TIO_CP_FORCE_ALWAYS_INTERRUPT_1",
+	0x0188, WO, "Force Always Interrupt (1) Register" },
+    { "TIO_CP_FORCE_ALWAYS_INTERRUPT_2",
+	0x0190, WO, "Force Always Interrupt (2) Register" },
+    { "TIO_CP_FORCE_ALWAYS_INTERRUPT_3",
+	0x0198, WO, "Force Always Interrupt (3) Register" },
+    { "TIO_CP_FORCE_ALWAYS_INTERRUPT_4",
+	0x01a0, WO, "Force Always Interrupt (4) Register" },
+    { "TIO_CP_FORCE_ALWAYS_INTERRUPT_5",
+	0x01a8, WO, "Force Always Interrupt (5) Register" },
+    { "TIO_CP_FORCE_ALWAYS_INTERRUPT_6",
+	0x01b0, WO, "Force Always Interrupt (6) Register" },
+    { "TIO_CP_FORCE_ALWAYS_INTERRUPT_7",
+	0x01b8, WO, "Force Always Interrupt (7) Register" },
+    { "TIO_CP_FORCE_INTERRUPT_0",
+	0x01c0, WO, "Force Interrupt (0) Register" },
+    { "TIO_CP_FORCE_INTERRUPT_1",
+	0x01c8, WO, "Force Interrupt (1) Register" },
+    { "TIO_CP_FORCE_INTERRUPT_2",
+	0x01d0, WO, "Force Interrupt (2) Register" },
+    { "TIO_CP_FORCE_INTERRUPT_3",
+	0x01d8, WO, "Force Interrupt (3) Register" },
+    { "TIO_CP_FORCE_INTERRUPT_4",
+	0x01e0, WO, "Force Interrupt (4) Register" },
+    { "TIO_CP_FORCE_INTERRUPT_5",
+	0x01e8, WO, "Force Interrupt (5) Register" },
+    { "TIO_CP_FORCE_INTERRUPT_6",
+	0x01f0, WO, "Force Interrupt (6) Register" },
+    { "TIO_CP_FORCE_INTERRUPT_7",
+	0x01f8, WO, "Force Interrupt (7) Register" },
+    { "TIO_CP_DEVICE_0",
+	0x0200, RW, "Device (0)" },
+    { "TIO_CP_DEVICE_1",
+	0x0208, RW, "Device (1)" },
+    { "TIO_CP_DEVICE_2",
+	0x0210, RW, "Device (2)" },
+    { "TIO_CP_DEVICE_3",
+	0x0218, RW, "Device (3)" },
+    { "TIO_CP_DEVICE_0_WRITE_FLUSH",
+	0x0240, RO, "Device (0) Write Request Buffer Flush" },
+    { "TIO_CP_DEVICE_1_WRITE_FLUSH",
+	0x0248, RO, "Device (1) Write Request Buffer Flush" },
+    { "TIO_CP_DEVICE_2_WRITE_FLUSH",
+	0x0250, RO, "Device (2) Write Request Buffer Flush" },
+    { "TIO_CP_DEVICE_3_WRITE_FLUSH",
+	0x0258, RO, "Device (3) Write Request Buffer Flush" },
+    { "TIO_CP_EVEN_READ_RSP_BUFFER",
+	0x0280, RW, "Even Device Read Response Buffer Register (PCI Only)" },
+    { "TIO_CP_ODD_READ_RSP_BUFFER",
+	0x0288, RW, "Odd Device Read Response Buffer Register (PCI Only)" },
+    { "TIO_CP_READ_RSP_BUFFER_STATUS",
+	0x0290, RO, "Read Response Buffer Status Register (PCI Only)" },
+    { "TIO_CP_READ_RSP_BUFFER_CLEAR",
+	0x0298, WO, "Read Response Buffer Clear Register (PCI Only)" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_0",
+	0x0300, RO, "PCI Read Response Buffer (0) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_0",
+	0x0308, RO, "PCI Read Response Buffer (0) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_1",
+	0x0310, RO, "PCI Read Response Buffer (1) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_1",
+	0x0318, RO, "PCI Read Response Buffer (1) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_2",
+	0x0320, RO, "PCI Read Response Buffer (2) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_2",
+	0x0328, RO, "PCI Read Response Buffer (2) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_3",
+	0x0330, RO, "PCI Read Response Buffer (3) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_3",
+	0x0338, RO, "PCI Read Response Buffer (3) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_4",
+	0x0340, RO, "PCI Read Response Buffer (4) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_4",
+	0x0348, RO, "PCI Read Response Buffer (4) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_5",
+	0x0350, RO, "PCI Read Response Buffer (5) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_5",
+	0x0358, RO, "PCI Read Response Buffer (5) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_6",
+	0x0360, RO, "PCI Read Response Buffer (6) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_6",
+	0x0368, RO, "PCI Read Response Buffer (6) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_7",
+	0x0370, RO, "PCI Read Response Buffer (7) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_7",
+	0x0378, RO, "PCI Read Response Buffer (7) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_8",
+	0x0380, RO, "PCI Read Response Buffer (8) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_8",
+	0x0388, RO, "PCI Read Response Buffer (8) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_9",
+	0x0390, RO, "PCI Read Response Buffer (9) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_9",
+	0x0398, RO, "PCI Read Response Buffer (9) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_10",
+	0x03a0, RO, "PCI Read Response Buffer (10) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_10",
+	0x03a8, RO, "PCI Read Response Buffer (10) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_11",
+	0x03b0, RO, "PCI Read Response Buffer (11) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_11",
+	0x03b8, RO, "PCI Read Response Buffer (11) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_12",
+	0x03c0, RO, "PCI Read Response Buffer (12) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_12",
+	0x03c8, RO, "PCI Read Response Buffer (12) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_13",
+	0x03d0, RO, "PCI Read Response Buffer (13) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_13",
+	0x03d8, RO, "PCI Read Response Buffer (13) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_14",
+	0x03e0, RO, "PCI Read Response Buffer (14) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_14",
+	0x03e8, RO, "PCI Read Response Buffer (14) Lower Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_UPPER_ADDRESS_15",
+	0x03f0, RO, "PCI Read Response Buffer (15) Upper Address Match Register" },
+    { "TIO_CP_PCI_READ_RSP_BUFFER_LOWER_ADDRESS_15",
+	0x03f8, RO, "PCI Read Response Buffer (15) Lower Address Match Register" },
+    { "TIO_CP_PCI_BUFFER_0_FLUSH_COUNT_TOUCH",
+	0x0400, RW, "PCI Buffer (0) Flush Count with Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_0_FLUSH_COUNT_NOTOUCH",
+	0x0408, RW, "PCI Buffer (0) Flush Count w/o Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_0_REQ_COUNT",
+	0x0410, RW, "PCI Buffer (0) Request in Flight Count Register" },
+    { "TIO_CP_PCI_BUFFER_0_PREFETCH_COUNT",
+	0x0418, RW, "PCI Buffer (0) Prefetch Request Count Register" },
+    { "TIO_CP_PCI_BUFFER_0_RETRY_TOTAL_COUNT",
+	0x0420, RW, "PCI Buffer (0) Total PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_0_RETRY_MAX_COUNT",
+	0x0428, RW, "PCI Buffer (0) Max PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_0_MAX_LATENCY",
+	0x0430, RW, "PCI Buffer (0) Max Latency Count Register" },
+    { "TIO_CP_PCI_BUFFER_0_CLEAR",
+	0x0438, RW, "PCI Buffer (0) Clear All Register" },
+    { "TIO_CP_PCI_BUFFER_1_FLUSH_COUNT_TOUCH",
+	0x0440, RW, "PCI Buffer (1) Flush Count with Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_1_FLUSH_COUNT_NOTOUCH",
+	0x0448, RW, "PCI Buffer (1) Flush Count w/o Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_1_REQ_COUNT",
+	0x0450, RW, "PCI Buffer (1) Request in Flight Count Register" },
+    { "TIO_CP_PCI_BUFFER_1_PREFETCH_COUNT",
+	0x0458, RW, "PCI Buffer (1) Prefetch Request Count Register" },
+    { "TIO_CP_PCI_BUFFER_1_RETRY_TOTAL_COUNT",
+	0x0460, RW, "PCI Buffer (1) Total PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_1_RETRY_MAX_COUNT",
+	0x0468, RW, "PCI Buffer (1) Max PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_1_MAX_LATENCY",
+	0x0470, RW, "PCI Buffer (1) Max Latency Count Register" },
+    { "TIO_CP_PCI_BUFFER_1_CLEAR",
+	0x0478, RW, "PCI Buffer (1) Clear All Register" },
+    { "TIO_CP_PCI_BUFFER_2_FLUSH_COUNT_TOUCH",
+	0x0480, RW, "PCI Buffer (2) Flush Count with Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_2_FLUSH_COUNT_NOTOUCH",
+	0x0488, RW, "PCI Buffer (2) Flush Count w/o Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_2_REQ_COUNT",
+	0x0490, RW, "PCI Buffer (2) Request in Flight Count Register" },
+    { "TIO_CP_PCI_BUFFER_2_PREFETCH_COUNT",
+	0x0498, RW, "PCI Buffer (2) Prefetch Request Count Register" },
+    { "TIO_CP_PCI_BUFFER_2_RETRY_TOTAL_COUNT",
+	0x04a0, RW, "PCI Buffer (2) Total PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_2_RETRY_MAX_COUNT",
+	0x04a8, RW, "PCI Buffer (2) Max PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_2_MAX_LATENCY",
+	0x04b0, RW, "PCI Buffer (2) Max Latency Count Register" },
+    { "TIO_CP_PCI_BUFFER_2_CLEAR",
+	0x04b8, RW, "PCI Buffer (2) Clear All Register" },
+    { "TIO_CP_PCI_BUFFER_3_FLUSH_COUNT_TOUCH",
+	0x04c0, RW, "PCI Buffer (3) Flush Count with Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_3_FLUSH_COUNT_NOTOUCH",
+	0x04c8, RW, "PCI Buffer (3) Flush Count w/o Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_3_REQ_COUNT",
+	0x04d0, RW, "PCI Buffer (3) Request in Flight Count Register" },
+    { "TIO_CP_PCI_BUFFER_3_PREFETCH_COUNT",
+	0x04d8, RW, "PCI Buffer (3) Prefetch Request Count Register" },
+    { "TIO_CP_PCI_BUFFER_3_RETRY_TOTAL_COUNT",
+	0x04e0, RW, "PCI Buffer (3) Total PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_3_RETRY_MAX_COUNT",
+	0x04e8, RW, "PCI Buffer (3) Max PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_3_MAX_LATENCY",
+	0x04f0, RW, "PCI Buffer (3) Max Latency Count Register" },
+    { "TIO_CP_PCI_BUFFER_3_CLEAR",
+	0x04f8, RW, "PCI Buffer (3) Clear All Register" },
+    { "TIO_CP_PCI_BUFFER_4_FLUSH_COUNT_TOUCH",
+	0x0500, RW, "PCI Buffer (4) Flush Count with Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_4_FLUSH_COUNT_NOTOUCH",
+	0x0508, RW, "PCI Buffer (4) Flush Count w/o Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_4_REQ_COUNT",
+	0x0510, RW, "PCI Buffer (4) Request in Flight Count Register" },
+    { "TIO_CP_PCI_BUFFER_4_PREFETCH_COUNT",
+	0x0518, RW, "PCI Buffer (4) Prefetch Request Count Register" },
+    { "TIO_CP_PCI_BUFFER_4_RETRY_TOTAL_COUNT",
+	0x0520, RW, "PCI Buffer (4) Total PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_4_RETRY_MAX_COUNT",
+	0x0528, RW, "PCI Buffer (4) Max PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_4_MAX_LATENCY",
+	0x0530, RW, "PCI Buffer (4) Max Latency Count Register" },
+    { "TIO_CP_PCI_BUFFER_4_CLEAR",
+	0x0538, RW, "PCI Buffer (4) Clear All Register" },
+    { "TIO_CP_PCI_BUFFER_5_FLUSH_COUNT_TOUCH",
+	0x0540, RW, "PCI Buffer (5) Flush Count with Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_5_FLUSH_COUNT_NOTOUCH",
+	0x0548, RW, "PCI Buffer (5) Flush Count w/o Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_5_REQ_COUNT",
+	0x0550, RW, "PCI Buffer (5) Request in Flight Count Register" },
+    { "TIO_CP_PCI_BUFFER_5_PREFETCH_COUNT",
+	0x0558, RW, "PCI Buffer (5) Prefetch Request Count Register" },
+    { "TIO_CP_PCI_BUFFER_5_RETRY_TOTAL_COUNT",
+	0x0560, RW, "PCI Buffer (5) Total PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_5_RETRY_MAX_COUNT",
+	0x0568, RW, "PCI Buffer (5) Max PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_5_MAX_LATENCY",
+	0x0570, RW, "PCI Buffer (5) Max Latency Count Register" },
+    { "TIO_CP_PCI_BUFFER_5_CLEAR",
+	0x0578, RW, "PCI Buffer (5) Clear All Register" },
+    { "TIO_CP_PCI_BUFFER_6_FLUSH_COUNT_TOUCH",
+	0x0580, RW, "PCI Buffer (6) Flush Count with Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_6_FLUSH_COUNT_NOTOUCH",
+	0x0588, RW, "PCI Buffer (6) Flush Count w/o Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_6_REQ_COUNT",
+	0x0590, RW, "PCI Buffer (6) Request in Flight Count Register" },
+    { "TIO_CP_PCI_BUFFER_6_PREFETCH_COUNT",
+	0x0598, RW, "PCI Buffer (6) Prefetch Request Count Register" },
+    { "TIO_CP_PCI_BUFFER_6_RETRY_TOTAL_COUNT",
+	0x05a0, RW, "PCI Buffer (6) Total PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_6_RETRY_MAX_COUNT",
+	0x05a8, RW, "PCI Buffer (6) Max PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_6_MAX_LATENCY",
+	0x05b0, RW, "PCI Buffer (6) Max Latency Count Register" },
+    { "TIO_CP_PCI_BUFFER_6_CLEAR",
+	0x05b8, RW, "PCI Buffer (6) Clear All Register" },
+    { "TIO_CP_PCI_BUFFER_7_FLUSH_COUNT_TOUCH",
+	0x05c0, RW, "PCI Buffer (7) Flush Count with Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_7_FLUSH_COUNT_NOTOUCH",
+	0x05c8, RW, "PCI Buffer (7) Flush Count w/o Data Touch Register" },
+    { "TIO_CP_PCI_BUFFER_7_REQ_COUNT",
+	0x05d0, RW, "PCI Buffer (7) Request in Flight Count Register" },
+    { "TIO_CP_PCI_BUFFER_7_PREFETCH_COUNT",
+	0x05d8, RW, "PCI Buffer (7) Prefetch Request Count Register" },
+    { "TIO_CP_PCI_BUFFER_7_RETRY_TOTAL_COUNT",
+	0x05e0, RW, "PCI Buffer (7) Total PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_7_RETRY_MAX_COUNT",
+	0x05e8, RW, "PCI Buffer (7) Max PCI Retry Count Register" },
+    { "TIO_CP_PCI_BUFFER_7_MAX_LATENCY",
+	0x05f0, RW, "PCI Buffer (7) Max Latency Count Register" },
+    { "TIO_CP_PCI_BUFFER_7_CLEAR",
+	0x05f8, RW, "PCI Buffer (7) Clear All Register" },
+    { "TIO_CP_PCI_BUS_ERROR_ADDRESS",
+	0x0600, RO, "PCI-X Bus Error Address Register" },
+    { "TIO_CP_PCI_BUS_ERROR_ATTRIBUTE",
+	0x0608, RO, "PCI-X Bus Error Attribute Register" },
+    { "TIO_CP_PCI_BUS_ERROR_DATA",
+	0x0610, RO, "PCI-X Bus Error Data Register" },
+    { "TIO_CP_SPLIT_REQUEST_ADDRESS",
+	0x0618, RO, "PCI-X PIO Split Request Address Register" },
+    { "TIO_CP_SPLIT_REQUEST_ATTRIBUTE",
+	0x0620, RO, "PCI-X PIO Split Request Attribute Register" },
+    { "TIO_CP_DMA_ERROR_ATTRIBUTE",
+	0x0628, RO, "PCI-X DMA Request Error Attribute Register" },
+    { "TIO_CP_DMA_ERROR_ADDRESS",
+	0x0630, RO, "PCI-X DMA Request Error Address Register" },
+    { "TIO_CP_PCIX_TIMEOUT",
+	0x0638, RW, "PCI-X Timeout Register" },
+    { "TIO_CP_CT_DEBUG_CTL",
+	0x0700, RW, "Coretalk Debug Mux Control" },
+    { "TIO_CP_BR_DEBUG_CTL",
+	0x0708, RW, "Bridge Debug Mux Control" },
+    { "TIO_CP_MUX3_DEBUG_CTL",
+	0x0710, RW, "Debug Mux 3 Control" },
+    { "TIO_CP_MUX4_DEBUG_CTL",
+	0x0718, RW, "Debug Mux 4 Control" },
+    { "TIO_CP_MUX5_DEBUG_CTL",
+	0x0720, RW, "Debug Mux 5 Control" },
+    { "TIO_CP_MUX6_DEBUG_CTL",
+	0x0728, RW, "Debug Mux 6 Control" },
+    { "TIO_CP_MUX7_DEBUG_CTL",
+	0x0730, RW, "Debug Mux 7 Control" },
+};
+int tiocp_offsets_size = sizeof(tiocp_offsets)/sizeof(xb_reg_desc_t);
+#endif // CONFIG_KDB
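
For reference, a minimal sketch of how a KDB helper might consult a table like tiocp_offsets[] above. The struct below only mirrors the { name, offset, access, description } shape implied by the initializers; the real xb_reg_desc_t field names come from the SN headers and may differ.

/* Sketch only: illustrative stand-in for xb_reg_desc_t. */
struct reg_desc_sketch {
	const char	*name;		/* e.g. "TIO_CP_PCIX_TIMEOUT" */
	unsigned long	offset;		/* register offset, e.g. 0x0638 */
	int		access;		/* RO or RW */
	const char	*description;	/* human-readable register name */
};

/* Return the entry describing the register at 'offset', or NULL if none. */
static const struct reg_desc_sketch *
reg_desc_lookup(const struct reg_desc_sketch *table, int nentries,
		unsigned long offset)
{
	int i;

	for (i = 0; i < nentries; i++)
		if (table[i].offset == offset)
			return &table[i];
	return NULL;
}
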
diff -Nru a/arch/ia64/sn/io/sn2/xbow.c b/arch/ia64/sn/io/sn2/xbow.c
--- a/arch/ia64/sn/io/sn2/xbow.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/xbow.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -7,39 +6,19 @@
  * Copyright (c) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
  */
 
-#include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/sched.h>
 #include <linux/interrupt.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/intr.h>
 #include <asm/sn/sn2/sn_private.h>
-#include <asm/sn/sn2/shubio.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/xtalk/xtalk_private.h>
 #include <asm/sn/simulator.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
 
 /* #define DEBUG		1 */
 /* #define XBOW_DEBUG	1 */
-/* #define DEBUG_ERROR	1 */
-
-
-/*
- * Files needed to get the device driver entry points
- */
-
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/xtalk/xswitch.h>
-#include <asm/sn/xtalk/xwidget.h>
 
-#include <asm/sn/prio.h>
-#include <asm/sn/hcl_util.h>
+#define kdebug 0
 
 
 #define NEW(ptr)	(ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
@@ -47,7 +26,7 @@
 
 /*
  * This file supports the Xbow chip.  Main functions: initializtion,
- * error handling, and GBR.
+ * error handling.
  */
 
 /*
@@ -55,7 +34,6 @@
  * has a "fastinfo" pointer pointing at one
  * of these things.
  */
-typedef struct xbow_soft_s *xbow_soft_t;
 
 struct xbow_soft_s {
     vertex_hdl_t            conn;	/* our connection point */
@@ -64,30 +42,24 @@
     xbow_t                 *base;	/* PIO pointer to crossbow chip */
     char                   *name;	/* hwgraph name */
 
-    xbow_perf_t             xbow_perfcnt[XBOW_PERF_COUNTERS];
-    xbow_perf_link_t        xbow_perflink[MAX_XBOW_PORTS];
     xbow_link_status_t      xbow_link_status[MAX_XBOW_PORTS];
-    spinlock_t              xbow_perf_lock;
-    int                     link_monitor;
     widget_cfg_t	   *wpio[MAX_XBOW_PORTS];	/* cached PIO pointer */
 
     /* Bandwidth allocation state. Bandwidth values are for the
      * destination port since contention happens there.
      * Implicit mapping from xbow ports (8..f) -> (0..7) array indices.
      */
-    spinlock_t		    xbow_bw_alloc_lock;		/* bw allocation lock */
     unsigned long long	    bw_hiwm[MAX_XBOW_PORTS];	/* hiwater mark values */
     unsigned long long      bw_cur_used[MAX_XBOW_PORTS]; /* bw used currently */
 };
 
 #define xbow_soft_set(v,i)	hwgraph_fastinfo_set((v), (arbitrary_info_t)(i))
-#define xbow_soft_get(v)	((xbow_soft_t)hwgraph_fastinfo_get((v)))
+#define xbow_soft_get(v)	((struct xbow_soft_s *)hwgraph_fastinfo_get((v)))
 
 /*
  * Function Table of Contents
  */
 
-void                    xbow_mlreset(xbow_t *);
 int                     xbow_attach(vertex_hdl_t);
 
 int                     xbow_widget_present(xbow_t *, int);
@@ -95,19 +67,6 @@
 vertex_hdl_t            xbow_widget_lookup(vertex_hdl_t, int);
 
 void                    xbow_intr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
-
-
-
-void                    xbow_update_perf_counters(vertex_hdl_t);
-xbow_perf_link_t       *xbow_get_perf_counters(vertex_hdl_t);
-int                     xbow_enable_perf_counter(vertex_hdl_t, int, int, int);
-xbow_link_status_t     *xbow_get_llp_status(vertex_hdl_t);
-void                    xbow_update_llp_status(vertex_hdl_t);
-
-int                     xbow_disable_llp_monitor(vertex_hdl_t);
-int                     xbow_enable_llp_monitor(vertex_hdl_t);
-int                     xbow_prio_bw_alloc(vertex_hdl_t, xwidgetnum_t, xwidgetnum_t,
-                                unsigned long long, unsigned long long);
 static void		xbow_setwidint(xtalk_intr_t);
 
 xswitch_reset_link_f    xbow_reset_link;
@@ -122,12 +81,12 @@
 xbow_mmap(struct file * file, struct vm_area_struct * vma)
 {
         unsigned long           phys_addr;
-        int                     error = 0;
+        int                     error;
 
         phys_addr = (unsigned long)file->private_data & ~0xc000000000000000; /* Mask out the Uncache bits */
         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-        vma->vm_flags |= VM_RESERVED | VM_IO;
-        error = io_remap_page_range(vma, phys_addr, vma->vm_start,
+        vma->vm_flags |= VM_NONCACHED | VM_RESERVED | VM_IO;
+        error = io_remap_page_range(vma, vma->vm_start, phys_addr,
                                    vma->vm_end-vma->vm_start,
                                    vma->vm_page_prot);
         return(error);
@@ -143,23 +102,6 @@
         .mmap		= xbow_mmap,
 };
 
-/*
- *    xbow_mlreset: called at mlreset time if the
- *      platform specific code determines that there is
- *      a crossbow in a critical path that must be
- *      functional before the driver would normally get
- *      the device properly set up.
- *
- *      what do we need to do, that the boot prom can
- *      not be counted on to have already done, that is
- *      generic across all platforms using crossbows?
- */
-/*ARGSUSED */
-void
-xbow_mlreset(xbow_t * xbow)
-{
-}
-
 #ifdef XBRIDGE_REGS_SIM
 /*    xbow_set_simulated_regs: sets xbow regs as needed
  *	for powering through the boot
@@ -201,8 +143,8 @@
     /*REFERENCED */
     vertex_hdl_t            vhdl;
     vertex_hdl_t            busv;
-    xbow_t                 *xbow;
-    xbow_soft_t             soft;
+    xbow_t                  *xbow;
+    struct xbow_soft_s      *soft;
     int                     port;
     xswitch_info_t          info;
     xtalk_intr_t            intr_hdl;
@@ -211,15 +153,14 @@
     int                     rev;
     int			    i;
     int			    xbow_num;
+#if DEBUG && ATTACH_DEBUG
+    char		    name[MAXDEVNAME];
+#endif
     static void xbow_errintr_handler(int, void *, struct pt_regs *);
 
 	
 #if DEBUG && ATTACH_DEBUG
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-    printk("%v: xbow_attach\n", conn);
-#else
-    printk("0x%x: xbow_attach\n", conn);
-#endif
+    printk("%s: xbow_attach\n", vertex_to_name(conn, name, MAXDEVNAME));
 #endif
 
     /*
@@ -229,6 +170,8 @@
 #ifdef XBRIDGE_REGS_SIM
     printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: allocating %ld bytes for xbow_s\n", sizeof(xbow_t));
     xbow = (xbow_t *) kmalloc(sizeof(xbow_t), GFP_KERNEL);
+    if (!xbow)
+	    return -ENOMEM;
     /*
      * turn on ports e and f like in a real live ibrick
      */
@@ -260,7 +203,6 @@
      * Register a xbow driver with hwgraph.
      * file ops.
      */
-    vhdl = NULL;
     vhdl = hwgraph_register(conn, EDGE_LBL_XBOW, 0,
 	   0, 0, 0,
 	   S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
@@ -274,7 +216,9 @@
      * Allocate the soft state structure and attach
      * it to the xbow's vertex
      */
-    NEW(soft);
+    soft = kmalloc(sizeof(*soft), GFP_KERNEL);
+    if (!soft)
+	    return -ENOMEM;
     soft->conn = conn;
     soft->vhdl = vhdl;
     soft->busv = busv;
@@ -298,6 +242,10 @@
     s = dev_to_name(vhdl, devnm, MAXDEVNAME);
     soft->name = kmalloc(strlen(s) + strlen(XBOW_NUM_SUFFIX_FORMAT) + 1, 
 			    GFP_KERNEL);
+    if (!soft->name) {
+	    kfree(soft);
+	    return -ENOMEM;
+    }
     sprintf(soft->name,"%s"XBOW_NUM_SUFFIX_FORMAT, s,xbow_num);
 
 #ifdef XBRIDGE_REGS_SIM
@@ -311,14 +259,6 @@
 #endif /* XBRIDGE_REGS_SIM */
     rev = XWIDGET_PART_REV_NUM(id);
 
-    mutex_spinlock_init(&soft->xbow_perf_lock);
-    soft->xbow_perfcnt[0].xp_perf_reg = &xbow->xb_perf_ctr_a;
-    soft->xbow_perfcnt[1].xp_perf_reg = &xbow->xb_perf_ctr_b;
-
-    /* Initialization for GBR bw allocation */
-    mutex_spinlock_init(&soft->xbow_bw_alloc_lock);
-
-#define	XBOW_8_BIT_PORT_BW_MAX		(400 * 1000 * 1000)	/* 400 MB/s */
 #define XBOW_16_BIT_PORT_BW_MAX		(800 * 1000 * 1000)	/* 800 MB/s */
 
     /* Set bandwidth hiwatermark and current values */
@@ -419,43 +359,6 @@
     return 0;				/* attach successful */
 }
 
-/* This contains special-case code for grio. There are plans to make
- * this general sometime in the future, but till then this should
- * be good enough.
- */
-xwidgetnum_t
-xbow_widget_num_get(vertex_hdl_t dev)
-{
-	vertex_hdl_t	tdev;
-	char		devname[MAXDEVNAME];
-	xwidget_info_t	xwidget_info;
-	int		i;
-
-	vertex_to_name(dev, devname, MAXDEVNAME);
-
-	/* If this is a pci controller vertex, traverse up using
-	 * the ".." links to get to the widget.
-	 */
-	if (strstr(devname, EDGE_LBL_PCI) &&
-			strstr(devname, EDGE_LBL_CONTROLLER)) {
-		tdev = dev;
-		for (i=0; i< 2; i++) {
-			if (hwgraph_edge_get(tdev,
-				HWGRAPH_EDGELBL_DOTDOT, &tdev) !=
-					GRAPH_SUCCESS)
-				return XWIDGET_NONE;
-		}
-
-		if ((xwidget_info = xwidget_info_chk(tdev)) != NULL) {
-			return (xwidget_info_id_get(xwidget_info));
-		} else {
-			return XWIDGET_NONE;
-		}
-	}
-
-	return XWIDGET_NONE;
-}
-
 /*
  * xbow_widget_present: See if a device is present
  * on the specified port of this crossbow.
@@ -477,6 +380,10 @@
 					IS_PIC_XBOW(xbow->xb_wid_id) && port==0xf) {
 			return 1;
 		}
+		else if ( IS_PIC_XBOW(xbow->xb_wid_id) && port==0xb ) {
+			/* For Opus, the present bit doesn't work on port 0xb */
+			return 1;
+		}
 		return xbow->xb_link(port).link_aux_status & XB_AUX_STAT_PRESENT;
 	}
 }
@@ -562,10 +469,8 @@
 				    XEM_ADD_NVAR("ioe." #n, p);		\
 				}
 
-int                     xbow_xmit_retry_errors;
-
 int
-xbow_xmit_retry_error(xbow_soft_t soft,
+xbow_xmit_retry_error(struct xbow_soft_s *soft,
 		      int port)
 {
     xswitch_info_t          info;
@@ -600,46 +505,18 @@
     part = XWIDGET_PART_NUM(id);
     mfgr = XWIDGET_MFG_NUM(id);
 
-    /* If this thing is not a Bridge,
-     * do not activate the WAR, and
-     * tell our caller we do not need
-     * to be called again.
-     */
-    if ((part != BRIDGE_WIDGET_PART_NUM) ||
-	(mfgr != BRIDGE_WIDGET_MFGR_NUM)) {
-		/* FIXME: add Xbridge to the WAR.
-		 * Shouldn't hurt anything.  Later need to
-		 * check if we can remove this.
-                 */
-    		if ((part != XBRIDGE_WIDGET_PART_NUM) ||
-		    (mfgr != XBRIDGE_WIDGET_MFGR_NUM))
-			return 0;
-    }
-
-    /* count how many times we
-     * have picked up after
-     * LLP Transmit problems.
-     */
-    xbow_xmit_retry_errors++;
-
-    /* rewrite the control register
-     * to fix things up.
-     */
-    wid->w_control = wid->w_control;
-    wid->w_control;
-
-    return 1;
+    return 0;
 }
 
 /*
  * xbow_errintr_handler will be called if the xbow
  * sends an interrupt request to report an error.
  */
-static void
+static irqreturn_t
 xbow_errintr_handler(int irq, void *arg, struct pt_regs *ep)
 {
     ioerror_t               ioe[1];
-    xbow_soft_t             soft = (xbow_soft_t) arg;
+    struct xbow_soft_s     *soft = (struct xbow_soft_s *)arg;
     xbow_t                 *xbow = soft->base;
     xbowreg_t               wid_control;
     xbowreg_t               wid_stat;
@@ -805,8 +682,9 @@
 	XEM_ADD_VAR(wid_err_upper);
 	XEM_ADD_VAR(wid_err_lower);
 	XEM_ADD_VAR(wid_err_addr);
-	PRINT_PANIC("XIO Bus Error");
+	panic("XIO Bus Error");
     }
+    return IRQ_HANDLED;
 }
 
 /*
@@ -831,10 +709,10 @@
 		      ioerror_mode_t mode,
 		      ioerror_t *ioerror)
 {
-    int                     retval = IOERROR_WIDGETLEVEL;
+    int                    retval = IOERROR_WIDGETLEVEL;
 
-    xbow_soft_t             soft = (xbow_soft_t) einfo;
-    int                     port;
+    struct xbow_soft_s    *soft = (struct xbow_soft_s *) einfo;
+    int                   port;
     vertex_hdl_t          conn;
     vertex_hdl_t          busv;
 
@@ -958,8 +836,9 @@
     if (XBOW_WAR_ENABLED(PV854827, xbow->xb_wid_id) &&
 		IS_PIC_XBOW(xbow->xb_wid_id) && (port==0xf))
 		;
-    else
-    if (!(link_aux_status & XB_AUX_STAT_PRESENT)) {
+    else if (IS_PIC_XBOW(xbow->xb_wid_id) && (port==0xb))
+		;	/* WAR: for Opus, the present bit is missing on port 0xb */
+    else if (!(link_aux_status & XB_AUX_STAT_PRESENT)) {
 	/* nobody connected. */
 	if (mode == MODE_DEVPROBE)
 	    return IOERROR_HANDLED;
@@ -1087,173 +966,6 @@
     return retval;
 }
 
-void
-xbow_update_perf_counters(vertex_hdl_t vhdl)
-{
-    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
-    xbow_perf_t            *xbow_perf = xbow_soft->xbow_perfcnt;
-    xbow_perf_link_t       *xbow_plink = xbow_soft->xbow_perflink;
-    xbow_perfcount_t        perf_reg;
-    unsigned long           s;
-    int                     link, i;
-
-    for (i = 0; i < XBOW_PERF_COUNTERS; i++, xbow_perf++) {
-	if (xbow_perf->xp_mode == XBOW_MONITOR_NONE)
-	    continue;
-
-	s = mutex_spinlock(&xbow_soft->xbow_perf_lock);
-
-	perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
-
-	link = perf_reg.xb_perf.link_select;
-
-	(xbow_plink + link)->xlp_cumulative[xbow_perf->xp_curmode] +=
-	    ((perf_reg.xb_perf.count - xbow_perf->xp_current) & XBOW_COUNTER_MASK);
-	xbow_perf->xp_current = perf_reg.xb_perf.count;
-
-	mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
-    }
-}
-
-xbow_perf_link_t       *
-xbow_get_perf_counters(vertex_hdl_t vhdl)
-{
-    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
-    xbow_perf_link_t       *xbow_perf_link = xbow_soft->xbow_perflink;
-
-    return xbow_perf_link;
-}
-
-int
-xbow_enable_perf_counter(vertex_hdl_t vhdl, int link, int mode, int counter)
-{
-    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
-    xbow_perf_t            *xbow_perf = xbow_soft->xbow_perfcnt;
-    xbow_linkctrl_t         xbow_link_ctrl;
-    xbow_t                 *xbow = xbow_soft->base;
-    xbow_perfcount_t        perf_reg;
-    unsigned long           s;
-    int                     i;
-
-    link -= BASE_XBOW_PORT;
-    if ((link < 0) || (link >= MAX_XBOW_PORTS))
-	return -1;
-
-    if ((mode < XBOW_MONITOR_NONE) || (mode > XBOW_MONITOR_DEST_LINK))
-	return -1;
-
-    if ((counter < 0) || (counter >= XBOW_PERF_COUNTERS))
-	return -1;
-
-    s = mutex_spinlock(&xbow_soft->xbow_perf_lock);
-
-    if ((xbow_perf + counter)->xp_mode && mode) {
-	mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
-	return -1;
-    }
-    for (i = 0; i < XBOW_PERF_COUNTERS; i++) {
-	if (i == counter)
-	    continue;
-	if (((xbow_perf + i)->xp_link == link) &&
-	    ((xbow_perf + i)->xp_mode)) {
-	    mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
-	    return -1;
-	}
-    }
-    xbow_perf += counter;
-
-    xbow_perf->xp_curlink = xbow_perf->xp_link = link;
-    xbow_perf->xp_curmode = xbow_perf->xp_mode = mode;
-
-    xbow_link_ctrl.xbl_ctrlword = xbow->xb_link_raw[link].link_control;
-    xbow_link_ctrl.xb_linkcontrol.perf_mode = mode;
-    xbow->xb_link_raw[link].link_control = xbow_link_ctrl.xbl_ctrlword;
-
-    perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
-    perf_reg.xb_perf.link_select = link;
-    *(xbowreg_t *) xbow_perf->xp_perf_reg = perf_reg.xb_counter_val;
-    xbow_perf->xp_current = perf_reg.xb_perf.count;
-
-    mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
-    return 0;
-}
-
-xbow_link_status_t     *
-xbow_get_llp_status(vertex_hdl_t vhdl)
-{
-    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
-    xbow_link_status_t     *xbow_llp_status = xbow_soft->xbow_link_status;
-
-    return xbow_llp_status;
-}
-
-void
-xbow_update_llp_status(vertex_hdl_t vhdl)
-{
-    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
-    xbow_link_status_t     *xbow_llp_status = xbow_soft->xbow_link_status;
-    xbow_t                 *xbow;
-    xbwX_stat_t             lnk_sts;
-    xbow_aux_link_status_t  aux_sts;
-    int                     link;
-    vertex_hdl_t	    xwidget_vhdl;
-    char		   *xwidget_name;	
-
-    xbow = (xbow_t *) xbow_soft->base;
-    for (link = 0; link < MAX_XBOW_PORTS; link++, xbow_llp_status++) {
-	/* Get the widget name corresponding the current link.
-	 * Note : 0 <= link < MAX_XBOW_PORTS(8).
-	 * 	  BASE_XBOW_PORT(0x8) <= xwidget number < MAX_PORT_NUM (0x10)
-	 */
-	xwidget_vhdl = xbow_widget_lookup(xbow_soft->busv,link+BASE_XBOW_PORT);
-	xwidget_name = xwidget_name_get(xwidget_vhdl);
-	aux_sts.aux_linkstatus
-	    = xbow->xb_link_raw[link].link_aux_status;
-	lnk_sts.linkstatus = xbow->xb_link_raw[link].link_status_clr;
-
-	if (lnk_sts.link_alive == 0)
-	    continue;
-
-	xbow_llp_status->rx_err_count +=
-	    aux_sts.xb_aux_linkstatus.rx_err_cnt;
-
-	xbow_llp_status->tx_retry_count +=
-	    aux_sts.xb_aux_linkstatus.tx_retry_cnt;
-
-	if (lnk_sts.linkstatus & ~(XB_STAT_RCV_ERR | XB_STAT_XMT_RTRY_ERR | XB_STAT_LINKALIVE)) {
-#ifdef	LATER
-	    printk(KERN_WARNING  "link %d[%s]: bad status 0x%x\n",
-		    link, xwidget_name, lnk_sts.linkstatus);
-#endif
-	}
-    }
-}
-
-int
-xbow_disable_llp_monitor(vertex_hdl_t vhdl)
-{
-    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
-    int                     port;
-
-    for (port = 0; port < MAX_XBOW_PORTS; port++) {
-	xbow_soft->xbow_link_status[port].rx_err_count = 0;
-	xbow_soft->xbow_link_status[port].tx_retry_count = 0;
-    }
-
-    xbow_soft->link_monitor = 0;
-    return 0;
-}
-
-int
-xbow_enable_llp_monitor(vertex_hdl_t vhdl)
-{
-    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
-
-    xbow_soft->link_monitor = 1;
-    return 0;
-}
-
-
 int
 xbow_reset_link(vertex_hdl_t xconn_vhdl)
 {
@@ -1262,15 +974,10 @@
     xbow_t                 *xbow;
     xbowreg_t               ctrl;
     xbwX_stat_t             stat;
-    unsigned                itick;
-    unsigned                dtick;
-    static int              ticks_per_ms = 0;
-
-    if (!ticks_per_ms) {
-	itick = get_timestamp();
-	us_delay(1000);
-	ticks_per_ms = get_timestamp() - itick;
-    }
+    unsigned long           itick;
+    unsigned int            dtick;
+    static long             ticks_to_wait = HZ / 1000;
+
     widget_info = xwidget_info_get(xconn_vhdl);
     port = xwidget_info_id_get(widget_info);
 
@@ -1279,7 +986,7 @@
 #else
     {
 	vertex_hdl_t            xbow_vhdl;
-	xbow_soft_t             xbow_soft;
+	struct xbow_soft_s      *xbow_soft;
 
 	hwgraph_traverse(xconn_vhdl, ".master/xtalk/0/xbow", &xbow_vhdl);
 	xbow_soft = xbow_soft_get(xbow_vhdl);
@@ -1296,155 +1003,17 @@
      */
     ctrl = xbow->xb_link(port).link_control;
     xbow->xb_link(port).link_reset = 0;
-    itick = get_timestamp();
+    itick = jiffies;
     while (1) {
 	stat.linkstatus = xbow->xb_link(port).link_status;
 	if (stat.link_alive)
 	    break;
-	dtick = get_timestamp() - itick;
-	if (dtick > ticks_per_ms) {
+	dtick = jiffies - itick;
+	if (dtick > ticks_to_wait) {
 	    return -1;			/* never came out of reset */
 	}
-	DELAY(2);			/* don't beat on link_status */
+	udelay(2);			/* don't beat on link_status */
     }
     xbow->xb_link(port).link_control = ctrl;
     return 0;
-}
-
-#define XBOW_ARB_RELOAD_TICKS		25
-					/* granularity: 4 MB/s, max: 124 MB/s */
-#define GRANULARITY			((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)
-
-#define XBOW_BYTES_TO_GBR(BYTES_per_s)	(int) (BYTES_per_s / GRANULARITY)
-
-#define XBOW_GBR_TO_BYTES(cnt)		(bandwidth_t) ((cnt) * GRANULARITY)
-
-#define CEILING_BYTES_TO_GBR(gbr, bytes_per_sec)	\
-			((XBOW_GBR_TO_BYTES(gbr) < bytes_per_sec) ? gbr+1 : gbr)
-
-#define XBOW_ARB_GBR_MAX		31
-
-#define ABS(x)				((x > 0) ? (x) : (-1 * x))
-					/* absolute value */
-
-int
-xbow_bytes_to_gbr(bandwidth_t old_bytes_per_sec, bandwidth_t bytes_per_sec)
-{
-    int                     gbr_granted;
-    int                     new_total_gbr;
-    int                     change_gbr;
-    bandwidth_t             new_total_bw;
-
-#ifdef GRIO_DEBUG
-    printk("xbow_bytes_to_gbr: old_bytes_per_sec %lld bytes_per_sec %lld\n",
-		old_bytes_per_sec, bytes_per_sec);
-#endif	/* GRIO_DEBUG */
-
-    gbr_granted = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(old_bytes_per_sec)),
-			old_bytes_per_sec);
-    new_total_bw = old_bytes_per_sec + bytes_per_sec;
-    new_total_gbr = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(new_total_bw)),
-			new_total_bw);
-
-    change_gbr = new_total_gbr - gbr_granted;
-
-#ifdef GRIO_DEBUG
-    printk("xbow_bytes_to_gbr: gbr_granted %d new_total_gbr %d change_gbr %d\n",
-		gbr_granted, new_total_gbr, change_gbr);
-#endif	/* GRIO_DEBUG */
-
-    return (change_gbr);
-}
-
-/* Conversion from GBR to bytes */
-bandwidth_t
-xbow_gbr_to_bytes(int gbr)
-{
-    return (XBOW_GBR_TO_BYTES(gbr));
-}
-
-/* Given the vhdl for the desired xbow, the src and dest. widget ids
- * and the req_bw value, this xbow driver entry point accesses the
- * xbow registers and allocates the desired bandwidth if available.
- *
- * If bandwidth allocation is successful, return success else return failure.
- */
-int
-xbow_prio_bw_alloc(vertex_hdl_t vhdl,
-		xwidgetnum_t src_wid,
-		xwidgetnum_t dest_wid,
-		unsigned long long old_alloc_bw,
-		unsigned long long req_bw)
-{
-    xbow_soft_t             soft = xbow_soft_get(vhdl);
-    volatile xbowreg_t     *xreg;
-    xbowreg_t               mask;
-    unsigned long           s;
-    int                     error = 0;
-    bandwidth_t             old_bw_BYTES, req_bw_BYTES;
-    xbowreg_t               old_xreg;
-    int                     old_bw_GBR, req_bw_GBR, new_bw_GBR;
-
-#ifdef GRIO_DEBUG
-    printk("xbow_prio_bw_alloc: vhdl %d src_wid %d dest_wid %d req_bw %lld\n",
-		(int) vhdl, (int) src_wid, (int) dest_wid, req_bw);
-#endif
-
-    ASSERT(XBOW_WIDGET_IS_VALID(src_wid));
-    ASSERT(XBOW_WIDGET_IS_VALID(dest_wid));
-
-    s = mutex_spinlock(&soft->xbow_bw_alloc_lock);
-
-    /* Get pointer to the correct register */
-    xreg = XBOW_PRIO_ARBREG_PTR(soft->base, dest_wid, src_wid);
-
-    /* Get mask for GBR count value */
-    mask = XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(src_wid);
-
-    req_bw_GBR = xbow_bytes_to_gbr(old_alloc_bw, req_bw);
-    req_bw_BYTES = (req_bw_GBR < 0) ? (-1 * xbow_gbr_to_bytes(ABS(req_bw_GBR)))
-		: xbow_gbr_to_bytes(req_bw_GBR);
-
-#ifdef GRIO_DEBUG
-    printk("req_bw %lld req_bw_BYTES %lld req_bw_GBR %d\n",
-		req_bw, req_bw_BYTES, req_bw_GBR);
-#endif	/* GRIO_DEBUG */
-
-    old_bw_BYTES = soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS];
-    old_xreg = *xreg;
-    old_bw_GBR = (((*xreg) & mask) >> XB_ARB_GBR_SHFT(src_wid));
-
-#ifdef GRIO_DEBUG
-    ASSERT(XBOW_BYTES_TO_GBR(old_bw_BYTES) == old_bw_GBR);
-
-    printk("old_bw_BYTES %lld old_bw_GBR %d\n", old_bw_BYTES, old_bw_GBR);
-
-    printk("req_bw_BYTES %lld old_bw_BYTES %lld soft->bw_hiwm %lld\n",
-		req_bw_BYTES, old_bw_BYTES,
-		soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]);
-	   
-#endif				/* GRIO_DEBUG */
-
-    /* Accept the request only if we don't exceed the destination
-     * port HIWATER_MARK *AND* the max. link GBR arbitration count
-     */
-    if (((old_bw_BYTES + req_bw_BYTES) <=
-		soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]) &&
-		(req_bw_GBR + old_bw_GBR <= XBOW_ARB_GBR_MAX)) {
-
-	new_bw_GBR = (old_bw_GBR + req_bw_GBR);
-
-	/* Set this in the xbow link register */
-	*xreg = (old_xreg & ~mask) | \
-	    (new_bw_GBR << XB_ARB_GBR_SHFT(src_wid) & mask);
-
-	soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS] =
-			xbow_gbr_to_bytes(new_bw_GBR);
-    } else {
-	error = 1;
-    }
-
-    mutex_spinunlock(&soft->xbow_bw_alloc_lock, s);
-
-    return (error);
 }
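
The xbow_reset_link() hunk above drops the get_timestamp()/us_delay() calibration in favour of a plain jiffies budget. Below is a minimal sketch of the same polling pattern, assuming a stand-in read_link_alive() for the xb_link(port).link_status read; it uses time_after() and floors the budget at one tick so that HZ / 1000 rounding down to zero (for HZ below 1000) cannot cause an almost-immediate timeout.

#include <linux/jiffies.h>
#include <linux/delay.h>

/* Stand-in for reading xbow->xb_link(port).link_status.link_alive. */
extern int read_link_alive(int port);

/* Poll for roughly one millisecond, returning 0 once the link is alive. */
static int wait_link_alive(int port)
{
	unsigned long ticks = (HZ / 1000) ? (HZ / 1000) : 1;
	unsigned long deadline = jiffies + ticks;

	while (!read_link_alive(port)) {
		if (time_after(jiffies, deadline))
			return -1;	/* link never came out of reset */
		udelay(2);		/* don't beat on the status register */
	}
	return 0;
}
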
diff -Nru a/arch/ia64/sn/io/sn2/xtalk.c b/arch/ia64/sn/io/sn2/xtalk.c
--- a/arch/ia64/sn/io/sn2/xtalk.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/sn2/xtalk.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -13,7 +12,6 @@
 #include <asm/sn/driver.h>
 #include <asm/sn/io.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/hcl_util.h>
@@ -23,9 +21,9 @@
 #include <asm/sn/xtalk/xtalk_private.h>
 
 /*
- * Implement crosstalk provider operations.  The xtalk* layer provides a
- * platform-independent interface for crosstalk devices.  This layer
- * switches among the possible implementations of a crosstalk adapter.
+ * Implement io channel provider operations.  The xtalk* layer provides a
+ * platform-independent interface for io channel devices.  This layer
+ * switches among the possible implementations of an io channel adapter.
  *
  * On platforms with only one possible xtalk provider, macros can be
  * set up at the top that cause the table lookups and indirections to
@@ -37,11 +35,6 @@
 
 char                    widget_info_fingerprint[] = "widget_info";
 
-#define	DEV_FUNC(dev,func)	hub_##func
-#define	CAST_PIOMAP(x)		((hub_piomap_t)(x))
-#define	CAST_DMAMAP(x)		((hub_dmamap_t)(x))
-#define	CAST_INTR(x)		((hub_intr_t)(x))
-
 /* =====================================================================
  *            Function Table of Contents
  */
@@ -71,7 +64,6 @@
 void                    xtalk_intr_disconnect(xtalk_intr_t);
 vertex_hdl_t            xtalk_intr_cpu_get(xtalk_intr_t);
 int                     xtalk_error_handler(vertex_hdl_t, int, ioerror_mode_t, ioerror_t *);
-int                     xtalk_error_devenable(vertex_hdl_t, int, int);
 void                    xtalk_provider_startup(vertex_hdl_t);
 void                    xtalk_provider_shutdown(vertex_hdl_t);
 vertex_hdl_t            xtalk_intr_dev_get(xtalk_intr_t);
@@ -121,6 +113,7 @@
 #define	CAST_PIOMAP(x)		((xtalk_piomap_t)(x))
 #define	CAST_DMAMAP(x)		((xtalk_dmamap_t)(x))
 #define	CAST_INTR(x)		((xtalk_intr_t)(x))
+xtalk_provider_t * xwidget_info_pops_get(xwidget_info_t info);
 
 static xtalk_provider_t *
 xwidget_to_provider_fns(vertex_hdl_t xconn)
@@ -136,6 +129,17 @@
 
     return (provider_fns);
 }
+
+xtalk_provider_t *
+xwidget_info_pops_get(xwidget_info_t info) {
+	vertex_hdl_t master = info->w_master;
+	xtalk_provider_t *provider_fns;
+
+	provider_fns = xtalk_provider_fns_get(master);
+
+	ASSERT(provider_fns != NULL);
+	return provider_fns;
+}
 #endif
 
 /*
@@ -273,13 +277,13 @@
 }
 
 /* xtalk_early_piotrans_addr:
- * figure out a PIO address for the "nth" crosstalk widget that
+ * figure out a PIO address for the "nth" io channel widget that
  * matches the specified part and mfgr number. Returns NULL if
  * there is no such widget, or if the requested mapping can not
  * be constructed.
- * Limitations on which crosstalk slots (and busses) are
+ * Limitations on which io channel slots (and busses) are
  * checked, and definitions of the ordering of the search across
- * the crosstalk slots, are defined by the platform.
+ * the io channel slots, are defined by the platform.
  */
 caddr_t
 xtalk_early_piotrans_addr(xwidget_part_num_t part_num,
@@ -310,7 +314,7 @@
 			       unsigned flags)
 {
 #if DEBUG
-    PRINT_PANIC("null_xtalk_early_piotrans_addr");
+    panic("null_xtalk_early_piotrans_addr");
 #endif
     return NULL;
 }
@@ -318,7 +322,7 @@
 /* =====================================================================
  *                    DMA MANAGEMENT
  *
- *      For mapping from crosstalk space to system
+ *      For mapping from io channel space to system
  *      physical space.
  */
 
@@ -415,7 +419,7 @@
 /* =====================================================================
  *                    INTERRUPT MANAGEMENT
  *
- *      Allow crosstalk devices to establish interrupts
+ *      Allow io channel devices to establish interrupts
  */
 
 /*
@@ -510,8 +514,8 @@
  * This routine plays two roles during error delivery
  * to most widgets: first, the external agent (heart,
  * hub, or whatever) calls in with the error and the
- * connect point representing the crosstalk switch,
- * or whatever crosstalk device is directly connected
+ * connect point representing the io channel switch,
+ * or whatever io channel device is directly connected
  * to the agent.
  *
  * If there is a switch, it will generally look at the
@@ -528,6 +532,8 @@
 		       ioerror_t *ioerror)
 {
     xwidget_info_t          xwidget_info;
+    char		    name[MAXDEVNAME];
+
 
     xwidget_info = xwidget_info_get(xconn);
     /* Make sure that xwidget_info is a valid pointer before derefencing it.
@@ -548,46 +554,34 @@
 	(mode == MODE_DEVREENABLE))
 	return IOERROR_HANDLED;
 
-#if defined(SUPPORT_PRINTING_V_FORMAT)
-    printk(KERN_WARNING "Xbow at %v encountered Fatal error", xconn);
-#else
-    printk(KERN_WARNING "Xbow at 0x%p encountered Fatal error", (void *)xconn);
-#endif
-    ioerror_dump("xtalk", error_code, mode, ioerror);
+    printk(KERN_WARNING "Xbow at %s encountered Fatal error", vertex_to_name(xconn, name, MAXDEVNAME));
+    snia_ioerror_dump("xtalk", error_code, mode, ioerror);
 
     return IOERROR_UNHANDLED;
 }
 
-int
-xtalk_error_devenable(vertex_hdl_t xconn_vhdl, int devnum, int error_code)
-{
-    return DEV_FUNC(xconn_vhdl, error_devenable) (xconn_vhdl, devnum, error_code);
-}
-
 
 /* =====================================================================
  *                    CONFIGURATION MANAGEMENT
  */
 
 /*
- * Startup a crosstalk provider
+ * Startup an io channel provider
  */
 void
 xtalk_provider_startup(vertex_hdl_t xtalk_provider)
 {
-    DEV_FUNC(xtalk_provider, provider_startup)
-	(xtalk_provider);
+    ((xtalk_provider_t *) hwgraph_fastinfo_get(xtalk_provider))->provider_startup(xtalk_provider);
 }
 
 
 /*
- * Shutdown a crosstalk provider
+ * Shutdown an io channel provider
  */
 void
 xtalk_provider_shutdown(vertex_hdl_t xtalk_provider)
 {
-    DEV_FUNC(xtalk_provider, provider_shutdown)
-	(xtalk_provider);
+    ((xtalk_provider_t *) hwgraph_fastinfo_get(xtalk_provider))->provider_shutdown(xtalk_provider);
 }
 
 /* 
@@ -608,17 +602,12 @@
 	return;
 }
 
-int
-xtalk_dma_enabled(vertex_hdl_t xconn_vhdl)
-{
-    return DEV_FUNC(xconn_vhdl, dma_enabled) (xconn_vhdl);
-}
 /*
- * Generic crosstalk functions, for use with all crosstalk providers
- * and all crosstalk devices.
+ * Generic io channel functions, for use with all io channel providers
+ * and all io channel devices.
  */
 
-/****** Generic crosstalk interrupt interfaces ******/
+/****** Generic io channel interrupt interfaces ******/
 vertex_hdl_t
 xtalk_intr_dev_get(xtalk_intr_t xtalk_intr)
 {
@@ -649,7 +638,7 @@
     return (xtalk_intr->xi_sfarg);
 }
 
-/****** Generic crosstalk pio interfaces ******/
+/****** Generic io channel pio interfaces ******/
 vertex_hdl_t
 xtalk_pio_dev_get(xtalk_piomap_t xtalk_piomap)
 {
@@ -681,7 +670,7 @@
 }
 
 
-/****** Generic crosstalk dma interfaces ******/
+/****** Generic io channel dma interfaces ******/
 vertex_hdl_t
 xtalk_dma_dev_get(xtalk_dmamap_t xtalk_dmamap)
 {
@@ -695,7 +684,7 @@
 }
 
 
-/****** Generic crosstalk widget information interfaces ******/
+/****** Generic io channel widget information interfaces ******/
 
 /* xwidget_info_chk:
  * check to see if this vertex is a widget;
@@ -797,7 +786,7 @@
 	panic("xwidget_info_name_get: null xwidget_info");
     return(xwidget_info->w_name);
 }
-/****** Generic crosstalk initialization interfaces ******/
+/****** Generic io channel initialization interfaces ******/
 
 /*
  * Associate a set of xtalk_provider functions with a vertex.
@@ -912,16 +901,11 @@
     if (!(widget_info = xwidget_info_get(widget)))
 	return(1);
 
-    /* Remove the inventory information associated
-     * with the widget.
-     */
-    hwgraph_inventory_remove(widget, -1, -1, -1, -1, -1);
-    
     hwid = &(widget_info->w_hwid);
 
     /* Clean out the xwidget information */
     (void)kfree(widget_info->w_name);
-    BZERO((void *)widget_info, sizeof(widget_info));
+    memset((void *)widget_info, 0, sizeof(widget_info));
     DEL(widget_info);
     
     return(0);
diff -Nru a/arch/ia64/sn/io/snia_if.c b/arch/ia64/sn/io/snia_if.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/snia_if.c	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,278 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/xtalk/xbow.h>	/* Must be before iograph.h to get MAX_PORT_NUM */
+#include <asm/sn/iograph.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/ioerror_handling.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/pciio_private.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/io.h>
+#include <asm/sn/pci/pci_bus_cvlink.h>
+#include <asm/sn/simulator.h>
+
+#define DBG printk
+#if !defined(DEV_FUNC)
+extern pciio_provider_t *pciio_to_provider_fns(vertex_hdl_t dev);
+#define DEV_FUNC(dev,func)	pciio_to_provider_fns(dev)->func
+#define CAST_PIOMAP(x)		((pciio_piomap_t)(x))
+#define CAST_DMAMAP(x)		((pciio_dmamap_t)(x))
+#define CAST_INTR(x)		((pciio_intr_t)(x))
+#endif
+extern cfg_p           pcibr_find_capability(cfg_p, unsigned);
+extern pcibr_info_t      pcibr_info_get(vertex_hdl_t);
+
+/*
+ * Many functions are not passed their vertex
+ * information directly; rather, they must
+ * dive through a resource map. These macros
+ * are available to coordinate this detail.
+ */
+#define PIOMAP_FUNC(map,func)		DEV_FUNC((map)->pp_dev,func)
+#define DMAMAP_FUNC(map,func)		DEV_FUNC((map)->pd_dev,func)
+#define INTR_FUNC(intr_hdl,func)	DEV_FUNC((intr_hdl)->pi_dev,func)
+
+int
+snia_badaddr_val(volatile void *addr, int len, volatile void *ptr)
+{
+	int ret = 0;
+	volatile void *new_addr;
+
+	switch (len) {
+		case 4:
+			new_addr = (void *) addr;
+			ret = ia64_sn_probe_io_slot((long)new_addr, len, (void *)ptr);
+			break;
+		default:
+			printk(KERN_WARNING "snia_badaddr_val: len %x not supported; only a len of 4 is supported\n", len);
+	}
+
+	if (ret < 0)
+		panic("badaddr_val: unexpected status (%d) in probing", ret);
+	return(ret);
+
+}
+
+
+nasid_t
+snia_get_console_nasid(void)
+{
+	extern nasid_t console_nasid;
+	extern nasid_t master_baseio_nasid;
+
+	if (console_nasid < 0) {
+		console_nasid = ia64_sn_get_console_nasid();
+		if (console_nasid < 0) {
+// ZZZ What do we do if we don't get a console nasid on the hardware????
+			if (IS_RUNNING_ON_SIMULATOR() )
+				console_nasid = master_baseio_nasid;
+		}
+	} 
+	return console_nasid;
+}
+
+nasid_t
+snia_get_master_baseio_nasid(void)
+{
+	extern nasid_t master_baseio_nasid;
+	extern char master_baseio_wid;
+
+	if (master_baseio_nasid < 0) {
+		master_baseio_nasid = ia64_sn_get_master_baseio_nasid();
+
+		if ( master_baseio_nasid >= 0 ) {
+        		master_baseio_wid = WIDGETID_GET(KL_CONFIG_CH_CONS_INFO(master_baseio_nasid)->memory_base);
+		}
+	} 
+	return master_baseio_nasid;
+}
+
+
+void
+snia_ioerror_dump(char *name, int error_code, int error_mode, ioerror_t *ioerror)
+{
+#ifdef	LATER
+	/* This needs to be tested */
+
+	static char *error_mode_string[] =
+		{ "probe", "kernel", "user", "reenable" };
+
+	printk("%s%s%s%s%s error in %s mode\n",
+               name,
+               (error_code & IOECODE_PIO) ? " PIO" : "",
+               (error_code & IOECODE_DMA) ? " DMA" : "",
+               (error_code & IOECODE_READ) ? " Read" : "",
+               (error_code & IOECODE_WRITE) ? " Write" : "",
+               error_mode_string[error_mode]);
+
+#define PRFIELD(f)                                  \
+        if (IOERROR_FIELDVALID(ioerror,f)) {        \
+		int tmp;                            \
+		IOERROR_GETVALUE(tmp, ioerror, f);  \
+                printk("\t%20s: 0x%x\n", #f, tmp);  \
+	}
+
+        PRFIELD(errortype);             /* error type: extra info about error */
+        PRFIELD(widgetnum);             /* Widget number that's in error */
+        PRFIELD(widgetdev);             /* Device within widget in error */
+        PRFIELD(srccpu);                /* CPU on srcnode generating error */
+        PRFIELD(srcnode);               /* Node which caused the error   */
+        PRFIELD(errnode);               /* Node where error was noticed  */
+        PRFIELD(sysioaddr);             /* Sys specific IO address       */
+        PRFIELD(xtalkaddr);             /* Xtalk (48bit) addr of Error   */
+        PRFIELD(busspace);              /* Bus specific address space    */
+        PRFIELD(busaddr);               /* Bus specific address          */
+        PRFIELD(vaddr);                 /* Virtual address of error      */
+        PRFIELD(memaddr);               /* Physical memory address       */
+        PRFIELD(epc);                   /* pc when error reported        */
+        PRFIELD(ef);                    /* eframe when error reported    */
+
+#undef  PRFIELD
+
+        {
+                /* Print a more descriptive CPU string */
+                cpuid_t srccpu;
+		IOERROR_GETVALUE(srccpu, ioerror, srccpu);
+		// smp_processor_id()
+                printk("(NOTE: CPU %d)\n", srccpu);
+                printk("\n");
+        }
+#endif	/* LATER */
+}
+
+/*
+ * XXX: should probably be called __sn2_pci_rrb_alloc
+ * used by qla1280
+ */
+
+int
+snia_pcibr_rrb_alloc(struct pci_dev *pci_dev,
+	int *count_vchan0,
+	int *count_vchan1)
+{
+	vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
+
+	return pcibr_rrb_alloc(dev, count_vchan0, count_vchan1);
+}
+
+/* 
+ * XXX: interface should be more like
+ *
+ *     int __sn2_pci_enable_bwswap(struct pci_dev *dev);
+ *     void __sn2_pci_disable_bswap(struct pci_dev *dev);
+ */
+/* used by ioc4 ide */
+
+pciio_endian_t
+snia_pciio_endian_set(struct pci_dev *pci_dev,
+	pciio_endian_t device_end,
+	pciio_endian_t desired_end)
+{
+	vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
+	
+	return DEV_FUNC(dev, endian_set)
+		(dev, device_end, desired_end);
+}
+
+iopaddr_t
+snia_pciio_dmatrans_addr(struct pci_dev *pci_dev, /* translate for this device */
+                    device_desc_t dev_desc,     /* device descriptor */
+                    paddr_t paddr,      /* system physical address */
+                    size_t byte_count,  /* length */
+                    unsigned flags)
+{                                       /* defined in dma.h */
+
+    vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
+
+    /*
+     * If the device is not a PIC, we always want the PCIIO_BYTE_STREAM to be 
+     * set.  Otherwise, it must not be set.  This applies to SN1 and SN2.
+     */
+    return DEV_FUNC(dev, dmatrans_addr)
+        (dev, dev_desc, paddr, byte_count, flags & ~PCIIO_BYTE_STREAM);
+}
+
+pciio_dmamap_t
+snia_pciio_dmamap_alloc(struct pci_dev *pci_dev,  /* set up mappings for this device */
+                   device_desc_t dev_desc,      /* device descriptor */
+                   size_t byte_count_max,       /* max size of a mapping */
+                   unsigned flags)
+{                                       /* defined in dma.h */
+
+    vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
+
+    /*
+     * If the device is not a PIC, we always want the PCIIO_BYTE_STREAM to be
+     * set.  Otherwise, it must not be set.  This applies to SN1 and SN2.
+     */
+    return (pciio_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
+        (dev, dev_desc, byte_count_max, flags & ~PCIIO_BYTE_STREAM);
+}
+
+void
+snia_pciio_dmamap_free(pciio_dmamap_t pciio_dmamap)
+{
+    DMAMAP_FUNC(pciio_dmamap, dmamap_free)
+        (CAST_DMAMAP(pciio_dmamap));
+}
+
+iopaddr_t
+snia_pciio_dmamap_addr(pciio_dmamap_t pciio_dmamap,  /* use these mapping resources */
+                  paddr_t paddr,        /* map for this address */
+                  size_t byte_count)
+{                                       /* map this many bytes */
+    return DMAMAP_FUNC(pciio_dmamap, dmamap_addr)
+        (CAST_DMAMAP(pciio_dmamap), paddr, byte_count);
+}
+
+unsigned int
+snia_msi_alloc(struct pci_dev *pci_dev,  /* set MSI/MSIX for this device */
+			int number_requested,
+			unsigned int *irqs)
+{
+
+    vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
+    extern unsigned int pcibr_msi_alloc(vertex_hdl_t, int, unsigned int *);
+
+    return pcibr_msi_alloc(dev, number_requested, irqs);
+
+}
+
+unsigned int
+snia_msix_alloc(struct pci_dev *pci_dev, /* set MSI/MSIX for this device */
+			int number_requested,
+			unsigned int *irqs)
+{
+
+    vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
+    extern unsigned int pcibr_msix_alloc(vertex_hdl_t, struct pci_dev *, int, unsigned int *);
+
+    return pcibr_msix_alloc(dev, pci_dev, number_requested, irqs);
+
+}
+
+EXPORT_SYMBOL(snia_pciio_dmatrans_addr);
+EXPORT_SYMBOL(snia_pciio_dmamap_alloc);
+EXPORT_SYMBOL(snia_pciio_dmamap_free);
+EXPORT_SYMBOL(snia_pciio_dmamap_addr);
+EXPORT_SYMBOL(snia_pciio_endian_set);
+EXPORT_SYMBOL(snia_pcibr_rrb_alloc);
+EXPORT_SYMBOL(snia_msi_alloc);
+EXPORT_SYMBOL(snia_msix_alloc);
diff -Nru a/arch/ia64/sn/io/tio/Makefile b/arch/ia64/sn/io/tio/Makefile
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/tio/Makefile	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,10 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2003 Silicon Graphics, Inc.  All Rights Reserved.
+#
+# Makefile for the tio subdirs
+
+obj-y	+=	ca/
diff -Nru a/arch/ia64/sn/io/tio/ca/Makefile b/arch/ia64/sn/io/tio/ca/Makefile
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/tio/ca/Makefile	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,10 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2003 Silicon Graphics, Inc.  All Rights Reserved.
+#
+# Makefile for the tio:ca routines.
+
+obj-y	+= ca_linux.o ca_driver.o ca_pci.o ca_error.o
diff -Nru a/arch/ia64/sn/io/tio/ca/ca_driver.c b/arch/ia64/sn/io/tio/ca/ca_driver.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/tio/ca/ca_driver.c	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,75 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/xtalk/xtalk.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/tio/tioca.h>
+#include <asm/sn/tio/tioca_soft.h>
+
+/*
+ * Debugging support
+ */
+
+uint32_t	tioca_debug_mask = CA_DBG_DEFAULT;
+
+/*
+ * Hardware level driver
+ */
+
+/*
+ * First-level initialization for the TIO:CA ASIC.  Takes a system-level handle
+ * identifying the hardware as well as a base PIO address, and constructs a
+ * soft structure representing the initialized hardware.
+ *
+ * Assumes a clean reset has already been performed
+ */
+
+tioca_soft_p
+tioca_hwinit(vertex_hdl_t systemhdl, tioca_p base)
+{
+	tioca_soft_p	soft;
+
+	soft = tioca_mem_alloc(sizeof(tioca_soft_t), 0);
+	if (soft == NULL) {
+		return NULL;
+	}
+
+	soft->ca_systemhdl = systemhdl;
+	soft->ca_base = base;
+
+	/*
+	 * Set up the initial control1 values as follows:
+	 *
+	 *	System Little Endian
+	 *	All swapping off
+	 *	
+	 */
+
+	soft->ca_control1 = base->ca_control1;
+	soft->ca_control2 = base->ca_control2;
+
+	/*
+	 * Do PCI setup, including probing the bus and parsing AGP capabilities
+	 */
+
+	return soft;
+		
+	if (soft) {
+		tioca_mem_free(soft, sizeof(tioca_soft_t));
+	}
+
+	return NULL;
+}
+
+void
+tioca_agp_enable(vertex_hdl_t systemhdl)
+{
+}
+
diff -Nru a/arch/ia64/sn/io/tio/ca/ca_error.c b/arch/ia64/sn/io/tio/ca/ca_error.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/tio/ca/ca_error.c	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,147 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/module.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/tio/tioca.h>
+#include <asm/sn/tio/tioca_soft.h>
+
+irqreturn_t
+tioca_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
+{
+    tioca_soft_p ca_soft;
+    tioca_reg_t err_status, int_status;
+    tioca_reg_t multi_error;
+    tioca_reg_t first_error;
+    tioca_p ca_base;
+
+    ca_soft = (tioca_soft_p) arg;
+    ca_base = ca_soft->ca_base;
+    int_status = ca_base->ca_int_status;
+    multi_error = ca_base->ca_mult_error;
+    first_error = ca_base->ca_first_error;
+
+    ca_base->ca_first_error = 0;
+
+    err_status = int_status | first_error;
+
+    if(err_status & CA_PCI_ERR) {
+	tioca_reg_t pcierr;
+        tioca_reg_t multi;
+	tioca_reg_t pciaddr;
+
+	pcierr = ca_base->ca_pcierr_type;
+        multi = multi_error & CA_PCI_ERR ;
+	pciaddr = ca_base->ca_pcierr_addr;
+
+    }
+
+    if(err_status & CA_PIO_INVALID_ADDR) {
+        tioca_reg_t pioerr;
+	tioca_reg_t multi;
+        tioca_reg_t pciaddr;
+
+        pioerr = ca_base->ca_pcierr_type;
+	multi = multi_error & CA_PIO_INVALID_ADDR ;
+        pciaddr = ca_base->ca_pcierr_addr;
+
+    }
+
+    if(err_status & CA_CRM_PKTERR) {
+        tioca_reg_t crmerr;
+	tioca_reg_t multi;
+
+        crmerr = ca_base->ca_crm_pkterr_type;
+	multi = multi_error & CA_CRM_PKTERR ;
+    }
+
+    ca_base->ca_int_status_alias = int_status;
+
+    if(multi_error)
+	ca_base->ca_mult_error_alias = multi_error;
+
+    printk(KERN_ALERT
+		"Error interrupt from TIO:CA %s: status %d, multiple errors %d\n",
+		ca_soft->ca_name, (int)int_status, (int)multi_error);
+
+    first_error = ca_base->ca_first_error;
+    if(first_error) {
+    	printk(KERN_ALERT
+                "Error from TIO:CA %s: post-clear error %d\n", ca_soft->ca_name, (int)first_error);
+
+    	ca_base->ca_first_error = 0;
+    }
+    return IRQ_HANDLED;
+}
+
+int
+tioca_pioerror(tioca_soft_p ca_soft, 
+                  int error_code,
+                  ioerror_mode_t mode,
+                  ioerror_t *ioe)
+{
+    int                     retval = IOERROR_HANDLED;
+    vertex_hdl_t            tioca_vhdl = ca_soft->ca_vhdl;
+
+    retval = pciio_error_handler(tioca_vhdl, error_code, mode, ioe);
+
+    if (retval != IOERROR_HANDLED) {
+
+        /* Generate a generic message for IOERROR_UNHANDLED
+         * since the subsidiary handlers were silent, and
+         * did no recovery.
+         */
+        if (retval == IOERROR_UNHANDLED) {
+            retval = IOERROR_PANIC;
+
+            /* we may or may not want to print some of this,
+             * depending on debug level and which error code.
+             */
+
+            printk(KERN_ALERT
+                    "PIO Error on AGP Bus %s",
+                    ca_soft->ca_name);
+        }
+    }
+    return retval;
+
+}
+
+int
+tioca_error_handler(
+                       error_handler_arg_t einfo,
+                       int error_code,
+                       ioerror_mode_t mode,
+                       ioerror_t *ioe)
+{
+
+    tioca_soft_p            ca_soft;
+    int                     retval = IOERROR_BADERRORCODE;
+
+    ca_soft = (tioca_soft_p) einfo;
+
+#if DEBUG && ERROR_DEBUG
+    printk("%s: tioca_error_handler\n", ca_soft->ca_name);
+#endif
+
+    ASSERT(ca_soft != NULL);
+
+    if (error_code & IOECODE_PIO)
+        retval = tioca_pioerror(ca_soft, error_code, mode, ioe);
+
+    return retval;
+
+}
+
diff -Nru a/arch/ia64/sn/io/tio/ca/ca_linux.c b/arch/ia64/sn/io/tio/ca/ca_linux.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/tio/ca/ca_linux.c	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,246 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/module.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/tio/tioca.h>
+#include <asm/sn/tio/tioca_soft.h>
+#include <asm/sn/sn_private.h>
+
+extern void xwidget_error_register(vertex_hdl_t xwidget,
+                       error_handler_f *efunc,
+                       error_handler_arg_t einfo);
+
+extern int tioca_error_handler(error_handler_arg_t einfo,
+                       int error_code,
+                       ioerror_mode_t mode,
+                       ioerror_t *ioe);
+extern void tioca_error_intr_handler(int, void *, struct pt_regs *);
+extern void tioca_seterrint(xtalk_intr_t intr);
+
+/*
+ * Interfaces for bridging the generic TIO:CA code to linux 
+ */
+
+/*
+ * Memory allocation
+ */
+
+void *
+tioca_mem_alloc(size_t nbytes, uint flags)
+{
+	/* flags currently unused in linux */
+	return kmalloc(nbytes, GFP_ATOMIC);
+}
+
+void
+tioca_mem_free(void *ptr, size_t nbytes)
+{
+	/* nbytes currently unused in linux */
+	kfree(ptr);
+}
+
+/*
+ * Convert between soft struct and system handle
+ */
+
+tioca_soft_p
+tioca_hdl_to_soft(vertex_hdl_t hdl)
+{
+	return (tioca_soft_p)hwgraph_fastinfo_get(hdl);
+}
+
+vertex_hdl_t
+tioca_soft_to_hdl(tioca_soft_p soft)
+{
+	return soft->ca_systemhdl;
+}
+
+static int
+tioca_mmap(struct file * file, struct vm_area_struct * vma)
+{
+        vertex_hdl_t            tioca_vhdl;
+        tioca_soft_p		ca_soft;
+        tioca_p			ca;
+        unsigned long           phys_addr;
+        int                     error = 0;
+
+        tioca_vhdl = (vertex_hdl_t) file->f_dentry->d_fsdata;
+        ca_soft = tioca_hdl_to_soft(tioca_vhdl);
+        ca = (tioca_p)ca_soft->ca_base;
+        /* Mask out the Uncache bits */
+        phys_addr = (unsigned long)ca & ~0xc000000000000000;
+        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+        vma->vm_flags |= VM_NONCACHED | VM_RESERVED | VM_IO;
+        error = io_remap_page_range(vma, vma->vm_start, phys_addr,
+                                   vma->vm_end-vma->vm_start,
+                                   vma->vm_page_prot);
+        return(error);
+}
+
+extern pciio_provider_t tioca_pci;
+
+struct file_operations tioca_fops = {
+        .owner          = THIS_MODULE,
+        .mmap           = tioca_mmap,
+};
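+
+/*
+ * Rough user-space usage sketch (illustrative only; the hwgfs path below
+ * is hypothetical and depends on the hwgraph nodes created in
+ * tioca_attach()):
+ *
+ *	int fd = open("/hw/module/.../agp/controller", O_RDWR);
+ *	volatile tioca_t *ca = mmap(NULL, sizeof(tioca_t),
+ *			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *
+ * tioca_mmap() above then hands back the CA register block mapped
+ * uncached into the caller's address space.
+ */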
+
+void
+tioca_dump(tioca_p ca)
+{
+	printk("CA registers (hex offset) in [] at 0x%p:\n", ca);
+
+	printk("ca_id                [000] 0x%lx\n", ca->ca_id);
+	printk("ca_control1          [008] 0x%lx\n", ca->ca_control1);
+	printk("ca_control2          [010] 0x%lx\n", ca->ca_control2);
+	printk("ca_status1           [018] 0x%lx\n", ca->ca_status1);
+	printk("ca_status2           [020] 0x%lx\n", ca->ca_status2);
+	printk("ca_gart_aperature    [028] 0x%lx\n", ca->ca_gart_aperature);
+	printk("ca_gfx_detach        [030] 0x%lx\n", ca->ca_gfx_detach);
+	printk("ca_inta_dest_addr    [038] 0x%lx\n", ca->ca_inta_dest_addr);
+	printk("ca_intb_dest_addr    [040] 0x%lx\n", ca->ca_intb_dest_addr);
+	printk("ca_err_int_dest_addr [048] 0x%lx\n", ca->ca_err_int_dest_addr);
+	printk("ca_int_status        [050] 0x%lx\n", ca->ca_int_status);
+	printk("ca_int_status_alias  [058] 0x%lx\n", ca->ca_int_status_alias);
+	printk("ca_mult_error        [060] 0x%lx\n", ca->ca_mult_error);
+	printk("ca_mult_error_alias  [068] 0x%lx\n", ca->ca_mult_error_alias);
+	printk("ca_first_error       [070] 0x%lx\n", ca->ca_first_error);
+	printk("ca_int_mask          [078] 0x%lx\n", ca->ca_int_mask);
+	printk("ca_crm_pkterr_type   [080] 0x%lx\n", ca->ca_crm_pkterr_type);
+	printk("ca_crm_pkterr_type_alias [088] 0x%lx\n", ca->ca_crm_pkterr_type_alias);
+	printk("ca_crm_ct_error_detail_1 [090] 0x%lx\n", ca->ca_crm_ct_error_detail_1);
+	printk("ca_crm_ct_error_detail_2 [098] 0x%lx\n", ca->ca_crm_ct_error_detail_2);
+	printk("ca_crm_tnumto        [0A0] 0x%lx\n", ca->ca_crm_tnumto);
+	printk("ca_gart_err          [0A8] 0x%lx\n", ca->ca_gart_err);
+	printk("ca_pcierr_type       [0B0] 0x%lx\n", ca->ca_pcierr_type);
+	printk("ca_pcierr_addr       [0B8] 0x%lx\n", ca->ca_pcierr_addr);
+	printk("ca_pci_rd_buf_flush  [0D8] 0x%lx\n", ca->ca_pci_rd_buf_flush);
+	printk("ca_pci_dma_addr_extn [0E0] 0x%lx\n", ca->ca_pci_dma_addr_extn);
+	printk("ca_agp_dma_addr_extn [0E8] 0x%lx\n", ca->ca_agp_dma_addr_extn);
+	printk("ca_force_inta        [0F0] 0x%lx\n", ca->ca_force_inta);
+	printk("ca_force_intb        [0F8] 0x%lx\n", ca->ca_force_intb);
+	printk("ca_debug_vector_sel  [100] 0x%lx\n", ca->ca_debug_vector_sel);
+	printk("ca_debug_mux_core_sel [108] 0x%lx\n", ca->ca_debug_mux_core_sel);
+	printk("ca_debug_mux_pci_sel [110] 0x%lx\n", ca->ca_debug_mux_pci_sel);
+	printk("ca_debug_domain_sel  [118] 0x%lx\n", ca->ca_debug_domain_sel);
+	printk("ca_gart_ptr_table    [200] 0x%lx\n", ca->ca_gart_ptr_table);
+	printk("ca_gart_tlb_addr[0]  [208] 0x%lx\n", ca->ca_gart_tlb_addr[0]);
+	printk("ca_gart_tlb_addr[1]  [210] 0x%lx\n", ca->ca_gart_tlb_addr[1]);
+	printk("ca_gart_tlb_addr[2]  [218] 0x%lx\n", ca->ca_gart_tlb_addr[2]);
+	printk("ca_gart_tlb_addr[3]  [220] 0x%lx\n", ca->ca_gart_tlb_addr[3]);
+	printk("ca_gart_tlb_addr[4]  [228] 0x%lx\n", ca->ca_gart_tlb_addr[4]);
+	printk("ca_gart_tlb_addr[5]  [230] 0x%lx\n", ca->ca_gart_tlb_addr[5]);
+	printk("ca_gart_tlb_addr[6]  [238] 0x%lx\n", ca->ca_gart_tlb_addr[6]);
+	printk("ca_gart_tlb_addr[7]  [240] 0x%lx\n", ca->ca_gart_tlb_addr[7]);
+}
+
+int
+tioca_attach(vertex_hdl_t asic_hdl)
+{
+	vertex_hdl_t	tioca_vhdl=NULL;
+	vertex_hdl_t	ctlr_vhdl=NULL;
+	tioca_p		ca;
+	tioca_soft_p	ca_soft;
+	xtalk_intr_t	xtalk_intr;
+	tioca_reg_t	int_mask;
+	int		iobrick_type_get_nasid(nasid_t nasid);
+	char		devnm[MAXDEVNAME], *s;
+	int		rc;
+	char		buf[256];
+
+	dev_to_name(asic_hdl, buf, sizeof(buf));
+        TIOCA_DEBUG(CA_DBG_ALWAYS, ("tioca_attach [0x%p] %s\n", (void *)asic_hdl, buf));
+
+	ca = (tioca_p)xtalk_piotrans_addr(asic_hdl,
+					  NULL, 0, sizeof(tioca_t), 0);
+
+	if (ca == NULL) {
+		return -1;
+	}
+
+/*
+	tioca_dump(ca);
+*/
+
+	ca_soft = tioca_hwinit(asic_hdl, ca);
+
+	if (ca_soft == NULL) {
+		return -1;
+	}
+
+
+/* TIO BRINGUP 
+	brick_type = iobrick_type_get_nasid(NASID_GET(ca));
+
+	if ( brick_type == MODULE_OPUSBRICK ) {
+*/		rc = hwgraph_path_add(asic_hdl, EDGE_LBL_AGP, &tioca_vhdl);
+                ASSERT(rc == GRAPH_SUCCESS);
+		ca_soft->ca_vhdl = tioca_vhdl;
+		pciio_provider_register(tioca_vhdl, &tioca_pci);
+		pciio_provider_startup(tioca_vhdl);
+
+		s = dev_to_name(tioca_vhdl, devnm, MAXDEVNAME);
+printk("*** tioca_vhdl 0x%p:  %s\n", (void *)tioca_vhdl, devnm);
+		ca_soft->ca_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
+		strcpy(ca_soft->ca_name, s);
+/* TIO BRINGUP
+	}
+*/
+        ASSERT(tioca_vhdl != NULL);
+
+        ctlr_vhdl = hwgraph_register(tioca_vhdl, EDGE_LBL_CONTROLLER, 0,
+                                     0, 0, 0,
+                                     S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0,
+                                     0, (struct file_operations *) &tioca_fops,
+                                     (void *) tioca_vhdl);
+        ASSERT(ctlr_vhdl != NULL);
+	xwidget_error_register(asic_hdl, tioca_error_handler, ca_soft);
+
+	xtalk_intr = xtalk_intr_alloc(asic_hdl, (device_desc_t)0, tioca_vhdl);
+
+	ASSERT(xtalk_intr != NULL);
+
+        {
+                int irq = ((hub_intr_t)xtalk_intr)->i_bit;
+                int cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;
+
+                intr_unreserve_level(cpu, irq);
+                ((hub_intr_t)xtalk_intr)->i_bit = SGI_TIOCA_ERROR;
+        }
+
+	ca_soft->ca_err_intr = xtalk_intr;
+
+	xtalk_intr_connect(xtalk_intr, (intr_func_t) tioca_error_intr_handler,
+		(intr_arg_t) ca_soft, (xtalk_intr_setfunc_t)tioca_seterrint, (void *)ca);
+
+	request_irq(SGI_TIOCA_ERROR, (void *)tioca_error_intr_handler, SA_SHIRQ, "TIO:CA error",
+                                        (intr_arg_t) ca_soft);
+
+	/* Enable specific interrupts for pre-agp-enabled operation by writing to TIO:CA control registers */
+
+        int_mask = CA_PCI_ERR | CA_PIO_REQ_OVFLW | CA_CRM_PKTERR | CA_PIO_INVALID_ADDR;
+        ca->ca_int_mask = ~int_mask;
+	ca_soft->ca_int_mask = ca->ca_int_mask;
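+	/*
+	 * Mask convention implied here: a set bit in ca_int_mask masks
+	 * (disables) that interrupt source, so writing ~int_mask leaves
+	 * only the four error sources listed above enabled.
+	 * tioca_intr_connect() later clears CA_INTA/CA_INTB in this
+	 * register to enable the device interrupt lines.
+	 */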
+
+	/*
+	 * Set up PCI and probe the bus
+	 */
+
+	tioca_pci_init(ca_soft);
+
+	return 0;
+}
diff -Nru a/arch/ia64/sn/io/tio/ca/ca_pci.c b/arch/ia64/sn/io/tio/ca/ca_pci.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/ia64/sn/io/tio/ca/ca_pci.c	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,1281 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+
+/*
+ * PLEASE RUN THIS THROUGH "indent -kr" WHEN ADDING NEW CODE
+ */
+
+#include <linux/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/xtalk/xtalk.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/pciio_private.h>
+#include <asm/sn/tio/tioca.h>
+#include <asm/sn/tio/tioca_soft.h>
+#include <asm/sn/tio/tioca_private.h>
+#include <asm/sn/sn_private.h>
+
+extern int compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr);
+
+void tioca_setpciint(xtalk_intr_t xtalk_intr);
+
+pciio_piomap_alloc_f tioca_piomap_alloc;
+pciio_piomap_free_f tioca_piomap_free;
+pciio_piomap_addr_f tioca_piomap_addr;
+pciio_piomap_done_f tioca_piomap_done;
+pciio_piotrans_addr_f tioca_piotrans_addr;
+pciio_piospace_alloc_f tioca_piospace_alloc;
+pciio_piospace_free_f tioca_piospace_free;
+
+pciio_dmamap_alloc_f tioca_dmamap_alloc;
+pciio_dmamap_free_f tioca_dmamap_free;
+pciio_dmamap_addr_f tioca_dmamap_addr;
+pciio_dmamap_done_f tioca_dmamap_done;
+pciio_dmatrans_addr_f tioca_dmatrans_addr;
+pciio_dmamap_drain_f tioca_dmamap_drain;
+pciio_dmaaddr_drain_f tioca_dmaaddr_drain;
+pciio_dmalist_drain_f tioca_dmalist_drain;
+
+pciio_provider_startup_f tioca_provider_startup;
+pciio_provider_shutdown_f tioca_provider_shutdown;
+pciio_reset_f tioca_reset;
+pciio_endian_set_f tioca_endian_set;
+pciio_config_get_f tioca_config_get;
+pciio_config_set_f tioca_config_set;
+
+pciio_error_devenable_f tioca_error_devenable;
+pciio_error_extract_f tioca_error_extract;
+pciio_driver_reg_callback_f tioca_driver_reg_callback;
+pciio_driver_unreg_callback_f tioca_driver_unreg_callback;
+pciio_device_unregister_f tioca_device_unregister;
+
+/*
+ * Private functions
+ */
+
+static void *tioca_config_base(tioca_soft_p, pciio_bus_t, pciio_slot_t,
+			       pciio_function_t);
+static int tioca_probe(tioca_soft_p soft, void *addr, unsigned size,
+		       void * valp);
+static int tioca_agp_func_init(tioca_info_t info, tioca_soft_p soft);
+
+typedef struct tioca_intr_s *tioca_intr_t;
+
+pciio_intr_t tioca_intr_alloc(vertex_hdl_t dev, device_desc_t dev_desc,
+			      pciio_intr_line_t lines, vertex_hdl_t owner_dev);
+void tioca_intr_free(pciio_intr_t pciio_intr);
+int tioca_intr_connect(pciio_intr_t pciio_intr, intr_func_t intr_func,
+		       intr_arg_t intr_arg);
+void tioca_intr_disconnect(pciio_intr_t pciio_intr);
+vertex_hdl_t tioca_intr_cpu_get(pciio_intr_t pciio_intr);
+
+#define CFG8_GET(cfgp, byte_offset)	(((uint8_t *)(cfgp))[(byte_offset)])
+#define CFG16_GET(cfgp, byte_offset)	(((uint16_t *)(cfgp))[(byte_offset)/2])
+#define CFG32_GET(cfgp, byte_offset)	(((uint32_t *)(cfgp))[(byte_offset)/4])
+
+#define CFG8_PUT(addr, offset, val)	\
+		*((uint8_t *)((addr) + (offset))) = (uint8_t)(val)
+#define CFG16_PUT(addr, offset, val)	\
+		*((uint16_t *)((addr) + (offset)/2)) = (uint16_t)(val)
+#define CFG32_PUT(addr, offset, val)	\
+		*((uint32_t *)((addr) + (offset)/4)) = (uint32_t)(val)
+
+/*
+ * PCI provider interfaces for TIO:CA
+ */
+
+pciio_provider_t tioca_pci = {
+	/* PIO MANAGEMENT */
+	tioca_piomap_alloc,
+	tioca_piomap_free,
+	tioca_piomap_addr,
+	tioca_piomap_done,
+	tioca_piotrans_addr,
+	tioca_piospace_alloc,
+	tioca_piospace_free,
+
+	/* DMA MANAGEMENT */
+	tioca_dmamap_alloc,
+	tioca_dmamap_free,
+	tioca_dmamap_addr,
+	tioca_dmamap_done,
+	tioca_dmatrans_addr,
+	tioca_dmamap_drain,
+	tioca_dmaaddr_drain,
+	tioca_dmalist_drain,
+
+	/* INTERRUPT MANAGEMENT */
+	tioca_intr_alloc,
+	tioca_intr_free,
+	tioca_intr_connect,
+	tioca_intr_disconnect,
+	tioca_intr_cpu_get,
+
+	/* CONFIGURATION MANAGEMENT */
+	tioca_provider_startup,
+	tioca_provider_shutdown,
+	tioca_reset,
+	tioca_endian_set,
+	tioca_config_get,
+	tioca_config_set,
+
+	/* Error handling interface */
+	tioca_error_devenable,
+	tioca_error_extract,
+
+	/* Callback support */
+	tioca_driver_reg_callback,
+	tioca_driver_unreg_callback,
+	tioca_device_unregister,
+};
+
+/*
+ * Probe the CA slot and do basic PCI setup.  Assumes that CA is sufficiently
+ * initialized to allow PCI config space access.
+ *
+ * Note:  AGP specific setup based on AGP capability is not done here
+ */
+
+/* Round val up to a multiple of mult.  Assumes mult is a power of 2 */
+ 
+#define ROUNDUP_LOG2(val, mult)	(((val) + (mult-1)) & ~(mult-1))
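+
+/*
+ * For illustration, with mult a power of 2:
+ *	ROUNDUP_LOG2(0x00100000, 0x10000) == 0x00100000  (already aligned)
+ *	ROUNDUP_LOG2(0x00100004, 0x10000) == 0x00110000  (next 64KB boundary)
+ */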
+
+int
+tioca_pci_init(tioca_soft_p soft)
+{
+	uint8_t	 *cfg;
+	uint16_t devid;
+	uint16_t vendid;
+	uint8_t hdr;
+	tioca_info_t info;
+	uint32_t *barp, barv, size, pci_addr;
+	uint32_t mem_offset, io_offset, lowmem_offset, *spacep;
+	int bar_64;
+	uint8_t	func, max_func, bar;
+	int	rv;
+	uint16_t pci_status;
+	uint8_t	cap_ptr;
+	uint8_t	cap_id;
+	vertex_hdl_t func_vhdl;
+	char	func_path[256];
+
+	TIOCA_DEBUG(CA_DBG_ALWAYS, ("tioca_pci_init ... soft %p\n", soft));
+
+	/*
+	 * Set up PCI-related CA registers
+	 */
+
+	lowmem_offset = 0x000c0000;	/* reserve lowest mem for legacy VGA */
+	mem_offset = 0x00100000;	/* start mem at 1mb */
+	io_offset = 0x00001000;		/* reserve low 64k for legacy */
+
+	/*
+	 * Probe the bus.  Per spec, there can only be one device, and it
+	 * has to be device 0 (ie. card IDSEL hooked to AD16).
+	 */
+
+	max_func = 1;
+	for (func = 0; func < max_func; func++) {
+		cfg = (uint8_t *) tioca_config_base(soft, 0, 0, func);
+		TIOCA_DEBUG(CA_DBG_ALWAYS,
+			("function %d config space at addr 0x%p\n", func, cfg));
+		rv = tioca_probe(soft, cfg + PCI_CFG_VENDOR_ID, 2, (void *)&vendid);
+
+		if (rv != 0) {
+			TIOCA_DEBUG(CA_DBG_ALWAYS,
+				    ("tioca_probe of soft %p cfg %p returns %d vendid %#x\n",
+				    soft, cfg + PCI_CFG_VENDOR_ID, rv, vendid));
+
+			if (rv > 0 || vendid == 0xffff) {
+				/*
+				 * Device/Function not present.
+				 */
+
+				continue;	/* try the next func */
+			} else {
+				return -1;	/* likely a coding error */
+			}
+		}
+
+		TIOCA_DEBUG(CA_DBG_ALWAYS,
+			    ("tioca_probe func %d found vendor id %#x\n",
+				    func, vendid));
+
+		/*
+		 * The above check should catch a non-existent function, but
+		 * just in case, check for a vendor id of all f's.
+	 	 */
+
+		/*
+		 * Function has responded.
+		 *
+		 * ###maule:  direct  accesses to this config space should
+		 * be ok now so just dereference cfg+offset.  If that turns
+		 * out to not be the case, we'll need to use tioca_probe().
+		 */
+
+		hdr = CFG8_GET(cfg, PCI_CFG_HEADER_TYPE);
+		if (func == 0 && (hdr & 0x80)) {
+			max_func = 8;	/* device is multifunc */
+		}
+
+		/*
+		 * Construct a tioca_info_t to represent this function
+		 */
+
+		devid = CFG16_GET(cfg, PCI_CFG_DEVICE_ID);
+		info = tioca_mem_alloc(sizeof(struct tioca_info_s), 0);
+		pciio_device_info_new(&info->f_c, soft->ca_systemhdl, 0,
+				(max_func > 1) ? func : PCIIO_FUNC_NONE,
+				 vendid, devid);
+		info->f_cfgptr = cfg;
+
+		/*
+		 * Set up the standard PCI registers - BAR's, latency timer
+		 * ###maule:  only setting up BAR's for now - don't know if we
+		 * need to worry about anything else since there's only 1 device on
+		 * the bus.
+		 */
+
+		for (bar = 0, barp = (uint32_t *)(cfg + PCI_CFG_BASE_ADDR_0);
+		     bar < 6; bar++, barp++) {
+			barv = *barp;
+			if (barv == 0) {
+				continue;	/* BAR not implemented */
+			}
+
+			/*
+			 * ###maule
+			 * Medusa HACK - if the upper byte is ff, assume
+			 * that the user has encoded the size in the BAR
+			 * already.
+			 */
+
+			if ((barv >> 24) != 0xff) {
+				/*
+			 	 * Reset the BAR to f's to ascertain its size.
+			 	 */
+
+				*barp = 0xffffffff;
+				barv = *barp;
+			}
+
+			if (barv & PCI_BA_IO_SPACE) {
+				/* I/O BAR */
+
+				size = ((~barv) | PCI_BA_IO_CODEMASK) + 1;
+				spacep = &io_offset;
+
+				bar_64 = 0;
+				info->f_window[bar].w_code =
+					barv & PCI_BA_IO_CODEMASK;
+
+				TIOCA_DEBUG(CA_DBG_ALWAYS,
+					("func %d bar %d size %#x type IO\n",
+					func, bar, size));
+			} else {
+				/* MEM bar */
+
+				size = ((~barv) | PCI_BA_MEM_CODEMASK) + 1;
+
+				/*
+				 * Shouldn't see this since this bit is
+				 * reserved as of the 2.2 PCI spec.  But
+				 * just in case ...
+				 */
+
+				if (barv & PCI_BA_MEM_1MEG) {
+					spacep = &lowmem_offset;
+				} else {
+					spacep = &mem_offset;
+				}
+
+				bar_64 = barv & PCI_BA_MEM_64BIT;
+
+				info->f_window[bar].w_code = 
+					barv & PCI_BA_MEM_CODEMASK;
+
+				TIOCA_DEBUG(CA_DBG_ALWAYS,
+					("func %d bar %d size %#x type MEM %s %s %s\n",
+					func, bar, size,
+					bar_64?"64BIT":"",
+					(barv & PCI_BA_PREFETCH)?"PREFETCH":"",
+					(barv & PCI_BA_MEM_1MEG)?"<1MB":""));
+			}
+
+			pci_addr = ROUNDUP_LOG2(*spacep, size);
+			*barp = pci_addr;
+			*spacep = pci_addr + size;
+
+			TIOCA_DEBUG(CA_DBG_ALWAYS, ("    assigned pci addr range %#x-%#x\n", pci_addr, pci_addr+size-1));
+
+			info->f_window[bar].w_base = pci_addr;
+			info->f_window[bar].w_size = size;
+			info->f_window[bar].w_space = PCIIO_SPACE_WIN(bar);
+
+			/*
+			 * TIO:CA does not generate dual address cycles, so
+			 * if this is a 64 bit BAR, set the upper 32 bits
+			 * to 0.
+			 */
+
+			if (bar_64) {
+				bar++;
+				barp++;
+				*barp = 0;
+			}
+		}
+
+		/*
+		 * Expansion ROM.
+		 */
+
+		barp = (uint32_t *)(cfg + PCI_EXPANSION_ROM);
+		*barp = 0xfffffffe;	/* bit 0 == 0 to disable decoder */
+		barv = *barp;
+
+		size = ~barv + 1;
+		pci_addr = ROUNDUP_LOG2(mem_offset, size);
+		*barp = (pci_addr | 1);	/* set addr and enable decoder */
+		mem_offset = pci_addr + size;
+
+		TIOCA_DEBUG(CA_DBG_ALWAYS,
+			("func %d Expansion ROM size %#x\n", func, size));
+		TIOCA_DEBUG(CA_DBG_ALWAYS,
+			("    assigned pci addr range %#x-%#x\n",
+			pci_addr, pci_addr+size-1));
+
+		info->f_rbase = pci_addr;
+		info->f_rsize = size;
+
+		/*
+		 * Check for AGP capability
+		 */
+
+		cap_ptr = 0;
+		pci_status = CFG16_GET(cfg, PCI_CFG_STATUS);
+		if (pci_status & PCI_STAT_CAP_LIST) {
+			cap_ptr = CFG8_GET(cfg, PCI_CAPABILITIES_PTR);
+			while (cap_ptr) {
+				if (cap_ptr & 0x3) {
+					/* sanity check - cap ptrs */
+					/* must be dword aligned */
+					break;
+				}
+
+				cap_id = CFG8_GET(cfg, cap_ptr);
+				if (cap_id == PCI_CAP_AGP) {
+					info->f_agpcap = cap_ptr;
+					tioca_agp_func_init(info, soft);
+					break;
+				}
+
+				cap_ptr = CFG8_GET(cfg, cap_ptr+1);
+			}
+		}
+
+		/*
+		 * Clear residual PCI status 
+		 * Enable MEM/IO decoders and Bus Master capability
+		 */
+
+		CFG16_PUT(cfg, PCI_CFG_STATUS, 0);
+		CFG16_PUT(cfg, PCI_CFG_COMMAND,
+		    PCI_CMD_BUS_MASTER | PCI_CMD_MEM_SPACE | PCI_CMD_IO_SPACE);
+
+#if 0
+		/*
+		 * DEBUG:  Dump out the config space for this function
+		 */
+
+		TIOCA_DEBUG(CA_DBG_ALWAYS, ("Dumping (hex) config space for function %d:\n", func));
+		for (d = 0; d < 64; d++) {
+			uint32_t	val;
+
+			rv = tioca_probe(soft, cfg + (d*4), 4, (void *)&val);
+			if (rv != 0) {
+				val = 0xffffffff;
+			}
+
+			if ((d % 4) == 0) {
+				TIOCA_DEBUG(CA_DBG_ALWAYS,
+					("Offset %#x:  ", d*4));
+			}
+
+			TIOCA_DEBUG(CA_DBG_ALWAYS, ("%#x ", val));
+
+			if ((d % 4) == 3) {
+				TIOCA_DEBUG(CA_DBG_ALWAYS, ("\n"));
+			}
+
+		}
+#endif
+
+		func_vhdl = pciio_device_info_register(soft->ca_vhdl, &info->f_c);
+		dev_to_name(func_vhdl, func_path, sizeof(func_path));
+		TIOCA_DEBUG(CA_DBG_ALWAYS,
+			("Added vhdl %#x name %s\n", func_vhdl, func_path));
+	}
+
+	return 0;
+}
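+
+/*
+ * Worked example of the BAR sizing loop in tioca_pci_init() above
+ * (assuming PCI_BA_MEM_CODEMASK covers the low encoding bits, 0xf):
+ * a 1MB MEM BAR written with 0xffffffff reads back as e.g. 0xfff00008,
+ * so, with mem_offset still at its initial 0x00100000,
+ *
+ *	size     = ((~0xfff00008) | 0xf) + 1            == 0x00100000
+ *	pci_addr = ROUNDUP_LOG2(mem_offset, 0x00100000) == 0x00100000
+ *
+ * and the next free mem_offset becomes 0x00200000.
+ */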
+
+int
+tioca_agp_func_init(tioca_info_t info, tioca_soft_p soft)
+{
+	uint8_t		agp_major;
+	uint8_t		agp_minor;
+	uint32_t	agp_status;
+	uint32_t	agp_command;
+	tioca_t		*tioca_base;
+	tioca_reg_t	control1;
+	char		*cfg;
+	uint8_t		cap_ptr;
+
+	if (! info->f_agpcap) {		/* sanity check */
+		return 0;
+	}
+
+	cfg = info->f_cfgptr;
+	cap_ptr = info->f_agpcap;
+
+	agp_major = CFG8_GET(cfg, cap_ptr+2);	/* encodes major and minor rev */
+	agp_minor = agp_major & 0xf;
+	agp_major = agp_major >> 4;
+
+	agp_status = CFG32_GET(cfg, cap_ptr+4);
+
+	tioca_base = soft->ca_base;
+
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		("Found AGP capability:  rev %d.%d status %#x\n",
+		agp_major, agp_minor, agp_status));
+
+	/*
+	 * Verify and set the card's speed.
+	 *
+	 * CA supports only AGP 3.0 in 8x mode, so the
+	 * function's AGP status 3:0 must be either 1010b
+	 * (AGP 3.0 8x only) or 1011b (AGP 3.0 4x and 8x).
+	 * If 3.0 8x is not supported by the card, we must
+	 * run the bus in PCI mode.  If the card can be
+	 * supported, set the AGP command register bits
+	 * 2:0 to 010b to run it at 8x speed.
+	 */ 
+
+	if ((agp_status & 0xa) != 0xa) {
+		TIOCA_DEBUG(CA_DBG_ERROR,
+			("    Function is not AGP 3.0 and 8x capable ... running bus in PCI mode\n"));
+		return 0;
+	}
+
+	/*
+	 * Initial setting of the card's AGP command register:
+	 *	AGP 3.0 8x mode				(2:0 == 0x2)
+	 *	Sideband Enable				(  9 == 0x1)
+	 */
+
+	agp_command = 0x22;		/* initial setting - AGP 3.0 8x mode */
+	control1 = tioca_base->ca_control1;
+
+	/*
+	 * Check for FastWrite support.
+	 */
+
+	if (agp_status & 0x10) {
+		TIOCA_DEBUG(CA_DBG_ALWAYS,
+			("    Card supports FastWrite ... enabling ....\n"));
+		agp_command |= 0x10;
+		control1 |= CA_AGP_FW_ENABLE;
+	} else {
+		TIOCA_DEBUG(CA_DBG_ALWAYS,
+			("    Card doesn't support FastWrite ... disabling ....\n"));
+		control1 &= ~CA_AGP_FW_ENABLE;
+	}
+
+	/*
+	 * Calibration Cycle:  Set up CA to generate these as frequently
+	 * as the card requests through it's AGP status register.
+	 * as the card requests through its AGP status register.
+	 * Note:  the format of the calibration cycle bits in ca:control1
+	 * is the same as the in the AGP status register, so just plug the
+	 * is the same as in the AGP status register, so just plug the
+	 * value in directly.
+
+	control1 &= ~CA_AGP_CAL_CYCLE;
+	control1 |= ((agp_status >> 10) & 0x7) << CA_AGP_CAL_CYCLE_SHFT;
+
+	TIOCA_DEBUG(CA_DBG_ALWAYS, ("    Setting CA calibration cycle to %#x\n",
+		(agp_status >> 10) & 0x7));
+
+	/*
+	 * Initial values for command queue depth and async request size.
+	 * CA AGP slave has a 128 entry buffer
+	 * Set async request size to equal an SGI 128 byte cache line
+	 * ###maule:  are these reasonable values?
+	 */
+
+	agp_command |= 3 << 13;		/* async request 128 bytes */
+	TIOCA_DEBUG(CA_DBG_ALWAYS, ("    Setting Async Request size to 3 (128 bytes)\n"));
+
+	agp_command |= 128 << 24;	/* command queue depth 128 entries */
+	TIOCA_DEBUG(CA_DBG_ALWAYS, ("    Setting Request Queue to 128\n"));
+	
+	/*
+	 * Store values out in the hardware
+	 */
+
+	CFG32_PUT(cfg, cap_ptr+8, agp_command);
+	soft->ca_control1 = control1;
+	tioca_base->ca_control1 = control1;
+	TIOCA_DEBUG(CA_DBG_ALWAYS, ("    agp_command set to %#x ca control1 set to 0x%p\n",
+				agp_command, (void *)control1));
+
+	/*
+	 * Set up Isochronous status/command registers
+	 * Note:  Isochronous is not supported in CA, so set the card's
+	 * Isoc command register to 0.
+	 */
+
+	if (agp_status & 0x20000) {
+		TIOCA_DEBUG(CA_DBG_ALWAYS,
+			("    Card supports Isochronous transfers but CA doesn't ... disabling\n"));
+		CFG16_PUT(cfg, cap_ptr+0x20, 0);
+	}
+
+	return 0;
+}
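+
+/*
+ * For illustration, following the arithmetic in tioca_agp_func_init()
+ * above, a card whose AGP status reports FastWrite support ends up with
+ *
+ *	agp_command = 0x22 | 0x10 | (3 << 13) | (128 << 24) == 0x80006032
+ *
+ * and ca_control1 carries the same calibration-cycle code the card
+ * requested in agp_status bits 12:10.
+ */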
+
+/*
+ * PIO interfaces
+ */
+
+pciio_piomap_t
+tioca_piomap_alloc(vertex_hdl_t pconn_vhdl,
+		   device_desc_t dev_desc,
+		   pciio_space_t space,
+		   iopaddr_t pci_addr,
+		   size_t req_size, size_t req_size_max, unsigned flags)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_piomap_alloc not implemented yet ...\n"));
+	return NULL;
+}
+
+void
+tioca_piomap_free(pciio_piomap_t pciio_piomap)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_piomap_free not implemented yet ...\n"));
+}
+
+caddr_t
+tioca_piomap_addr(pciio_piomap_t pciio_piomap, iopaddr_t pciio_addr,
+		  size_t byte_count)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_piomap_addr not implemented yet ...\n"));
+	return NULL;
+}
+
+void
+tioca_piomap_done(pciio_piomap_t pciio_piomap)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_piomap_done not implemented yet ...\n"));
+}
+
+caddr_t
+tioca_piotrans_addr(vertex_hdl_t devn, device_desc_t dev_desc,
+		    pciio_space_t space, iopaddr_t pciio_addr,
+		    size_t byte_count, unsigned flags)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_piotrans_addr not implemented yet ...\n"));
+	return NULL;
+}
+
+iopaddr_t
+tioca_piospace_alloc(vertex_hdl_t dev, device_desc_t dev_desc,
+		     pciio_space_t space, size_t byte_count, size_t alignment)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_piospace_alloc not implemented yet ...\n"));
+	return 0;
+}
+
+void
+tioca_piospace_free(vertex_hdl_t dev, pciio_space_t space, iopaddr_t pci_addr,
+		    size_t size)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_piospace_free not implemented yet ...\n"));
+}
+
+/*
+ * DMA interfaces
+ */
+
+iopaddr_t
+tioca_dmatrans_addr(vertex_hdl_t devn, device_desc_t dev_desc,
+		    iopaddr_t paddr, size_t byte_count, unsigned flags)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_dmatrans_addr not implemented yet ...\n"));
+	return (iopaddr_t)NULL;
+}
+
+pciio_dmamap_t
+tioca_dmamap_alloc(vertex_hdl_t dev, device_desc_t dev_desc,
+		   size_t byte_count_max, unsigned flags)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_dmamap_alloc not implemented yet ...\n"));
+	return NULL;
+}
+
+void
+tioca_dmamap_free(pciio_dmamap_t dmamap)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_dmamap_free not implemented yet ...\n"));
+}
+
+iopaddr_t
+tioca_dmamap_addr(pciio_dmamap_t dmamap, paddr_t paddr, size_t byte_count)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_dmamap_addr not implemented yet ...\n"));
+	return 0;
+}
+
+void
+tioca_dmamap_done(pciio_dmamap_t dmamap)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_dmamap_done not implemented yet ...\n"));
+}
+
+iopaddr_t
+tioca_dmamap_trans(vertex_hdl_t dev, device_desc_t dev_desc, paddr_t paddr,
+		   size_t byte_count, unsigned flags)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_dmamap_trans not implemented yet ...\n"));
+	return 0;
+}
+
+void
+tioca_dmamap_drain(pciio_dmamap_t map)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_dmamap_drain not implemented yet ...\n"));
+}
+
+void
+tioca_dmaaddr_drain(vertex_hdl_t vhdl, paddr_t addr, size_t bytes)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_dmaaddr_drain not implemented yet ...\n"));
+}
+
+void
+tioca_dmalist_drain(vertex_hdl_t vhdl, alenlist_t list)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_dmalist_drain not implemented yet ...\n"));
+}
+
+/*
+ * INTERRUPT MANAGEMENT
+ */
+
+void
+tioca_force_interrupt(tioca_intr_t intr)
+{
+	unsigned bit;
+	unsigned bits;
+	tioca_soft_p soft = intr->bi_soft;
+	tioca_p ca = soft->ca_base;
+
+	bits = intr->bi_ibits;
+	for (bit = 0; bit < 2; bit++) {
+		if (bits & (1 << bit)) {
+
+			TIOCA_DEBUG(CA_DBG_ALWAYS,
+				    ("tioca_force_interrupt: bit=%#x\n", bit));
+
+			if (bit == 0)
+				ca->ca_force_inta = 1;
+			if (bit == 1)
+				ca->ca_force_intb = 1;
+
+		}
+	}
+}
+
+pciio_intr_t
+tioca_intr_alloc(vertex_hdl_t dev, device_desc_t dev_desc,
+		 pciio_intr_line_t lines, vertex_hdl_t owner_dev)
+{
+	pciio_info_t info;
+	tioca_intr_t tioca_intr;
+	int is_threaded = 0;
+	xtalk_intr_t xtalk_intr = (xtalk_intr_t)0;
+	xtalk_intr_t *xtalk_intr_p;
+	hub_intr_t hub_intr;
+	tioca_soft_p soft;
+	unsigned tioca_int_bits = 0;
+	unsigned tioca_int_bit;
+
+	info = pciio_info_get(dev);
+	soft = (tioca_soft_p) pciio_info_mfast_get(info);
+
+	if (lines & PCIIO_INTR_LINE_A)
+		tioca_int_bits |= 1 << 0;
+	if (lines & PCIIO_INTR_LINE_B)
+		tioca_int_bits |= 1 << 1;
+
+	tioca_intr = tioca_mem_alloc(sizeof(struct tioca_intr_s), 0);
+	if (!tioca_intr)
+		return NULL;
+
+	tioca_intr->bi_dev = dev;
+	tioca_intr->bi_lines = lines;
+	tioca_intr->bi_soft = soft;
+	tioca_intr->bi_ibits = 0;	/* bits will be added below */
+	tioca_intr->bi_func = 0;	/* unset until connect */
+	tioca_intr->bi_arg = 0;	/* unset until connect */
+	tioca_intr->bi_flags = is_threaded ? 0 : PCIIO_INTR_NOTHREAD;
+	tioca_intr->bi_mustruncpu = CPU_NONE;
+
+	TIOCA_DEBUG(CA_DBG_VERBOSE,
+		    ("tioca_intr_alloc: tioca_int_bits: =%#x\n",
+		     tioca_int_bits));
+	for (tioca_int_bit = 0; tioca_int_bit < 2; tioca_int_bit++) {
+		if (tioca_int_bits & (1 << tioca_int_bit)) {
+			xtalk_intr_p =
+			    &soft->ca_intr[tioca_int_bit].ca_xtalk_intr;
+			xtalk_intr = *xtalk_intr_p;
+			if (xtalk_intr == NULL) {
+				xtalk_intr =
+				    xtalk_intr_alloc_nothd(soft->ca_vhdl,
+							   dev_desc, owner_dev);
+				ASSERT(xtalk_intr != NULL);
+				if (xtalk_intr == NULL) {
+					if (!*xtalk_intr_p) {
+						printk(KERN_ALERT
+						       "tioca_intr_alloc %s: unable to get coretalk interrupt resources\n",
+						       soft->ca_name);
+						return 0;
+					}
+				} else
+				    if (!compare_and_swap_ptr
+					((void **) xtalk_intr_p, NULL,
+					 xtalk_intr)) {
+					xtalk_intr_free(xtalk_intr);
+					xtalk_intr = *xtalk_intr_p;
+				}
+			}
+		}
+		tioca_intr->bi_ibits |= 1 << tioca_int_bit;
+	}
+	hub_intr = (hub_intr_t) xtalk_intr;
+	tioca_intr->bi_irq = hub_intr->i_bit;
+	tioca_intr->bi_cpu = hub_intr->i_cpuid;
+	TIOCA_DEBUG(CA_DBG_VERBOSE,
+		    ("tioca_intr_alloc complete: tioca_intr=0x%p\n",
+		     tioca_intr));
+	return &(tioca_intr->bi_pi);
+}
+
+void
+tioca_intr_free(pciio_intr_t pciio_intr)
+{
+	tioca_intr_t	tioca_intr = (tioca_intr_t)pciio_intr;
+	unsigned tioca_int_bits = tioca_intr->bi_ibits;
+	tioca_soft_p soft = tioca_intr->bi_soft;
+	unsigned tioca_int_bit;
+	xtalk_intr_t *xtalk_intr_p;
+
+	for (tioca_int_bit = 0; tioca_int_bit < 8; tioca_int_bit++) {
+		if (tioca_int_bits & (1 << tioca_int_bit)) {
+			xtalk_intr_p =
+			    &soft->ca_intr[tioca_int_bit].ca_xtalk_intr;
+			if (*xtalk_intr_p) {
+				xtalk_intr_free(*xtalk_intr_p);
+				*xtalk_intr_p = 0;
+			}
+		}
+	}
+	tioca_mem_free(tioca_intr, 0);
+}
+
+void
+tioca_setpciint(xtalk_intr_t xtalk_intr)
+{
+	tioca_soft_p soft;
+	tioca_p ca;
+	iopaddr_t addr;
+	xtalk_intr_vector_t vect;
+	vertex_hdl_t vhdl;
+	pciio_info_t info;
+	unsigned tioca_int_bit;
+
+	addr = xtalk_intr_addr_get(xtalk_intr);
+	vect = xtalk_intr_vector_get(xtalk_intr);
+	vhdl = xtalk_intr_dev_get(xtalk_intr);
+
+	/* int_bits are stored in sfarg, int_bits bit 1:0 */
+	tioca_int_bit = *((unsigned *) xtalk_intr_sfarg_get(xtalk_intr)) & 0x3;
+
+	info = pciio_info_get(vhdl);
+	soft = (tioca_soft_p) pciio_info_mfast_get(info);
+
+	ca = soft->ca_base;
+	if (tioca_int_bit & (1 << 0))
+		ca->ca_inta_dest_addr =
+		    addr | ((tioca_reg_t) vect << CA_INT_DEST_VECT_SHFT);
+	if (tioca_int_bit & (1 << 1))
+		ca->ca_intb_dest_addr =
+		    addr | ((tioca_reg_t) vect << CA_INT_DEST_VECT_SHFT);
+
+}
+
+int
+tioca_intr_connect(pciio_intr_t pciio_intr, intr_func_t intr_func,
+		   intr_arg_t intr_arg)
+{
+	tioca_intr_t	tioca_intr = (tioca_intr_t)pciio_intr;
+	tioca_soft_p soft = tioca_intr->bi_soft;
+	tioca_p ca = soft->ca_base;
+	unsigned tioca_int_bits = tioca_intr->bi_ibits;
+	unsigned tioca_int_bit;
+	unsigned int_mask = 0;
+
+	if (tioca_intr == NULL)
+		return -1;
+
+	TIOCA_DEBUG(CA_DBG_VERBOSE,
+		    ("tioca_intr_connect: intr_func=0x%p, intr_arg=0x%p\n",
+		     intr_func, intr_arg));
+
+	tioca_intr->bi_func = intr_func;
+	tioca_intr->bi_arg = intr_arg;
+	*((volatile unsigned *) &tioca_intr->bi_flags) |= PCIIO_INTR_CONNECTED;
+
+	for (tioca_int_bit = 0; tioca_int_bit < 2; tioca_int_bit++)
+		if (tioca_int_bits & (1 << tioca_int_bit)) {
+			xtalk_intr_t xtalk_intr;
+
+			xtalk_intr = soft->ca_intr[tioca_int_bit].ca_xtalk_intr;
+			soft->ca_intr[tioca_int_bit].ca_int_bit = tioca_int_bit;
+			xtalk_intr_connect(xtalk_intr, intr_func, intr_arg,
+					   (xtalk_intr_setfunc_t)
+					   tioca_setpciint,
+					   &soft->ca_intr[tioca_int_bit].
+					   ca_int_bit);
+		}
+
+	if (tioca_int_bits & (1 << 0))
+		int_mask |= CA_INTA;
+	if (tioca_int_bits & (1 << 1))
+		int_mask |= CA_INTB;
+	soft->ca_int_mask &= ~int_mask;
+	ca->ca_int_mask = soft->ca_int_mask;
+	soft->ca_int_mask = ca->ca_int_mask;
+
+	return 0;
+}
+
+void
+tioca_intr_disconnect(pciio_intr_t pciio_intr)
+{
+	tioca_intr_t tioca_intr = (tioca_intr_t)pciio_intr;
+	tioca_soft_p soft = tioca_intr->bi_soft;
+	unsigned tioca_int_bits = tioca_intr->bi_ibits;
+	unsigned tioca_int_bit;
+	unsigned int_mask = 0;
+	tioca_p ca = soft->ca_base;
+
+	*((volatile unsigned *) &tioca_intr->bi_flags) &= ~PCIIO_INTR_CONNECTED;
+	tioca_intr->bi_func = 0;
+	tioca_intr->bi_arg = 0;
+
+	if (tioca_int_bits & (1 << 0))
+		int_mask |= CA_INTA;
+	if (tioca_int_bits & (1 << 1))
+		int_mask |= CA_INTB;
+	soft->ca_int_mask |= int_mask;
+	ca->ca_int_mask = soft->ca_int_mask;
+	soft->ca_int_mask = ca->ca_int_mask;
+
+	for (tioca_int_bit = 0; tioca_int_bit < 2; tioca_int_bit++)
+		if (tioca_int_bits & (1 << tioca_int_bit)) {
+			xtalk_intr_disconnect(soft->ca_intr[tioca_int_bit].
+					      ca_xtalk_intr);
+		}
+}
+
+vertex_hdl_t
+tioca_intr_cpu_get(pciio_intr_t pciio_intr)
+{
+	tioca_intr_t tioca_intr = (tioca_intr_t)pciio_intr;
+	tioca_soft_p soft = tioca_intr->bi_soft;
+	unsigned tioca_int_bits = tioca_intr->bi_ibits;
+	unsigned tioca_int_bit;
+
+	for (tioca_int_bit = 0; tioca_int_bit < 2; tioca_int_bit++)
+		if (tioca_int_bits & (1 << tioca_int_bit))
+			return xtalk_intr_cpu_get(soft->ca_intr[tioca_int_bit].
+						  ca_xtalk_intr);
+	return 0;
+}
+
+void
+tioca_seterrint(xtalk_intr_t intr)
+{
+	iopaddr_t addr = xtalk_intr_addr_get(intr);
+	xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
+
+	tioca_p ca = (tioca_p) xtalk_intr_sfarg_get(intr);
+	ca->ca_err_int_dest_addr =
+	    addr | ((tioca_reg_t) vect << CA_INT_DEST_VECT_SHFT);
+}
+
+/*
+ * CONFIGURATION MANAGEMENT
+ */
+
+void
+tioca_provider_startup(vertex_hdl_t pciio_provider)
+{
+}
+
+void
+tioca_provider_shutdown(vertex_hdl_t pciio_provider)
+{
+}
+
+int
+tioca_reset(vertex_hdl_t conn)
+{
+	return -1;
+}
+
+pciio_endian_t
+tioca_endian_set(vertex_hdl_t pconn_vhdl, pciio_endian_t device_end,
+		 pciio_endian_t desired_end)
+{
+	pciio_info_t pciio_info = pciio_hostinfo_get(pconn_vhdl);
+	tioca_soft_p soft = (tioca_soft_p) pciio_info_mfast_get(pciio_info);
+	tioca_p ca = soft->ca_base;
+	tioca_reg_t devreg;
+
+	devreg = soft->ca_control1;
+	if (device_end != desired_end)
+		devreg |= CA_PIO_IO_SWAP | CA_PIO_MEM_SWAP;
+	else
+		devreg &= ~(CA_PIO_IO_SWAP | CA_PIO_MEM_SWAP);
+
+	ca->ca_control1 = devreg;
+	soft->ca_control1 = ca->ca_control1;
+
+	return desired_end;
+}
+
+/*
+ * Given a function's connection handle, return the config space base address
+ */
+
+static void *
+tioca_config_base(tioca_soft_p soft, pciio_bus_t bus, pciio_slot_t device,
+		  pciio_function_t function)
+{
+	tioca_p base = soft->ca_base;
+
+	/*
+	 * Constructing the PCI bus address for config space access must be
+	 * done almost entirely by s/w.  CA will set bits 1:0 to 00'b for
+	 * Type 0 cycles or 01'b for type 1 cycles based on the address space
+	 * selected.  Other than that, we must plug the function/dword (type 0)
+	 * or bus/device/function/dword (type 1) into the correct place of
+	 * the PCI address.  In addition, for type 0 transactions, we must have
+	 * AD16 set to drive the IDSEL of the card in the AGP slot.
+	 *
+	 * TIO AEGIS Programmers reference guide section 8.7 discusses this.
+	 */
+
+	if (bus == 0) {
+		/*
+		 * Bits 31:2 are passed through, so we must construct the
+		 * Type0 address including making sure that AD16 is set
+		 * to drive the IDSEL of the card in the AGP slot.
+		 */
+
+		(uint64_t) base |= CA_PIO_PCI_TYPE0_CONFIG;
+		(uint64_t) base |= (function & 0x7) << 8;
+		(uint64_t) base |= 0x10000;	/* set AD16 */
+	} else {
+		/*
+		 * Bits 31:2 are passed through, so we must construct the
+		 * Type1 address.  CA will set bits 1:0 to 01'b to indicate
+		 * a type 1 cycle on the bus.
+		 */
+		(uint64_t) base |= CA_PIO_PCI_TYPE1_CONFIG;
+		(uint64_t) base |= (bus & 0xff) << 16;
+		(uint64_t) base |= (device & 0x1f) << 10;
+		(uint64_t) base |= (function & 0x7) << 8;
+	}
+
+	TIOCA_DEBUG(CA_DBG_CONFIG | CA_DBG_VERBOSE,
+		    ("tioca_config_base hdl %p %d/%d/%d return %p\n",
+		     (void *) soft->ca_systemhdl, bus, device, function, base));
+
+	return (void *) base;
+}
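+
+/*
+ * For illustration, a type 0 access to bus 0 / device 0 / function 2
+ * composed by tioca_config_base() above is
+ *
+ *	base | CA_PIO_PCI_TYPE0_CONFIG | (2 << 8) | 0x10000
+ *
+ * i.e. the function number lands in bits 10:8 and AD16 is driven to
+ * select the card in the AGP slot; callers such as tioca_config_get()
+ * then add the register offset to this base.
+ */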
+
+/*
+ * Probe a bus address under TIO:CA with MASTER ABORT interrupts disabled.
+ * Returns:	-1 if there was a usage error
+ *		0  if addr/size was probed without error
+ *		1  if addr/size was probed and resulted in an error
+ *
+ * In the case of a non-zero return, *valp will be filled in with all 1's.
+ * Otherwise, the value read from addr will be stored in *valp;
+ */
+
+static int
+tioca_probe(tioca_soft_p soft, void *addr, unsigned size, void * valp)
+{
+	int reset_int_mask = 0;
+	int rval = 0;
+	tioca_reg_t int_mask;
+	tioca_reg_t int_status;
+	tioca_reg_t pci_errtype;
+	tioca_p ca_base = soft->ca_base;
+	uint32_t val;
+	uint32_t *addr32 = &val;
+	uint8_t *valp8 = (uint8_t *)&val;
+	uint8_t *addr8 = (uint8_t *)addr;
+	uint16_t *valp16 = (uint16_t *)&val;
+	uint16_t *addr16 = (uint16_t *)addr;
+	int index;
+
+	/*
+	 * Temporarily disable master abort interrupts and clear state
+	 * ###maule:  Should we lock around this?
+	 */
+
+	int_mask = ca_base->ca_int_mask;
+	if (int_mask & CA_PCI_ERR) {
+		ca_base->ca_int_mask = int_mask & ~CA_PCI_ERR;
+		reset_int_mask = 1;
+	}
+
+	/*
+	 * Clear any existing PCI error state
+	 * ###maule:  Should we check for existing errors and print an
+	 * error if any are present but unhandled?
+	 */
+
+	ca_base->ca_int_status_alias = CA_PCI_ERR;
+	ca_base->ca_pcierr_type = 0;
+
+	/*
+	 * Do the access - snia_badaddr_val() only supports a size of 4
+	 */
+
+	addr32 = (uint32_t *)addr;
+	rval = snia_badaddr_val((void *)addr32, 4, &val);
+	if (rval) {
+		val = 0xffffffff;
+	}
+
+	switch (size) {
+	case 1:
+		index = addr8 - (uint8_t *)addr32;
+		*((uint8_t *)valp) = valp8[index];
+		break;
+	case 2:
+		index = addr16 - (uint16_t *)addr32;
+		*((uint16_t *)valp) = valp16[index];
+		break;
+	case 4:
+		*((uint32_t *)valp) = val;
+		break;
+	}
+
+	/*
+	 * Check for errors
+	 */
+
+	if (rval) {
+		tioca_dump(ca_base);
+	}
+
+	int_status = ca_base->ca_int_status;
+	if (int_status & CA_PCI_ERR) {
+		/*
+		 * Check the error type.  We're expecting only
+		 * CA_PCIERR_MST_MABT to be set.  Any other PCI error condition
+		 * results in an error message.
+		 */
+
+		pci_errtype = ca_base->ca_pcierr_type;
+		if (pci_errtype != CA_PCIERR_MST_MABT) {
+			TIOCA_DEBUG(CA_DBG_ERROR,
+				    ("tioca_probe of addr 0x%p resulted in unexpected ca_pcierr_type 0x%p\n",
+				     (void *) addr, (void *) pci_errtype));
+		}
+
+		/*
+		 * Clean up the error 
+		 */
+
+		ca_base->ca_int_status_alias = CA_PCI_ERR;
+		ca_base->ca_pcierr_type = 0;
+	}
+
+	/*
+	 * Reset ca_int_mask if necessary
+	 */
+
+	if (reset_int_mask) {
+		ca_base->ca_int_mask = int_mask;
+	}
+
+	return rval;
+}
+
+/*
+ * Read a config space register.  This is built around tioca_probe() so that
+ * accesses which result in a master abort will return a legal value of all 1's.
+ */
+
+uint64_t
+tioca_config_get(vertex_hdl_t conn, unsigned reg, unsigned size)
+{
+	pciio_info_t info;
+	tioca_soft_p soft;
+	caddr_t cfg_base;
+	uint64_t val;
+
+	TIOCA_DEBUG(CA_DBG_CONFIG | CA_DBG_VERBOSE,
+		    ("tioca_config_get ...\n"));
+
+	info = pciio_info_get(conn);
+	soft = (tioca_soft_p) pciio_info_mfast_get(info);
+
+	/*
+	 * ###maule:  hardcode bus 0 for now
+	 */
+
+	cfg_base =
+	    tioca_config_base(soft, 0, pciio_info_slot_get(info),
+			      pciio_info_function_get(info));
+
+	tioca_probe(soft, cfg_base + reg, size, &val);
+	TIOCA_DEBUG(CA_DBG_CONFIG | CA_DBG_VERBOSE,
+		    ("tioca_config_get conn %p probe 0x%p size %d returning 0x%p\n",
+		     (void *) conn, cfg_base + reg, size, (void *) val));
+
+	return val;
+}
+
+/*
+ * Set a config space register
+ */
+
+void
+tioca_config_set(vertex_hdl_t conn, unsigned reg, unsigned size, uint64_t val)
+{
+	pciio_info_t info;
+	tioca_soft_p soft;
+	caddr_t cfg_base;
+
+	info = pciio_info_get(conn);
+	soft = (tioca_soft_p) pciio_info_mfast_get(info);
+
+	TIOCA_DEBUG(CA_DBG_CONFIG | CA_DBG_VERBOSE,
+		    ("tioca_config_set ...\n"));
+
+	/*
+	 * ###maule:  hardcode bus 0 for now
+	 */
+
+	cfg_base =
+	    tioca_config_base(soft, 0, pciio_info_slot_get(info),
+			      pciio_info_function_get(info));
+
+	switch (size) {
+	case 1:
+		*((uint8_t *) (cfg_base + reg)) = (uint8_t) val;
+		break;
+	case 2:
+		*((uint16_t *) (cfg_base + reg)) = (uint16_t) val;
+		break;
+	case 4:
+		*((uint32_t *) (cfg_base + reg)) = (uint32_t) val;
+		break;
+	default:
+		TIOCA_DEBUG(CA_DBG_ERROR,
+			    ("tioca_config_set - invalid size %d\n", size));
+		break;
+	}
+}
+
+/*
+ * ERROR HANDLING
+ */
+
+int
+tioca_error_devenable(vertex_hdl_t pconn_vhdl, int error_code)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_error_devenable not implemented yet ...\n"));
+	return -1;
+}
+
+pciio_slot_t
+tioca_error_extract(vertex_hdl_t vhdl, pciio_space_t * spacep,
+		    iopaddr_t * addrp)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_error_extract not implemented yet ...\n"));
+	return PCIIO_SLOT_NONE;
+}
+
+/*
+ * CALLBACK SUPPORT
+ */
+
+void
+tioca_driver_reg_callback(vertex_hdl_t conn, int key1, int key2, int error)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_driver_reg_callback not implemented yet ...\n"));
+}
+
+void
+tioca_driver_unreg_callback(vertex_hdl_t conn, int key1, int key2, int error)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_driver_unreg_callback not implemented yet ...\n"));
+}
+
+int
+tioca_device_unregister(vertex_hdl_t conn)
+{
+	TIOCA_DEBUG(CA_DBG_ALWAYS,
+		    ("tioca_device_unregister not implemented yet ...\n"));
+	return -1;
+}
diff -Nru a/arch/ia64/sn/io/xswitch.c b/arch/ia64/sn/io/xswitch.c
--- a/arch/ia64/sn/io/xswitch.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/xswitch.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -12,7 +11,6 @@
 #include <asm/sn/sgi.h>
 #include <asm/sn/driver.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>
 #include <asm/sn/xtalk/xtalk.h>
@@ -30,8 +28,40 @@
  */
 
 #include <asm/sn/xtalk/xbow.h>
+#define DEV_FUNC(dev,func)      xbow_##func
+
+#if !defined(DEV_FUNC)
+/*
+ * There is more than one possible provider
+ * for this platform. We need to examine the
+ * master vertex of the current vertex for
+ * a provider function structure, and indirect
+ * through the appropriately named member.
+ */
+#define	DEV_FUNC(dev,func)	xwidget_to_provider_fns(dev)->func
+
+static xswitch_provider_t *
+xwidget_to_provider_fns(vertex_hdl_t xconn)
+{
+    vertex_hdl_t            busv;
+    xswitch_info_t          xswitch_info;
+    xswitch_provider_t      provider_fns;
+
+    busv = hwgraph_connectpt_get(xconn);
+    ASSERT(busv != GRAPH_VERTEX_NONE);
+
+    xswitch_info = xswitch_info_get(busv);
+    ASSERT(xswitch_info != NULL);
+
+    provider_fns = xswitch_info->xswitch_fns;
+    ASSERT(provider_fns != NULL);
+
+    return provider_fns;
+}
+#endif
 
 #define	XSWITCH_CENSUS_BIT(port)		(1<<(port))
+#define	XSWITCH_CENSUS_PORT_MIN			(0x0)
 #define	XSWITCH_CENSUS_PORT_MAX			(0xF)
 #define	XSWITCH_CENSUS_PORTS			(0x10)
 #define	XSWITCH_WIDGET_PRESENT(infop,port)	((infop)->census & XSWITCH_CENSUS_BIT(port))
@@ -62,20 +92,28 @@
 		      xwidgetnum_t port,
 		      vertex_hdl_t xwidget)
 {
+#if XSWITCH_CENSUS_PORT_MIN
+    if (port < XSWITCH_CENSUS_PORT_MIN)
+	return;
+#endif
     if (port > XSWITCH_CENSUS_PORT_MAX)
 	return;
 
-    xswitch_info->vhdl[port] = xwidget;
+    xswitch_info->vhdl[port - XSWITCH_CENSUS_PORT_MIN] = xwidget;
 }
 
 vertex_hdl_t
 xswitch_info_vhdl_get(xswitch_info_t xswitch_info,
 		      xwidgetnum_t port)
 {
+#if XSWITCH_CENSUS_PORT_MIN
+    if (port < XSWITCH_CENSUS_PORT_MIN)
+	return GRAPH_VERTEX_NONE;
+#endif
     if (port > XSWITCH_CENSUS_PORT_MAX)
 	return GRAPH_VERTEX_NONE;
 
-    return xswitch_info->vhdl[port];
+    return xswitch_info->vhdl[port - XSWITCH_CENSUS_PORT_MIN];
 }
 
 /*
@@ -88,20 +126,28 @@
 				   xwidgetnum_t port,
 				   vertex_hdl_t master_vhdl)
 {
+#if XSWITCH_CENSUS_PORT_MIN
+    if (port < XSWITCH_CENSUS_PORT_MIN)
+	return;
+#endif
     if (port > XSWITCH_CENSUS_PORT_MAX)
 	return;
 
-    xswitch_info->master_vhdl[port] = master_vhdl;
+    xswitch_info->master_vhdl[port - XSWITCH_CENSUS_PORT_MIN] = master_vhdl;
 }
 
 vertex_hdl_t
 xswitch_info_master_assignment_get(xswitch_info_t xswitch_info,
 				   xwidgetnum_t port)
 {
+#if XSWITCH_CENSUS_PORT_MIN
+    if (port < XSWITCH_CENSUS_PORT_MIN)
+	return GRAPH_VERTEX_NONE;
+#endif
     if (port > XSWITCH_CENSUS_PORT_MAX)
 	return GRAPH_VERTEX_NONE;
 
-    return xswitch_info->master_vhdl[port];
+    return xswitch_info->master_vhdl[port - XSWITCH_CENSUS_PORT_MIN];
 }
 
 void
@@ -122,7 +168,9 @@
 
 	NEW(xswitch_info);
 	xswitch_info->census = 0;
-	for (port = 0; port <= XSWITCH_CENSUS_PORT_MAX; port++) {
+	for (port = XSWITCH_CENSUS_PORT_MIN;
+	     port <= XSWITCH_CENSUS_PORT_MAX;
+	     port++) {
 	    xswitch_info_vhdl_set(xswitch_info, port,
 				  GRAPH_VERTEX_NONE);
 
@@ -154,6 +202,11 @@
 int
 xswitch_info_link_ok(xswitch_info_t xswitch_info, xwidgetnum_t port)
 {
+#if XSWITCH_CENSUS_PORT_MIN
+    if (port < XSWITCH_CENSUS_PORT_MIN)
+	return 0;
+#endif
+
     if (port > XSWITCH_CENSUS_PORT_MAX)
 	return 0;
 
@@ -163,5 +216,6 @@
 int
 xswitch_reset_link(vertex_hdl_t xconn_vhdl)
 {
-    return xbow_reset_link(xconn_vhdl);
+    return DEV_FUNC(xconn_vhdl, reset_link)
+	(xconn_vhdl);
 }
diff -Nru a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
--- a/arch/ia64/sn/kernel/irq.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/kernel/irq.c	Thu Nov  6 13:42:35 2003
@@ -1,35 +1,11 @@
 /*
  * Platform dependent support for SGI SN
  *
- * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
- * 
- * This program is free software; you can redistribute it and/or modify it 
- * under the terms of version 2 of the GNU General Public License 
- * as published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it would be useful, but 
- * WITHOUT ANY WARRANTY; without even the implied warranty of 
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- * 
- * Further, this software is distributed without any warranty that it is 
- * free of the rightful claim of any third person regarding infringement 
- * or the like.  Any license provided herein, whether implied or 
- * otherwise, applies only to this software file.  Patent licenses, if 
- * any, provided herein do not apply to combinations of this program with 
- * other software, or any other product whatsoever.
- * 
- * You should have received a copy of the GNU General Public 
- * License along with this program; if not, write the Free Software 
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- * 
- * Contact information:  Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, 
- * Mountain View, CA  94043, or:
- * 
- * http://www.sgi.com 
- * 
- * For further information regarding this notice, see: 
- * 
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
 #include <linux/init.h>
@@ -42,10 +18,8 @@
 #include <asm/pgtable.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/types.h>
-#include <asm/sn/pci/bridge.h>
 #include <asm/sn/pci/pciio.h>
 #include <asm/sn/pci/pciio_private.h>
 #include <asm/sn/pci/pcibr.h>
@@ -62,11 +36,12 @@
 #include <asm/bitops.h>
 #include <asm/sn/sn2/shub_mmr.h>
 
-int irq_to_bit_pos(int irq);
 static void force_interrupt(int irq);
 extern void pcibr_force_interrupt(pcibr_intr_t intr);
 extern int sn_force_interrupt_flag;
 
+static pcibr_intr_list_t *pcibr_intr_list;
+
 
 
 static unsigned int
@@ -139,8 +114,35 @@
 }
 
 static void
-sn_set_affinity_irq(unsigned int irq, unsigned long mask)
+sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
 {
+#ifdef CONFIG_SMP
+        int redir = 0;
+        pcibr_intr_list_t p = pcibr_intr_list[irq];
+        pcibr_intr_t intr; 
+	int	cpu;
+        extern void sn_shub_redirect_intr(pcibr_intr_t intr, unsigned long cpu);
+        extern void sn_tio_redirect_intr(pcibr_intr_t intr, unsigned long cpu);
+                
+	if (p == NULL)
+		return; 
+        
+	intr = p->il_intr;
+
+	if (intr == NULL)
+		return; 
+
+	cpu = first_cpu(mask);
+	if (IS_PIC_SOFT(intr->bi_soft) ) {
+		sn_shub_redirect_intr(intr, cpu);
+	// Defer TIO for now.
+	// } else if (IS_TIO_SOFT(intr->bi_soft) {
+	// sn_tio_redirect_intr(intr, cpu);
+	} else { 
+		return; 
+	}
+	(void) set_irq_affinity_info(irq, cpu_physical_id(intr->bi_cpu), redir);
+#endif /* CONFIG_SMP */
 }
 
 
@@ -187,41 +189,18 @@
 	}
 }
 
-int
-bit_pos_to_irq(int bit) {
-#define BIT_TO_IRQ 64
-	if (bit > 118) bit = 118;
-
-        return bit + BIT_TO_IRQ;
-}
-
-int
-irq_to_bit_pos(int irq) {
-#define IRQ_TO_BIT 64
-	int bit = irq - IRQ_TO_BIT;
-
-        return bit;
-}
-
-struct pcibr_intr_list_t {
-	struct pcibr_intr_list_t *next;
-	pcibr_intr_t intr;
-};
-
-static struct pcibr_intr_list_t **pcibr_intr_list;
-
 void
 register_pcibr_intr(int irq, pcibr_intr_t intr) {
-	struct pcibr_intr_list_t *p = kmalloc(sizeof(struct pcibr_intr_list_t), GFP_KERNEL);
-	struct pcibr_intr_list_t *list;
+	pcibr_intr_list_t p = kmalloc(sizeof(struct pcibr_intr_list_s), GFP_KERNEL);
+	pcibr_intr_list_t list;
 	int cpu = SN_CPU_FROM_IRQ(irq);
 
 	if (pcibr_intr_list == NULL) {
-		pcibr_intr_list = kmalloc(sizeof(struct pcibr_intr_list_t *) * NR_IRQS, GFP_KERNEL);
+		pcibr_intr_list = kmalloc(sizeof(pcibr_intr_list_t) * NR_IRQS, GFP_KERNEL);
 		if (pcibr_intr_list == NULL) 
-			pcibr_intr_list = vmalloc(sizeof(struct pcibr_intr_list_t *) * NR_IRQS);
+			pcibr_intr_list = vmalloc(sizeof(pcibr_intr_list_t) * NR_IRQS);
 		if (pcibr_intr_list == NULL) panic("Could not allocate memory for pcibr_intr_list\n");
-		memset( (void *)pcibr_intr_list, 0, sizeof(struct pcibr_intr_list_t *) * NR_IRQS);
+		memset( (void *)pcibr_intr_list, 0, sizeof(pcibr_intr_list_t) * NR_IRQS);
 	}
 	if (pdacpu(cpu)->sn_last_irq < irq) {
 		pdacpu(cpu)->sn_last_irq = irq;
@@ -229,42 +208,42 @@
 	if (pdacpu(cpu)->sn_first_irq > irq) pdacpu(cpu)->sn_first_irq = irq;
 	if (!p) panic("Could not allocate memory for pcibr_intr_list_t\n");
 	if ((list = pcibr_intr_list[irq])) {
-		while (list->next) list = list->next;
-		list->next = p;
-		p->next = NULL;
-		p->intr = intr;
+		while (list->il_next) list = list->il_next;
+		list->il_next = p;
+		p->il_next = NULL;
+		p->il_intr = intr;
 	} else {
 		pcibr_intr_list[irq] = p;
-		p->next = NULL;
-		p->intr = intr;
+		p->il_next = NULL;
+		p->il_intr = intr;
 	}
 }
 
 void
 force_polled_int(void) {
 	int i;
-	struct pcibr_intr_list_t *p;
+	pcibr_intr_list_t p;
 
 	for (i=0; i<NR_IRQS;i++) {
 		p = pcibr_intr_list[i];
 		while (p) {
-			if (p->intr){
-				pcibr_force_interrupt(p->intr);
+			if (p->il_intr){
+				pcibr_force_interrupt(p->il_intr);
 			}
-			p = p->next;
+			p = p->il_next;
 		}
 	}
 }
 
 static void
 force_interrupt(int irq) {
-	struct pcibr_intr_list_t *p = pcibr_intr_list[irq];
+	pcibr_intr_list_t p = pcibr_intr_list[irq];
 
 	while (p) {
-		if (p->intr) {
-			pcibr_force_interrupt(p->intr);
+		if (p->il_intr) {
+			pcibr_force_interrupt(p->il_intr);
 		}
-		p = p->next;
+		p = p->il_next;
 	}
 }
 
@@ -286,7 +265,7 @@
 	unsigned long irr_reg;
 
 
-	regval = intr->bi_soft->bs_base->p_int_status_64;
+	regval = pcireg_intr_status_get(intr->bi_soft->bs_base);
 	irr_reg_num = irq_to_vector(irq) / 64;
 	irr_bit = irq_to_vector(irq) % 64;
 	switch (irr_reg_num) {
@@ -324,13 +303,13 @@
 	if (pda->sn_first_irq == 0) return;
 	for (i=pda->sn_first_irq;
 		i <= pda->sn_last_irq; i++) {
-			struct pcibr_intr_list_t *p = pcibr_intr_list[i];
+			pcibr_intr_list_t p = pcibr_intr_list[i];
 			if (p == NULL) {
 				continue;
 			}
 			while (p) {
-				sn_check_intr(i, p->intr);
-				p = p->next;
+				sn_check_intr(i, p->il_intr);
+				p = p->il_next;
 			}
 	}
 }
diff -Nru a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
--- a/arch/ia64/sn/kernel/setup.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/kernel/setup.c	Thu Nov  6 13:42:35 2003
@@ -1,33 +1,9 @@
 /*
- * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All rights reserved.
- * 
- * This program is free software; you can redistribute it and/or modify it 
- * under the terms of version 2 of the GNU General Public License 
- * as published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it would be useful, but 
- * WITHOUT ANY WARRANTY; without even the implied warranty of 
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- * 
- * Further, this software is distributed without any warranty that it is 
- * free of the rightful claim of any third person regarding infringement 
- * or the like.  Any license provided herein, whether implied or 
- * otherwise, applies only to this software file.  Patent licenses, if 
- * any, provided herein do not apply to combinations of this program with 
- * other software, or any other product whatsoever.
- * 
- * You should have received a copy of the GNU General Public 
- * License along with this program; if not, write the Free Software 
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- * 
- * Contact information:  Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, 
- * Mountain View, CA  94043, or:
- * 
- * http://www.sgi.com 
- * 
- * For further information regarding this notice, see: 
- * 
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
 #include <linux/config.h>
@@ -82,6 +58,7 @@
 extern void bte_init_cpu (void);
 extern void sn_timer_init(void);
 extern unsigned long last_time_offset;
+extern void init_platform_hubinfo(nodepda_t **nodepdaindr);
 extern void (*ia64_mark_idle)(int);
 extern void snidle(int);
 
@@ -93,6 +70,8 @@
 
 short physical_node_map[MAX_PHYSNODE_ID];
 
+int     numionodes;
+
 /*
  * This is the address of the RRegs in the HSpace of the global
  * master.  It is used by a hack in serial.c (serial_[in|out],
@@ -240,9 +219,10 @@
 	long status, ticks_per_sec, drift;
 	int pxm;
 	int major = sn_sal_rev_major(), minor = sn_sal_rev_minor();
-	extern void io_sh_swapper(int, int);
-	extern nasid_t get_master_baseio_nasid(void);
+	extern nasid_t snia_get_master_baseio_nasid(void);
 	extern void sn_cpu_init(void);
+	extern nasid_t snia_get_console_nasid(void);
+
 
 	MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
 
@@ -263,11 +243,9 @@
 		panic("PROM version too old\n");
 	}
 
-	io_sh_swapper(get_nasid(), 0);
-
 	master_nasid = get_nasid();
-	(void)get_console_nasid();
-	(void)get_master_baseio_nasid();
+	(void)snia_get_console_nasid();
+	(void)snia_get_master_baseio_nasid();
 
 	status = ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, &drift);
 	if (status != 0 || ticks_per_sec < 100000) {
@@ -312,6 +290,12 @@
 	 */
 	sn_cpu_init();
 
+	/*
+	 * Setup hubinfo stuff. Has to happen AFTER sn_cpu_init(),
+	 * because it uses the cnode to nasid tables.
+	 */
+	init_platform_hubinfo(nodepdaindr);
+
 #ifdef CONFIG_SMP
 	init_smp_config();
 #endif
@@ -329,6 +313,7 @@
 sn_init_pdas(char **cmdline_p)
 {
 	cnodeid_t	cnode;
+	void scan_for_ionodes(void);
 
 	/*
 	 * Make sure that the PDA fits entirely in the same page as the 
@@ -341,6 +326,9 @@
 	for (cnode=0; cnode<numnodes; cnode++)
 		pda->cnodeid_to_nasid_table[cnode] = pxm_to_nasid(nid_to_pxm_map[cnode]);
 
+	numionodes = numnodes;
+	scan_for_ionodes();
+
         /*
          * Allocate & initalize the nodepda for each node.
          */
@@ -444,4 +432,32 @@
 	}
 
 	bte_init_cpu();
+}
+
+/*
+ * Scan klconfig for TIO's.  Add the TIO nasids to the
+ * physical_node_map and the pda and increment numionodes.
+ */
+
+void
+scan_for_ionodes(void) {
+	int nasid = 0;
+	lboard_t *brd;
+
+	/* Scan all compute nodes. */
+	for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid +=2) {
+		/* if there's no nasid, don't try to read the klconfig on the node */
+		if (physical_node_map[nasid] == -1) continue;
+		brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_TIO);
+		while (brd) {
+			pda->cnodeid_to_nasid_table[numionodes] = brd->brd_nasid;
+			physical_node_map[brd->brd_nasid] = numionodes;
+			numionodes++;
+			brd = KLCF_NEXT(brd);
+			if (!brd)
+				return;
+
+			brd = find_lboard(brd, KLTYPE_TIO);
+		}
+	}
 }
diff -Nru a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
--- a/arch/ia64/sn/kernel/sn2/cache.c	Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/kernel/sn2/cache.c	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- * 
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -11,6 +10,7 @@
 #include <linux/module.h>
 #include <asm/cacheflush.h>
 #include <asm/system.h>
+#include <asm/pgalloc.h>
 
 /**
  * sn_flush_all_caches - flush a range of address from all caches (incl. L4)
@@ -25,12 +25,18 @@
 void
 sn_flush_all_caches(long flush_addr, long bytes)
 {
-	flush_icache_range(flush_addr, flush_addr+bytes);
 	/*
-	 * The last call may have returned before the caches
-	 * were actually flushed, so we call it again to make
-	 * sure.
+	 * The following double call to flush_icache_range has
+	 * the following effect which is required:
+	 *
+	 * The first flush_icache_range ensures the fc() address
+	 * is visible on the FSB.  The NUMA controller however has
+	 * not necessarily forwarded the fc() request to all other
+	 * NUMA controllers. The second call will stall
+	 * at the associated fc() instruction until the first
+	 * has been forwarded to all other NUMA controllers.
 	 */
+	flush_icache_range(flush_addr, flush_addr+bytes);
 	flush_icache_range(flush_addr, flush_addr+bytes);
 	mb();
 }
diff -Nru a/include/asm-ia64/sn/addrs.h b/include/asm-ia64/sn/addrs.h
--- a/include/asm-ia64/sn/addrs.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/addrs.h	Thu Nov  6 13:42:35 2003
@@ -17,33 +17,20 @@
 #include <asm/sn/types.h>
 #endif 
 
-#ifndef __ASSEMBLY__
-
-#define PS_UINT_CAST		(__psunsigned_t)
-#define UINT64_CAST		(uint64_t)
 #define HUBREG_CAST		(volatile mmr_t *)
 
-#elif __ASSEMBLY__
-
-#define PS_UINT_CAST
-#define UINT64_CAST
-#define HUBREG_CAST
-
-#endif
-
-
-
 
 /*
  * The following macros are used to index to the beginning of a specific
  * node's address space.
  */
 
-#define NODE_OFFSET(_n)		(UINT64_CAST (_n) << NASID_SHFT)
+#define NODE_OFFSET(_n)		((uint64_t) (_n) << NASID_SHFT)
 
 #define NODE_CAC_BASE(_n)	(CAC_BASE  + NODE_OFFSET(_n))
 #define NODE_HSPEC_BASE(_n)	(HSPEC_BASE + NODE_OFFSET(_n))
 #define NODE_IO_BASE(_n)	(IO_BASE    + NODE_OFFSET(_n))
+#define TIO_IO_BASE(_n)		(TIO_BASE    + NODE_OFFSET(_n))
 #define NODE_MSPEC_BASE(_n)	(MSPEC_BASE + NODE_OFFSET(_n))
 #define NODE_UNCAC_BASE(_n)	(UNCAC_BASE + NODE_OFFSET(_n))
 
@@ -55,7 +42,10 @@
 
 
 #define RAW_NODE_SWIN_BASE(nasid, widget)				\
-	(NODE_IO_BASE(nasid) + (UINT64_CAST (widget) << SWIN_SIZE_BITS))
+	(NODE_IO_BASE(nasid) + ((uint64_t) (widget) << SWIN_SIZE_BITS))
+
+#define RAW_TIO_SWIN_BASE(nasid, widget)				\
+	(NODE_IO_BASE(nasid) + ((uint64_t) (widget) << TIO_SWIN_SIZE_BITS))
 
 #define WIDGETID_GET(addr)	((unsigned char)((addr >> SWIN_SIZE_BITS) & 0xff))
 
@@ -66,10 +56,15 @@
  */
 
 #define SWIN_SIZE_BITS		24
-#define SWIN_SIZE		(1UL<<24)
+#define SWIN_SIZE		((uint64_t) 1 << 24)
 #define	SWIN_SIZEMASK		(SWIN_SIZE - 1)
 #define	SWIN_WIDGET_MASK	0xF
 
+#define TIO_SWIN_SIZE_BITS		28
+#define TIO_SWIN_SIZE		((uint64_t) 1 << 28)
+#define	TIO_SWIN_SIZEMASK		(TIO_SWIN_SIZE - 1)
+#define	TIO_SWIN_WIDGET_MASK	0x3
+
 /*
  * Convert smallwindow address to xtalk address.
  *
@@ -78,6 +73,9 @@
  */
 #define	SWIN_WIDGETADDR(addr)	((addr) & SWIN_SIZEMASK)
 #define	SWIN_WIDGETNUM(addr)	(((addr)  >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
+
+#define	TIO_SWIN_WIDGETADDR(addr)	((addr) & TIO_SWIN_SIZEMASK)
+#define	TIO_SWIN_WIDGETNUM(addr)	(((addr)  >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
 /*
  * Verify if addr belongs to small window address on node with "nasid"
  *
@@ -142,12 +140,20 @@
  *	Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
  *	They're always safe.
  */
+/*
+ * LOCAL_HUB_ADDR doesn't need to be changed for TIO, since, by definition,
+ * there are no "local" TIOs.
+ */
 #define LOCAL_HUB_ADDR(_x)							\
 	(((_x) & BWIN_TOP) ? (HUBREG_CAST (LOCAL_MMR_ADDR(_x)))		\
 	: (HUBREG_CAST (IALIAS_BASE + (_x))))
 #define REMOTE_HUB_ADDR(_n, _x)						\
+	((_n & 1) ?							\
+	/* TIO: */							\
+	(HUBREG_CAST (GLOBAL_MMR_ADDR(_n, _x)))				\
+	: /* SHUB: */							\
 	(((_x) & BWIN_TOP) ? (HUBREG_CAST (GLOBAL_MMR_ADDR(_n, _x)))	\
-	: (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + 0x800000 + (_x))))
+	: (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + 0x800000 + (_x)))))
 
 #ifndef __ASSEMBLY__
 
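
The reworked REMOTE_HUB_ADDR dispatches on the low nasid bit: odd (TIO) nasids always resolve through GLOBAL_MMR_ADDR, while even (SHUB) nasids keep the big-window/small-window split; TIO small windows are 256MB with a 2-bit widget field versus 16MB/4-bit for SHUB. A short illustrative sketch, assuming mmr_t is the 64-bit MMR type behind HUBREG_CAST; the function names are not part of the patch:

#include <asm/sn/addrs.h>

/*
 * Illustrative: read a remote register; TIO vs SHUB routing is
 * hidden inside REMOTE_HUB_ADDR.
 */
static inline uint64_t
read_remote_reg(nasid_t nasid, unsigned long offset)
{
	volatile mmr_t *p = REMOTE_HUB_ADDR(nasid, offset);

	return *p;
}

/*
 * Illustrative: split a TIO small-window address into widget number
 * and widget-relative offset using the new TIO_SWIN_* macros.
 */
static inline void
tio_swin_decode(unsigned long addr, int *widget, unsigned long *off)
{
	*widget = TIO_SWIN_WIDGETNUM(addr);
	*off    = TIO_SWIN_WIDGETADDR(addr);
}
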
diff -Nru a/include/asm-ia64/sn/dmamap.h b/include/asm-ia64/sn/dmamap.h
--- a/include/asm-ia64/sn/dmamap.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/dmamap.h	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -9,6 +8,8 @@
 #ifndef _ASM_IA64_SN_DMAMAP_H
 #define _ASM_IA64_SN_DMAMAP_H
 
+#include <asm/sn/types.h>
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -39,30 +40,8 @@
 	int		dma_index;	/* Beginning map register to use */
 	int		dma_size;	/* Number of map registers to use */
 	paddr_t		dma_addr;	/* Corresponding bus addr for A24/A32 */
-	caddr_t		dma_virtaddr;	/* Beginning virtual address that is mapped */
+	unsigned long	dma_virtaddr;	/* Beginning virtual address that is mapped */
 } dmamap_t;
-
-struct alenlist_s;
-
-/*
- * Prototypes of exported functions
- */
-extern dmamap_t	*dma_mapalloc(int, int, int, int);
-extern void	dma_mapfree(dmamap_t *);
-extern int	dma_map(dmamap_t *, caddr_t, int);
-extern int	dma_map2(dmamap_t *, caddr_t, caddr_t, int);
-extern paddr_t	dma_mapaddr(dmamap_t *, caddr_t);
-extern int	dma_map_alenlist(dmamap_t *, struct alenlist_s *, size_t);
-extern uint	ev_kvtoiopnum(caddr_t);
-
-/*
- * These variables are defined in master.d/kernel
- */
-extern struct map *a24map[];
-extern struct map *a32map[];
-
-extern int a24_mapsize;
-extern int a32_mapsize;
 
 #ifdef __cplusplus
 }
diff -Nru a/include/asm-ia64/sn/hcl.h b/include/asm-ia64/sn/hcl.h
--- a/include/asm-ia64/sn/hcl.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/hcl.h	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -10,20 +9,22 @@
 #define _ASM_IA64_SN_HCL_H
 
 #include <asm/sn/sgi.h>
-#include <asm/sn/invent.h>
 
 extern vertex_hdl_t hwgraph_root;
 extern vertex_hdl_t linux_busnum;
 
+void hwgraph_debug(char *, char *, int, vertex_hdl_t, vertex_hdl_t, char *, ...);
+
+#if 1
+#define HWGRAPH_DEBUG(args) hwgraph_debug args ;
+#else   
+#define HWGRAPH_DEBUG(args)
+#endif  
 
 typedef long            labelcl_info_place_t;
 typedef long            arbitrary_info_t;
 typedef long            arb_info_desc_t;
 
-/* Support for INVENTORY */
-struct inventory_s;
-struct invplace_s;
-
 
 /* 
  * Reserve room in every vertex for 2 pieces of fast access indexed information 
@@ -53,6 +54,14 @@
 #define HWGRAPH_EDGELBL_DOTDOT 	".."
 #define graph_edge_place_t uint
 
+#include <asm/sn/labelcl.h>
+#define hwgraph_fastinfo_set(a,b) labelcl_info_replace_IDX(a, HWGRAPH_FASTINFO, b, NULL)
+#define hwgraph_connectpt_set labelcl_info_connectpt_set
+#define hwgraph_generate_path hwgfs_generate_path
+#define hwgraph_path_to_vertex(a) hwgfs_find_handle(NULL, a, 0, 0, 0, 1)
+#define hwgraph_edge_remove(a,b,c)
+#define hwgraph_vertex_unref(a)
+
 /*
  * External declarations of EXPORTED SYMBOLS in hcl.c
  */
@@ -69,25 +78,16 @@
 extern int hwgraph_edge_get(vertex_hdl_t, char *, vertex_hdl_t *);
 
 extern arbitrary_info_t hwgraph_fastinfo_get(vertex_hdl_t);
-extern void hwgraph_fastinfo_set(vertex_hdl_t, arbitrary_info_t );
 extern vertex_hdl_t hwgraph_mk_dir(vertex_hdl_t, const char *, unsigned int, void *);
 
 extern int hwgraph_connectpt_set(vertex_hdl_t, vertex_hdl_t);
 extern vertex_hdl_t hwgraph_connectpt_get(vertex_hdl_t);
 extern int hwgraph_edge_get_next(vertex_hdl_t, char *, vertex_hdl_t *, uint *);
-extern graph_error_t hwgraph_edge_remove(vertex_hdl_t, char *, vertex_hdl_t *);
 
 extern graph_error_t hwgraph_traverse(vertex_hdl_t, char *, vertex_hdl_t *);
 
 extern int hwgraph_vertex_get_next(vertex_hdl_t *, vertex_hdl_t *);
-extern int hwgraph_inventory_get_next(vertex_hdl_t, invplace_t *, 
-				      inventory_t **);
-extern int hwgraph_inventory_add(vertex_hdl_t, int, int, major_t, minor_t, int);
-extern int hwgraph_inventory_remove(vertex_hdl_t, int, int, major_t, minor_t, int);
-extern int hwgraph_controller_num_get(vertex_hdl_t);
-extern void hwgraph_controller_num_set(vertex_hdl_t, int);
-extern int hwgraph_path_ad(vertex_hdl_t, char *, vertex_hdl_t *);
-extern vertex_hdl_t hwgraph_path_to_vertex(char *);
+extern int hwgraph_path_add(vertex_hdl_t, char *, vertex_hdl_t *);
 extern vertex_hdl_t hwgraph_path_to_dev(char *);
 extern vertex_hdl_t hwgraph_block_device_get(vertex_hdl_t);
 extern vertex_hdl_t hwgraph_char_device_get(vertex_hdl_t);
@@ -100,12 +100,10 @@
 extern int hwgraph_info_get_exported_LBL(vertex_hdl_t, char *, int *, arbitrary_info_t *);
 extern int hwgraph_info_get_next_LBL(vertex_hdl_t, char *, arbitrary_info_t *,
                                 labelcl_info_place_t *);
+extern int hwgraph_path_lookup(vertex_hdl_t, char *, vertex_hdl_t *, char **);
 extern int hwgraph_info_export_LBL(vertex_hdl_t, char *, int);
 extern int hwgraph_info_unexport_LBL(vertex_hdl_t, char *);
 extern int hwgraph_info_remove_LBL(vertex_hdl_t, char *, arbitrary_info_t *);
-extern char * vertex_to_name(vertex_hdl_t, char *, uint);
-extern graph_error_t hwgraph_vertex_unref(vertex_hdl_t);
-
-extern int init_hcl(void);
+extern char *vertex_to_name(vertex_hdl_t, char *, uint);
 
 #endif /* _ASM_IA64_SN_HCL_H */
diff -Nru a/include/asm-ia64/sn/io.h b/include/asm-ia64/sn/io.h
--- a/include/asm-ia64/sn/io.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/io.h	Thu Nov  6 13:42:35 2003
@@ -13,6 +13,8 @@
 
 #include <asm/sn/addrs.h>
 
+extern int numionodes;
+
 /* Because we only have PCI I/O ports.  */
 #define IIO_ITTE_BASE	0x400160	/* base of translation table entries */
 #define IIO_ITTE(bigwin)	(IIO_ITTE_BASE + 8*(bigwin))
@@ -58,6 +60,7 @@
 
 #include <asm/sn/sn2/shub.h>
 #include <asm/sn/sn2/shubio.h>
+#include <asm/sn/sn2/iceio.h>
 
 /*
  * Used to ensure write ordering (like mb(), but for I/O space)
diff -Nru a/include/asm-ia64/sn/ioc4.h b/include/asm-ia64/sn/ioc4.h
--- a/include/asm-ia64/sn/ioc4.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/ioc4.h	Thu Nov  6 13:42:35 2003
@@ -1,36 +1,14 @@
 /*
- * Copyright (c) 2002-2003 Silicon Graphics, Inc.  All Rights Reserved.
- * 
- * This program is free software; you can redistribute it and/or modify it 
- * under the terms of version 2 of the GNU General Public License 
- * as published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it would be useful, but 
- * WITHOUT ANY WARRANTY; without even the implied warranty of 
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- * 
- * Further, this software is distributed without any warranty that it is 
- * free of the rightful claim of any third person regarding infringement 
- * or the like.  Any license provided herein, whether implied or 
- * otherwise, applies only to this software file.  Patent licenses, if 
- * any, provided herein do not apply to combinations of this program with 
- * other software, or any other product whatsoever.
- * 
- * You should have received a copy of the GNU General Public 
- * License along with this program; if not, write the Free Software 
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- * 
- * Contact information:  Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, 
- * Mountain View, CA  94043, or:
- * 
- * http://www.sgi.com 
- * 
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002-2003 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
 #ifndef _ASM_IA64_SN_IOC4_H
 #define _ASM_IA64_SN_IOC4_H
 
-#if 0
 
 /*
  * ioc4.h - IOC4 chip header file
@@ -46,7 +24,7 @@
  *
  * All IOC4 registers are 32 bits wide.
  */
-typedef __uint32_t ioc4reg_t;
+typedef uint32_t ioc4reg_t;
 
 /*
  * PCI Configuration Space Register Address Map, use offset from IOC4 PCI
@@ -70,10 +48,12 @@
  * PCI Memory Space Map 
  */
 #define IOC4_PCI_ERR_ADDR_L     0x000	/* Low Error Address */
-#define IOC4_PCI_ERR_ADDR_VLD	     (0x1 << 0)
-#define IOC4_PCI_ERR_ADDR_MST_ID_MSK (0xf << 1)
-#define IOC4_PCI_ERR_ADDR_MUL_ERR    (0x1 << 5)
-#define IOC4_PCI_ERR_ADDR_ADDR_MSK   (0x3ffffff << 6)
+#define IOC4_PCI_ERR_ADDR_VLD	        (0x1 << 0)
+#define IOC4_PCI_ERR_ADDR_MST_ID_MSK    (0xf << 1)
+#define IOC4_PCI_ERR_ADDR_MST_NUM_MSK   (0xe << 1)
+#define IOC4_PCI_ERR_ADDR_MST_TYP_MSK   (0x1 << 1)
+#define IOC4_PCI_ERR_ADDR_MUL_ERR       (0x1 << 5)
+#define IOC4_PCI_ERR_ADDR_ADDR_MSK      (0x3ffffff << 6)
 
 /* Master IDs contained in PCI_ERR_ADDR_MST_ID_MSK */
 #define IOC4_MST_ID_S0_TX		0
@@ -569,26 +549,27 @@
 
 /* IOC4 UART register map */
 typedef volatile struct ioc4_uartregs {
+    char                    i4u_lcr;
     union {
-        char                    rbr;    /* read only, DLAB == 0 */
-        char                    thr;    /* write only, DLAB == 0 */
-        char                    dll;    /* DLAB == 1 */
-    } u1;
+        char                    iir;    /* read only */
+        char                    fcr;    /* write only */
+    } u3;
     union {
         char                    ier;    /* DLAB == 0 */
         char                    dlm;    /* DLAB == 1 */
     } u2;
     union {
-        char                    iir;    /* read only */
-        char                    fcr;    /* write only */
-    } u3;
-    char                    i4u_lcr;
-    char                    i4u_mcr;
-    char                    i4u_lsr;
-    char                    i4u_msr;
+        char                    rbr;    /* read only, DLAB == 0 */
+        char                    thr;    /* write only, DLAB == 0 */
+        char                    dll;    /* DLAB == 1 */
+    } u1;
     char                    i4u_scr;
+    char                    i4u_msr;
+    char                    i4u_lsr;
+    char                    i4u_mcr;
 } ioc4_uart_t;
 
+
 #define i4u_rbr u1.rbr
 #define i4u_thr u1.thr
 #define i4u_dll u1.dll
@@ -704,7 +685,6 @@
     ioc4_uart_t		    uart_3;
 } ioc4_mem_t;
 
-#endif	/* 0 */
 
 /*
  * Bytebus device space
@@ -714,7 +694,6 @@
 #define IOC4_BYTEBUS_DEV2	0xC0000L  /* Addressed using pci_bar0 */
 #define IOC4_BYTEBUS_DEV3	0xE0000L  /* Addressed using pci_bar0 */
 
-#if 0
 /* UART clock speed */
 #define IOC4_SER_XIN_CLK        66000000
 
@@ -749,7 +728,7 @@
 #define IOC4_INTA_SUBDEVS	(IOC4_SDB_SERIAL | IOC4_SDB_KBMS | IOC4_SDB_RT | IOC4_SDB_GENERIC)
 
 extern int		ioc4_subdev_enabled(vertex_hdl_t, ioc4_subdev_t);
-extern void		ioc4_subdev_enables(vertex_hdl_t, ulong_t);
+extern void		ioc4_subdev_enables(vertex_hdl_t, uint64_t);
 extern void		ioc4_subdev_enable(vertex_hdl_t, ioc4_subdev_t);
 extern void		ioc4_subdev_disable(vertex_hdl_t, ioc4_subdev_t);
 
@@ -767,7 +746,7 @@
 ioc4_intr_func_f	(intr_arg_t, ioc4reg_t);
 
 typedef void
-ioc4_intr_connect_f	(vertex_hdl_t conn_vhdl,
+ioc4_intr_connect_f	(struct pci_dev *conn_vhdl,
 			 ioc4_intr_type_t,
 			 ioc4reg_t,
 			 ioc4_intr_func_f *,
@@ -784,18 +763,17 @@
 			 intr_arg_t info,
 			 vertex_hdl_t owner_vhdl);
 
-ioc4_intr_disconnect_f	ioc4_intr_disconnect;
-ioc4_intr_connect_f	ioc4_intr_connect;
+void ioc4_intr_connect(vertex_hdl_t, ioc4_intr_type_t, ioc4reg_t,
+		  ioc4_intr_func_f *, intr_arg_t, vertex_hdl_t,
+		  vertex_hdl_t);
 
 extern int		ioc4_is_console(vertex_hdl_t conn_vhdl);
 
 extern void		ioc4_mlreset(ioc4_cfg_t *, ioc4_mem_t *);
 
-extern intr_func_f	ioc4_intr;
 
 extern ioc4_mem_t      *ioc4_mem_ptr(void *ioc4_fastinfo);
 
 typedef ioc4_intr_func_f *ioc4_intr_func_t;
 
-#endif	/* 0 */
 #endif				/* _ASM_IA64_SN_IOC4_H */
diff -Nru a/include/asm-ia64/sn/ioerror_handling.h b/include/asm-ia64/sn/ioerror_handling.h
--- a/include/asm-ia64/sn/ioerror_handling.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/ioerror_handling.h	Thu Nov  6 13:42:35 2003
@@ -8,7 +8,6 @@
 #ifndef _ASM_IA64_SN_IOERROR_HANDLING_H
 #define _ASM_IA64_SN_IOERROR_HANDLING_H
 
-#include <linux/config.h>
 #include <linux/types.h>
 #include <asm/sn/sgi.h>
 
@@ -204,12 +203,6 @@
 
 typedef uint64_t  error_priority_t;
 
-/* Error state interfaces */
-#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
-extern error_return_code_t	error_state_set(vertex_hdl_t,error_state_t);
-extern error_state_t		error_state_get(vertex_hdl_t);
-#endif
-
 /* Error action interfaces */
 
 extern error_return_code_t	error_action_set(vertex_hdl_t,
@@ -232,45 +225,6 @@
 #define v_error_skip_env_clear(v)		\
 hwgraph_info_remove_LBL(v, INFO_LBL_ERROR_SKIP_ENV, 0)
 
-/* Skip point interfaces */
-extern error_return_code_t	error_skip_point_jump(vertex_hdl_t, boolean_t);
-extern error_return_code_t	error_skip_point_clear(vertex_hdl_t);
-
-/* REFERENCED */
-#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
-
-inline static int
-error_skip_point_mark(vertex_hdl_t  v)  			 
-{									
-	label_t		*error_env = NULL;	 			
-	int		code = 0;		
-
-	/* Check if we have a valid hwgraph vertex */
-#ifdef	LATER
-	if (!dev_is_vertex(v))
-		return(code);
-#endif
-				
-	/* There is no error jump buffer for this device vertex. Allocate
-	 * one.								 
-	 */								 
-	if (v_error_skip_env_get(v, error_env) != GRAPH_SUCCESS) {	 
-		error_env = snia_kmem_zalloc(sizeof(label_t), KM_NOSLEEP);	 
-		/* Unable to allocate memory for jum buffer. This should 
-		 * be a very rare occurrence.				 
-		 */							 
-		if (!error_env)						 
-			return(-1);					 
-		/* Store the jump buffer information on the vertex.*/	 
-		if (v_error_skip_env_set(v, error_env, 0) != GRAPH_SUCCESS)
-			return(-2);					   
-	}								   
-	ASSERT(v_error_skip_env_get(v, error_env) == GRAPH_SUCCESS);
-	code = setjmp(*error_env);					   
-	return(code);							     
-}
-#endif	/* CONFIG_SGI_IO_ERROR_HANDLING */
-
 typedef uint64_t		counter_t;
 
 extern counter_t		error_retry_count_get(vertex_hdl_t);
@@ -283,14 +237,6 @@
  */
 #define	IS_ERROR_INTR_CONTEXT(_ec)	((_ec & IOECODE_DMA) 		|| \
 					 (_ec == IOECODE_PIO_WRITE))
-
-/* Some convenience macros on device state. This state is accessed only 
- * thru the calls the io error handling layer.
- */
-#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
-extern boolean_t		is_device_shutdown(vertex_hdl_t);
-#define IS_DEVICE_SHUTDOWN(_d) 	(is_device_shutdown(_d))
-#endif
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_IA64_SN_IOERROR_HANDLING_H */
diff -Nru a/include/asm-ia64/sn/iograph.h b/include/asm-ia64/sn/iograph.h
--- a/include/asm-ia64/sn/iograph.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/iograph.h	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -19,13 +18,10 @@
 /* edges names */
 #define EDGE_LBL_BUS			"bus"
 #define EDGE_LBL_CONN			".connection"
-#define EDGE_LBL_ECP			"ecp"		/* EPP/ECP plp */
-#define EDGE_LBL_ECPP			"ecpp"
 #define EDGE_LBL_GUEST			".guest"	/* For IOC3 */
 #define EDGE_LBL_HOST			".host"		/* For IOC3 */
 #define EDGE_LBL_PERFMON		"mon"
 #define EDGE_LBL_USRPCI			"usrpci"
-#define EDGE_LBL_VME			"vmebus"
 #define EDGE_LBL_BLOCK			"block"
 #define EDGE_LBL_BOARD			"board"
 #define EDGE_LBL_CHAR			"char"
@@ -35,50 +31,15 @@
 #define EDGE_LBL_DIRECT			"direct"
 #define EDGE_LBL_DISABLED		"disabled"
 #define EDGE_LBL_DISK			"disk"
-#define EDGE_LBL_DMA_ENGINE             "dma_engine"    /* Only available on
-							   VMEbus now        */
-#define EDGE_LBL_NET			"net"		/* all nw. devs */
-#define EDGE_LBL_EF			"ef"		/* For if_ef ethernet */
-#define EDGE_LBL_ET			"et"		/* For if_ee ethernet */
-#define EDGE_LBL_EC			"ec"		/* For if_ec2 ether */
-#define EDGE_LBL_ECF			"ec"		/* For if_ecf enet */
-#define EDGE_LBL_EM			"ec"		/* For O2 ether */
-#define EDGE_LBL_IPG			"ipg"		/* For IPG FDDI */
-#define EDGE_LBL_XPI			"xpi"		/* For IPG FDDI */
-#define EDGE_LBL_HIP			"hip"		/* For HIPPI */
-#define EDGE_LBL_GSN                    "gsn"           /* For GSN */
-#define EDGE_LBL_ATM			"atm"		/* For ATM */
-#define EDGE_LBL_FXP			"fxp"		/* For FXP ether */
-#define EDGE_LBL_EP			"ep"		/* For eplex ether */
-#define EDGE_LBL_VFE			"vfe"		/* For VFE ether */
-#define EDGE_LBL_GFE			"gfe"		/* For GFE ether */
-#define EDGE_LBL_RNS			"rns"		/* RNS PCI FDDI card */
-#define EDGE_LBL_MTR			"mtr"		/* MTR PCI 802.5 card */
-#define EDGE_LBL_FV			"fv"		/* FV VME 802.5 card */
-#define EDGE_LBL_GTR			"gtr"		/* GTR GIO 802.5 card */
-#define EDGE_LBL_ISDN                   "isdn"		/* Digi PCI ISDN-BRI card */
-
-#define EDGE_LBL_EISA			"eisa"
-#define EDGE_LBL_ENET			"ethernet"
-#define EDGE_LBL_FLOPPY			"floppy"
-#define EDGE_LBL_PFD			"pfd"		/* For O2 pfd floppy */
-#define EDGE_LBL_FOP                    "fop"           /* Fetchop pseudo device */
-#define EDGE_LBL_GIO			"gio"
-#define EDGE_LBL_HEART			"heart"		/* For RACER */
-#define EDGE_LBL_HPC			"hpc"
-#define EDGE_LBL_GFX			"gfx"
 #define EDGE_LBL_HUB			"hub"		/* For SN0 */
+#define EDGE_LBL_ICE			"ice"		/* For TIO */
 #define EDGE_LBL_HW			"hw"
-#define EDGE_LBL_SYNERGY		"synergy"	/* For SNIA only */
-#define EDGE_LBL_IBUS			"ibus"		/* For EVEREST */
 #define EDGE_LBL_INTERCONNECT		"link"
 #define EDGE_LBL_IO			"io"
-#define EDGE_LBL_IO4			"io4"		/* For EVEREST */
-#define EDGE_LBL_IOC3			"ioc3"
+#define EDGE_LBL_IOC4			"ioc4"
 #define EDGE_LBL_LUN                    "lun"
 #define EDGE_LBL_LINUX                  "linux"
 #define EDGE_LBL_LINUX_BUS              EDGE_LBL_LINUX "/bus/pci-x"
-#define EDGE_LBL_MACE                   "mace" 		/* O2 mace */
 #define EDGE_LBL_MACHDEP                "machdep"       /* Platform dependent devices */
 #define EDGE_LBL_MASTER			".master"
 #define EDGE_LBL_MEMORY			"memory"
@@ -93,6 +54,9 @@
 #define EDGE_LBL_PCIX			"pci-x"
 #define EDGE_LBL_PCIX_0			EDGE_LBL_PCIX "/0"
 #define EDGE_LBL_PCIX_1			EDGE_LBL_PCIX "/1"
+#define EDGE_LBL_AGP			"agp"
+#define EDGE_LBL_AGP_0			EDGE_LBL_AGP "/0"
+#define EDGE_LBL_AGP_1			EDGE_LBL_AGP "/1"
 #define EDGE_LBL_PORT			"port"
 #define EDGE_LBL_PROM			"prom"
 #define EDGE_LBL_RACK			"rack"
@@ -103,35 +67,32 @@
 #define EDGE_LBL_SCSI			"scsi"
 #define EDGE_LBL_SCSI_CTLR		"scsi_ctlr"
 #define EDGE_LBL_SLOT			"slot"
-#define EDGE_LBL_TAPE			"tape"
 #define EDGE_LBL_TARGET                 "target"
 #define EDGE_LBL_UNKNOWN		"unknown"
-#define EDGE_LBL_VOLUME			"volume"
-#define EDGE_LBL_VOLUME_HEADER		"volume_header"
 #define EDGE_LBL_XBOW			"xbow"
 #define	EDGE_LBL_XIO			"xio"
 #define EDGE_LBL_XSWITCH		".xswitch"
 #define EDGE_LBL_XTALK			"xtalk"
+#define EDGE_LBL_CORETALK		"coretalk"
+#define EDGE_LBL_CORELET		"corelet"
 #define EDGE_LBL_XWIDGET		"xwidget"
 #define EDGE_LBL_ELSC			"elsc"
 #define EDGE_LBL_L1			"L1"
-#define EDGE_LBL_MADGE_TR               "Madge-tokenring"
 #define EDGE_LBL_XPLINK			"xplink" 	/* Cross partition */
 #define	EDGE_LBL_XPLINK_NET		"net" 		/* XP network devs */
 #define	EDGE_LBL_XPLINK_RAW		"raw"		/* XP Raw devs */
 #define EDGE_LBL_SLAB			"slab"		/* Slab of a module */
 #define	EDGE_LBL_XPLINK_KERNEL		"kernel"	/* XP kernel devs */
 #define	EDGE_LBL_XPLINK_ADMIN		"admin"	   	/* Partition admin */
-#define	EDGE_LBL_KAIO			"kaio"	   	/* Kernel async i/o poll */
-#define EDGE_LBL_RPS                    "rps"           /* redundant power supply */ 
-#define EDGE_LBL_XBOX_RPS               "xbox_rps"      /* redundant power supply for xbox unit */ 
 #define EDGE_LBL_IOBRICK		"iobrick"
-#define EDGE_LBL_PBRICK			"Pbrick"
 #define EDGE_LBL_PEBRICK		"PEbrick"
 #define EDGE_LBL_PXBRICK		"PXbrick"
+#define EDGE_LBL_PABRICK		"PAbrick"
+#define EDGE_LBL_OPUSBRICK		"onboardio"
+#define EDGE_LBL_BUBRICK		"BUBrick"	/* TIO BringUp Brick */
 #define EDGE_LBL_IXBRICK		"IXbrick"
+#define EDGE_LBL_IABRICK		"IAbrick"
 #define EDGE_LBL_IBRICK			"Ibrick"
-#define EDGE_LBL_XBRICK			"Xbrick"
 #define EDGE_LBL_CGBRICK		"CGbrick"
 #define EDGE_LBL_CPUBUS			"cpubus"	/* CPU Interfaces (SysAd) */
 
@@ -142,25 +103,15 @@
 #define INFO_LBL_CPUID			"_cpuid"
 #define INFO_LBL_CPU_INFO		"_cpu"
 #define INFO_LBL_DETAIL_INVENT		"_detail_invent" /* inventory data*/
-#define INFO_LBL_DEVICE_DESC		"_device_desc"
-#define INFO_LBL_DIAGVAL                "_diag_reason"   /* Reason disabled */
-#define INFO_LBL_DKIOTIME		"_dkiotime"
+#define INFO_LBL_DIAGVAL		"_diag_reason"   /* Reason disabled */
 #define INFO_LBL_DRIVER			"_driver"	/* points to attached device_driver_t */
 #define INFO_LBL_ELSC			"_elsc"
 #define	INFO_LBL_SUBCH			"_subch"	/* system controller subchannel */
-#define INFO_LBL_L1SCP			"_l1scp"	/* points to l1sc_t */
-#define INFO_LBL_FC_PORTNAME		"_fc_portname"
-#define INFO_LBL_GIOIO			"_gioio"
-#define INFO_LBL_GFUNCS			"_gioio_ops"	/* ops vector for gio providers */
 #define INFO_LBL_HUB_INFO		"_hubinfo"
 #define INFO_LBL_HWGFSLIST		"_hwgfs_list"
 #define INFO_LBL_TRAVERSE		"_hwg_traverse" /* hwgraph traverse function */
-#define INFO_LBL_INVENT 		"_invent"	/* inventory data */
-#define INFO_LBL_MLRESET		"_mlreset"	/* present if device preinitialized */
 #define INFO_LBL_MODULE_INFO		"_module"	/* module data ptr */
-#define INFO_LBL_MONDATA		"_mon"		/* monitor data ptr */
 #define INFO_LBL_MDPERF_DATA		"_mdperf"	/* mdperf monitoring*/
-#define INFO_LBL_NIC			"_nic"
 #define INFO_LBL_NODE_INFO		"_node"
 #define	INFO_LBL_PCIBR_HINTS		"_pcibr_hints"
 #define INFO_LBL_PCIIO			"_pciio"
@@ -168,15 +119,11 @@
 #define INFO_LBL_PERMISSIONS		"_permissions"	/* owner, uid, gid */
 #define INFO_LBL_ROUTER_INFO		"_router"
 #define INFO_LBL_SUBDEVS		"_subdevs"	/* subdevice enable bits */
-#define INFO_LBL_VME_FUNCS		"_vmeio_ops"	/* ops vector for VME providers */
 #define INFO_LBL_XSWITCH		"_xswitch"
 #define INFO_LBL_XSWITCH_ID		"_xswitch_id"
 #define INFO_LBL_XSWITCH_VOL		"_xswitch_volunteer"
 #define INFO_LBL_XFUNCS			"_xtalk_ops"	/* ops vector for gio providers */
 #define INFO_LBL_XWIDGET		"_xwidget"
-#define INFO_LBL_GRIO_DSK		"_grio_disk"	/* guaranteed rate I/O */
-#define INFO_LBL_ASYNC_ATTACH           "_async_attach"	/* parallel attachment */
-#define INFO_LBL_GFXID			"_gfxid"	/* gfx pipe ID #s */
 /* Device/Driver  Admin directive labels  */
 #define ADMIN_LBL_INTR_TARGET		"INTR_TARGET"	/* Target cpu for device interrupts*/
 #define ADMIN_LBL_INTR_SWLEVEL		"INTR_SWLEVEL"	/* Priority level of the ithread */
@@ -199,12 +146,6 @@
 							 * thread priority
 							 * default class
 							 */
-/* Special reserved info labels (also hwgfs attributes) */
-#define _DEVNAME_ATTR		"_devname"	/* device name */
-#define _DRIVERNAME_ATTR	"_drivername"	/* driver name */
-#define _INVENT_ATTR		"_inventory"	/* device inventory data */
-#define _MASTERNODE_ATTR	"_masternode"	/* node that "controls" device */
-
 /* Info labels that begin with '_' cannot be overwritten by an attr_set call */
 #define INFO_LBL_RESERVED(name) ((name)[0] == '_')
 
diff -Nru a/include/asm-ia64/sn/klconfig.h b/include/asm-ia64/sn/klconfig.h
--- a/include/asm-ia64/sn/klconfig.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/klconfig.h	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -41,7 +40,6 @@
 #include <asm/sn/sgi.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/vector.h>
-#include <asm/sn/arc/hinv.h>
 #include <asm/sn/xtalk/xbow.h>
 #include <asm/sn/xtalk/xtalk.h>
 #include <asm/sn/kldir.h>
@@ -154,9 +152,12 @@
 #else
 #define NODE_OFFSET_TO_LBOARD(nasid,off)        (lboard_t*)(NODE_CAC_BASE(nasid) + (off))
 
+/*
+ * This is a HACK for Medusa when testing for TIO.
+ */
 #define KL_CONFIG_INFO(_nasid)                                          \
-	(lboard_t *)((KL_CONFIG_HDR(_nasid)->ch_board_info) ?           \
-	 NODE_OFFSET_TO_LBOARD((_nasid), KL_CONFIG_HDR(_nasid)->ch_board_info) : \
+	(lboard_t *)((KL_CONFIG_HDR(((_nasid & 1) ? 0 : _nasid))->ch_board_info) ?           \
+	 NODE_OFFSET_TO_LBOARD(((_nasid & 1) ? 0 : _nasid), KL_CONFIG_HDR(((_nasid & 1) ? 0 : _nasid))->ch_board_info) : \
 	 NULL)
 
 #endif	/* __ia64 */
@@ -187,7 +188,7 @@
         (klconf_off_t)(KLCONFIG_OFFSET(_nasid) + sizeof(kl_config_hdr_t))
 
 #define KL_CONFIG_BOARD_NASID(_brd)	((_brd)->brd_nasid)
-#define KL_CONFIG_BOARD_SET_NEXT(_brd, _off)	((_brd)->brd_next = (_off))
+#define KL_CONFIG_BOARD_SET_NEXT(_brd, _off)	((_brd)->brd_next_any = (_off))
 
 #define KL_CONFIG_DUPLICATE_BOARD(_brd)	((_brd)->brd_flags & DUPLICATE_BOARD)
 
@@ -301,15 +302,7 @@
  */
 
 /*
- * Values for CPU types
- */
-#define KL_CPU_R4000		0x1	/* Standard R4000 */
-#define KL_CPU_TFP		0x2	/* TFP processor */
-#define	KL_CPU_R10000		0x3	/* R10000 (T5) */
-#define KL_CPU_NONE		(-1)	/* no cpu present in slot */
-
-/*
- * IP27 BOARD classes
+ * BOARD classes
  */
 
 #define KLCLASS_MASK	0xf0   
@@ -346,71 +339,34 @@
 
 #define KLTYPE_WEIRDCPU (KLCLASS_CPU | 0x0)
 #define KLTYPE_SNIA	(KLCLASS_CPU | 0x1)
+#define KLTYPE_TIO	(KLCLASS_CPU | 0x2)
 
-#define KLTYPE_WEIRDIO	(KLCLASS_IOBRICK  | 0x0)
-#define KLTYPE_BASEIO	(KLCLASS_IO  | 0x1) /* IOC3, SuperIO, Bridge, SCSI */
-#define KLTYPE_IO6	KLTYPE_BASEIO       /* Additional name */
-#define KLTYPE_4CHSCSI	(KLCLASS_IO  | 0x2)
-#define KLTYPE_MSCSI	KLTYPE_4CHSCSI      /* Additional name */
-#define KLTYPE_ETHERNET	(KLCLASS_IO  | 0x3)
-#define KLTYPE_MENET	KLTYPE_ETHERNET     /* Additional name */
-#define KLTYPE_FDDI  	(KLCLASS_IO  | 0x4)
-#define KLTYPE_UNUSED	(KLCLASS_IO  | 0x5) /* XXX UNUSED */
-#define KLTYPE_HAROLD   (KLCLASS_IO  | 0x6) /* PCI SHOE BOX */
-#define KLTYPE_PCI	KLTYPE_HAROLD
-#define KLTYPE_VME      (KLCLASS_IO  | 0x7) /* Any 3rd party VME card */
-#define KLTYPE_MIO   	(KLCLASS_IO  | 0x8)
-#define KLTYPE_FC    	(KLCLASS_IO  | 0x9)
-#define KLTYPE_LINC    	(KLCLASS_IO  | 0xA)
-#define KLTYPE_TPU    	(KLCLASS_IO  | 0xB) /* Tensor Processing Unit */
-#define KLTYPE_GSN_A   	(KLCLASS_IO  | 0xC) /* Main GSN board */
-#define KLTYPE_GSN_B   	(KLCLASS_IO  | 0xD) /* Auxiliary GSN board */
-#define KLTYPE_SHOEHORN (KLCLASS_IO  | 0xE)
-#define KLTYPE_SERIAL_HIPPI (KLCLASS_IO  | 0xF)
-
-#define KLTYPE_GFX	(KLCLASS_GFX | 0x0) /* unknown graphics type */
-#define KLTYPE_GFX_KONA (KLCLASS_GFX | 0x1) /* KONA graphics on IP27 */
-#define KLTYPE_GFX_MGRA (KLCLASS_GFX | 0x3) /* MGRAS graphics on IP27 */
-
-#define KLTYPE_WEIRDROUTER (KLCLASS_ROUTER | 0x0)
 #define KLTYPE_ROUTER     (KLCLASS_ROUTER | 0x1)
-#define KLTYPE_ROUTER2    KLTYPE_ROUTER		/* Obsolete! */
-#define KLTYPE_NULL_ROUTER (KLCLASS_ROUTER | 0x2)
 #define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
 #define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4)
 
-#define KLTYPE_WEIRDMIDPLANE (KLCLASS_MIDPLANE | 0x0)
-#define KLTYPE_MIDPLANE8  (KLCLASS_MIDPLANE | 0x1) /* 8 slot backplane */
-#define KLTYPE_MIDPLANE    KLTYPE_MIDPLANE8
 #define KLTYPE_IOBRICK_XBOW	(KLCLASS_MIDPLANE | 0x2)
 
 #define KLTYPE_IOBRICK		(KLCLASS_IOBRICK | 0x0)
-#define KLTYPE_IBRICK		(KLCLASS_IOBRICK | 0x1)
-#define KLTYPE_PBRICK		(KLCLASS_IOBRICK | 0x2)
-#define KLTYPE_XBRICK		(KLCLASS_IOBRICK | 0x3)
 #define KLTYPE_NBRICK		(KLCLASS_IOBRICK | 0x4)
-#define KLTYPE_PEBRICK		(KLCLASS_IOBRICK | 0x5)
 #define KLTYPE_PXBRICK		(KLCLASS_IOBRICK | 0x6)
 #define KLTYPE_IXBRICK		(KLCLASS_IOBRICK | 0x7)
 #define KLTYPE_CGBRICK		(KLCLASS_IOBRICK | 0x8)
+#define KLTYPE_OPUSBRICK	(KLCLASS_IOBRICK | 0x9)
+#define KLTYPE_BUBRICK          (KLCLASS_IOBRICK | 0xa)
+#define KLTYPE_IABRICK		(KLCLASS_IOBRICK | 0xb)
+#define KLTYPE_PABRICK          (KLCLASS_IOBRICK | 0xc)
 
 
-#define KLTYPE_PBRICK_BRIDGE	KLTYPE_PBRICK
-
 /* The value of type should be more than 8 so that hinv prints
  * out the board name from the NIC string. For values less than
  * 8 the name of the board needs to be hard coded in a few places.
  * When bringup started, NIC names had not been standardized, so we
  * had to hard code them. (For people interested in history.)
  */
-#define KLTYPE_XTHD   	(KLCLASS_PSEUDO_GFX | 0x9)
-
 #define KLTYPE_UNKNOWN	(KLCLASS_UNKNOWN | 0xf)
 
 #define KLTYPE(_x) 	((_x) & KLTYPE_MASK)
-#define IS_MIO_PRESENT(l)	((l->brd_type == KLTYPE_BASEIO) && \
-				 (l->brd_flags & SECOND_NIC_PRESENT))
-#define IS_MIO_IOC3(l,n)	(IS_MIO_PRESENT(l) && (n > 2))
 
 /* 
  * board structures
@@ -418,13 +374,8 @@
 
 #define MAX_COMPTS_PER_BRD 24
 
-#define LOCAL_BOARD 1
-#define REMOTE_BOARD 2
-
-#define LBOARD_STRUCT_VERSION 	2
-
 typedef struct lboard_s {
-	klconf_off_t 	brd_next;         /* Next BOARD */
+	klconf_off_t 	brd_next_any;     /* Next BOARD */
 	unsigned char 	struct_type;      /* type of structure, local or remote */
 	unsigned char 	brd_type;         /* type+class */
 	unsigned char 	brd_sversion;     /* version of this structure */
@@ -448,8 +399,10 @@
 	confidence_t	brd_confidence;	  /* confidence that the board is bad */
 	nasid_t		brd_owner;        /* who owns this board */
 	unsigned char 	brd_nic_flags;    /* To handle 8 more NICs */
-	char		pad[32];	  /* future expansion */
+	char		pad[24];	  /* future expansion */
 	char		brd_name[32];
+	nasid_t		brd_next_same_host; /* host of next brd w/same nasid */
+	klconf_off_t	brd_next_same;    /* Next BOARD with same nasid */
 } lboard_t;
 
 
@@ -462,12 +415,11 @@
 
 #define KLCF_CLASS(_brd)	KLCLASS((_brd)->brd_type)
 #define KLCF_TYPE(_brd)		KLTYPE((_brd)->brd_type)
-#define KLCF_REMOTE(_brd)  	(((_brd)->struct_type & LOCAL_BOARD) ? 0 : 1)
 #define KLCF_NUM_COMPS(_brd)	((_brd)->brd_numcompts)
 #define KLCF_MODULE_ID(_brd)	((_brd)->brd_module)
 
 #ifndef __ia64
-#define KLCF_NEXT(_brd) 		((_brd)->brd_next ? (lboard_t *)((_brd)->brd_next):  NULL)
+#define KLCF_NEXT(_brd) 		((_brd)->brd_next_any ? (lboard_t *)((_brd)->brd_next_any):  NULL)
 #define KLCF_COMP(_brd, _ndx)   \
 		(klinfo_t *)(NODE_OFFSET_TO_K0(NASID_GET(_brd), \
 						(_brd)->brd_compts[(_ndx)]))
@@ -478,8 +430,8 @@
 
 #define NODE_OFFSET_TO_KLINFO(n,off)    ((klinfo_t*) TO_NODE_CAC(n,off))
 #define KLCF_NEXT(_brd)         \
-        ((_brd)->brd_next ?     \
-         (NODE_OFFSET_TO_LBOARD(NASID_GET(_brd), (_brd)->brd_next)): NULL)
+        ((_brd)->brd_next_any ?     \
+         (NODE_OFFSET_TO_LBOARD(NASID_GET(_brd), (_brd)->brd_next_any)): NULL)
 #define KLCF_COMP(_brd, _ndx)   \
                 ((((_brd)->brd_compts[(_ndx)]) == 0) ? 0 : \
 			(NODE_OFFSET_TO_KLINFO(NASID_GET(_brd), (_brd)->brd_compts[(_ndx)])))
@@ -515,7 +467,7 @@
 	nasid_t		nasid;            /* node number - from parent */
 	char		pad1;		  /* pad out structure. */
 	char		pad2;		  /* pad out structure. */
-	COMPONENT	*arcs_compt;      /* ptr to the arcs struct for ease*/
+	void		*data;
         klconf_off_t	errinfo;          /* component specific errors */
         unsigned short  pad3;             /* pci fields have moved over to */
         unsigned short  pad4;             /* klbri_t */
@@ -537,36 +489,14 @@
 #define KLSTRUCT_MEMBNK 	3
 #define KLSTRUCT_XBOW 		4
 #define KLSTRUCT_BRI 		5
-#define KLSTRUCT_IOC3 		6
-#define KLSTRUCT_PCI 		7
-#define KLSTRUCT_VME 		8
 #define KLSTRUCT_ROU		9
 #define KLSTRUCT_GFX 		10
 #define KLSTRUCT_SCSI 		11
-#define KLSTRUCT_FDDI 		12
-#define KLSTRUCT_MIO 		13
 #define KLSTRUCT_DISK 		14
-#define KLSTRUCT_TAPE 		15
 #define KLSTRUCT_CDROM 		16
-#define KLSTRUCT_HUB_UART 	17
-#define KLSTRUCT_IOC3ENET 	18
-#define KLSTRUCT_IOC3UART 	19
-#define KLSTRUCT_UNUSED		20 /* XXX UNUSED */
-#define KLSTRUCT_IOC3PCKM       21
-#define KLSTRUCT_RAD        	22
-#define KLSTRUCT_HUB_TTY        23
-#define KLSTRUCT_IOC3_TTY 	24
-
-/* Early Access IO proms are compatible
-   only with KLSTRUCT values upto 24. */
 
 #define KLSTRUCT_FIBERCHANNEL 	25
 #define KLSTRUCT_MOD_SERIAL_NUM 26
-#define KLSTRUCT_IOC3MS         27
-#define KLSTRUCT_TPU            28
-#define KLSTRUCT_GSN_A          29
-#define KLSTRUCT_GSN_B          30
-#define KLSTRUCT_XTHD           31
 #define KLSTRUCT_QLFIBRE        32
 #define KLSTRUCT_1394           33
 #define KLSTRUCT_USB		34
@@ -576,36 +506,14 @@
 #define KLSTRUCT_PEBRICK	38
 #define KLSTRUCT_GIGE           39
 #define KLSTRUCT_IDE		40
+#define KLSTRUCT_IOC4		41
+#define KLSTRUCT_IOC4UART	42
+#define KLSTRUCT_IOC4_TTY	43
+#define KLSTRUCT_IOC4PCKM	44
+#define KLSTRUCT_IOC4MS		45
+#define KLSTRUCT_IOC4_ATA	46
+#define KLSTRUCT_PCIGFX		47
 
-/*
- * These are the indices of various components within a lboard structure.
- */
-
-#define IP27_CPU0_INDEX 0
-#define IP27_CPU1_INDEX 1
-#define IP27_HUB_INDEX 2
-#define IP27_MEM_INDEX 3
-
-#define BASEIO_BRIDGE_INDEX 0
-#define BASEIO_IOC3_INDEX 1
-#define BASEIO_SCSI1_INDEX 2
-#define BASEIO_SCSI2_INDEX 3
-
-#define MIDPLANE_XBOW_INDEX 0
-#define ROUTER_COMPONENT_INDEX 0
-
-#define CH4SCSI_BRIDGE_INDEX 0
-
-/* Info holders for various hardware components */
-
-typedef u64 *pci_t;
-typedef u64 *vmeb_t;
-typedef u64 *vmed_t;
-typedef u64 *fddi_t;
-typedef u64 *scsi_t;
-typedef u64 *mio_t;
-typedef u64 *graphics_t;
-typedef u64 *router_t;
 
 /*
  * The port info in the ip27_cfg area translates to an lboard_t in the 
@@ -712,42 +620,12 @@
 	klinfo_t 	bri_info ;
     	unsigned char	bri_eprominfo ;    /* IO6prom connected to bridge */
     	unsigned char	bri_bustype ;      /* PCI/VME BUS bridge/GIO */
-    	pci_t    	pci_specific  ;    /* PCI Board config info */
+    	u64	    	*pci_specific  ;    /* PCI Board config info */
 	klpci_device_t	bri_devices[MAX_PCI_DEVS] ;	/* PCI IDs */
 	klconf_off_t	bri_mfg_nic ;
 	unsigned long	pad;
 } klbri_t ;
 
-#define MAX_IOC3_TTY	2
-
-typedef struct klioc3_s {                          /* IOC3 */
-	klinfo_t 	ioc3_info ;
-    	unsigned char	ioc3_ssram ;        /* Info about ssram */
-    	unsigned char	ioc3_nvram ;        /* Info about nvram */
-    	klinfo_t	ioc3_superio ;      /* Info about superio */
-	klconf_off_t	ioc3_tty_off ;
-	klinfo_t	ioc3_enet ;
-	klconf_off_t	ioc3_enet_off ;
-	klconf_off_t	ioc3_kbd_off ;
-	unsigned long	pad;
-} klioc3_t ;
-
-#define MAX_VME_SLOTS 8
-
-typedef struct klvmeb_s {                          /* VME BRIDGE - PCI CTLR */
-	klinfo_t 	vmeb_info ;
-	vmeb_t		vmeb_specific ;
-    	klconf_off_t   	vmeb_brdinfo[MAX_VME_SLOTS]   ;    /* VME Board config info */
-	unsigned long	pad;
-} klvmeb_t ;
-
-typedef struct klvmed_s {                          /* VME DEVICE - VME BOARD */
-	klinfo_t	vmed_info ;
-	vmed_t		vmed_specific ;
-    	klconf_off_t   	vmed_brdinfo[MAX_VME_SLOTS]   ;    /* VME Board config info */
-	unsigned long	pad;
-} klvmed_t ;
-
 #define ROUTER_VECTOR_VERS	2
 
 /* XXX - Don't we need the number of ports here?!? */
@@ -780,29 +658,12 @@
 	uint		cookie;		/* for compatibility with older proms */
 	uint		moduleslot;
 	struct klgfx_s	*gfx_next_pipe;
-	graphics_t	gfx_specific;
+	u64		*gfx_specific;
 	klconf_off_t    pad0;		/* for compatibility with older proms */
 	klconf_off_t    gfx_mfg_nic;
 	unsigned long	pad;
 } klgfx_t;
 
-typedef struct klxthd_s {   
-	klinfo_t 	xthd_info ;
-	klconf_off_t	xthd_mfg_nic ;        /* MFG NIC string */
-	unsigned long	pad;
-} klxthd_t ;
-
-typedef struct kltpu_s {                     /* TPU board */
-	klinfo_t 	tpu_info ;
-	klconf_off_t	tpu_mfg_nic ;        /* MFG NIC string */
-	unsigned long	pad;
-} kltpu_t ;
-
-typedef struct klgsn_s {                     /* GSN board */
-	klinfo_t 	gsn_info ;
-	klconf_off_t	gsn_mfg_nic ;        /* MFG NIC string */
-} klgsn_t ;
-
 #define MAX_SCSI_DEVS 16
 
 /*
@@ -814,7 +675,7 @@
 
 typedef struct klscsi_s {                          /* SCSI Bus */
 	klinfo_t 	scsi_info ;
-    	scsi_t       	scsi_specific   ; 
+    	u64       	*scsi_specific   ; 
 	unsigned char 	scsi_numdevs ;
 	klconf_off_t	scsi_devinfo[MAX_SCSI_DEVS] ; 
 	unsigned long	pad;
@@ -840,11 +701,9 @@
 	unsigned long	pad;
 } klttydev_t ;
 
-typedef struct klenetdev_s {                          /* ENET device */
-	klinfo_t 	enetdev_info ;
-	struct net_data *enetdev_cfg ; /* driver fills up this */
-	unsigned long	pad;
-} klenetdev_t ;
+typedef struct klpcigfx_s {                          /* PCI GFX */
+        klinfo_t        gfx_info ;
+} klpcigfx_t ;
 
 typedef struct klkbddev_s {                          /* KBD device */
 	klinfo_t 	kbddev_info ;
@@ -858,21 +717,6 @@
 	unsigned long	pad;
 } klmsdev_t ;
 
-#define MAX_FDDI_DEVS 10 /* XXX Is this true */
-
-typedef struct klfddi_s {                          /* FDDI */
-	klinfo_t 	fddi_info ;
-    	fddi_t        	fddi_specific ;       
-	klconf_off_t	fddi_devinfo[MAX_FDDI_DEVS] ;
-	unsigned long	pad;
-} klfddi_t ;
-
-typedef struct klmio_s {                          /* MIO */
-	klinfo_t 	mio_info ;
-    	mio_t       	mio_specific   ; 
-	unsigned long	pad;
-} klmio_t ;
-
 /*
  * USB info
  */
@@ -890,16 +734,11 @@
 	klmembnk_t 	kc_mem;
 	klxbow_t  	kc_xbow;
 	klbri_t		kc_bri;
-	klioc3_t	kc_ioc3;
-	klvmeb_t	kc_vmeb;
-	klvmed_t	kc_vmed;
 	klrou_t		kc_rou;
 	klgfx_t		kc_gfx;
 	klscsi_t	kc_scsi;
 	klscctl_t	kc_scsi_ctl;
 	klscdev_t	kc_scsi_dev;
-	klfddi_t	kc_fddi;
-	klmio_t		kc_mio;
 	klmod_serial_num_t kc_snum ;
 	klusb_t		kc_usb;
 } klcomp_t;
@@ -907,56 +746,9 @@
 typedef union kldev_s {      /* for device structure allocation */
 	klscdev_t	kc_scsi_dev ;
 	klttydev_t	kc_tty_dev ;
-	klenetdev_t	kc_enet_dev ;
 	klkbddev_t 	kc_kbd_dev ;
 } kldev_t ;
 
-/* Data structure interface routines. TBD */
-
-/* Include launch info in this file itself? TBD */
-
-/*
- * TBD - Can the ARCS and device driver related info also be included in the
- * KLCONFIG area. On the IO4PROM, prom device driver info is part of cfgnode_t 
- * structure, viz private to the IO4prom.
- */
-
-/* 
- * TBD - Allocation issues. 
- *
- * Do we need to Mark off sepatate heaps for lboard_t, rboard_t, component, 
- * errinfo and allocate from them, or have a single heap and allocate all 
- * structures from it. Debug is easier in the former method since we can
- * dump all similar structs in one command, but there will be lots of holes, 
- * in memory and max limits are needed for number of structures.
- * Another way to make it organized, is to have a union of all components
- * and allocate a aligned chunk of memory greater than the biggest
- * component.
- */
-
-typedef union {
-	lboard_t *lbinfo ;
-} biptr_t ;
-
-
-#define BRI_PER_XBOW 6
-#define PCI_PER_BRI  8
-#define DEV_PER_PCI  16
-
-
-/* Virtual dipswitch values (starting from switch "7"): */
-
-#define VDS_NOGFX		0x8000	/* Don't enable gfx and autoboot */
-#define VDS_NOMP		0x100	/* Don't start slave processors */
-#define VDS_MANUMODE		0x80	/* Manufacturing mode */
-#define VDS_NOARB		0x40	/* No bootmaster arbitration */
-#define VDS_PODMODE		0x20	/* Go straight to POD mode */
-#define VDS_NO_DIAGS		0x10	/* Don't run any diags after BM arb */
-#define VDS_DEFAULTS		0x08	/* Use default environment values */
-#define VDS_NOMEMCLEAR		0x04	/* Don't run mem cfg code */
-#define VDS_2ND_IO4		0x02	/* Boot from the second IO4 */
-#define VDS_DEBUG_PROM		0x01	/* Print PROM debugging messages */
-
 /* external declarations of Linux kernel functions. */
 
 extern lboard_t *find_lboard(lboard_t *start, unsigned char type);
@@ -976,7 +768,6 @@
 extern int	config_find_xbow(nasid_t, lboard_t **, klxbow_t**);
 extern int 	update_klcfg_cpuinfo(nasid_t, int);
 extern void 	board_to_path(lboard_t *brd, char *path);
-extern moduleid_t get_module_id(nasid_t nasid);
 extern void 	nic_name_convert(char *old_name, char *new_name);
 extern int 	module_brds(nasid_t nasid, lboard_t **module_brds, int n);
 extern lboard_t *brd_from_key(uint64_t key);
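
With brd_next renamed to brd_next_any (and brd_next_same added for boards sharing a nasid), the canonical way to walk boards of one type is still find_lboard() plus KLCF_NEXT(), exactly as scan_for_ionodes() does earlier in this patch. A minimal sketch, assuming only the declarations in this header; the function name is hypothetical:

#include <asm/sn/klconfig.h>

/*
 * Hypothetical helper: count the TIO boards recorded in a node's
 * klconfig area.
 */
static int
count_tio_boards(nasid_t nasid)
{
	lboard_t *brd;
	int count = 0;

	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_TIO);
	while (brd) {
		count++;
		brd = KLCF_NEXT(brd);
		if (!brd)
			break;
		brd = find_lboard(brd, KLTYPE_TIO);
	}
	return count;
}
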
diff -Nru a/include/asm-ia64/sn/ksys/l1.h b/include/asm-ia64/sn/ksys/l1.h
--- a/include/asm-ia64/sn/ksys/l1.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/ksys/l1.h	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -7,8 +6,8 @@
  * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-#ifndef _ASM_SN_KSYS_L1_H
-#define _ASM_SN_KSYS_L1_H
+#ifndef _ASM_IA64_SN_KSYS_L1_H
+#define _ASM_IA64_SN_KSYS_L1_H
 
 #include <asm/sn/types.h>
 
@@ -88,17 +87,26 @@
 					   command string */
 
 /* brick type response codes */
+#define L1_BRICKTYPE_PX         0x23            /* # */
+#define L1_BRICKTYPE_PE         0x25            /* % */
+#define L1_BRICKTYPE_N_p0       0x26            /* & */
 #define L1_BRICKTYPE_IP45       0x34            /* 4 */
-#define L1_BRICKTYPE_C          0x43            /* C */
-#define L1_BRICKTYPE_I          0x49            /* I */
-#define L1_BRICKTYPE_P          0x50            /* P */
-#define L1_BRICKTYPE_R          0x52            /* R */
-#define L1_BRICKTYPE_X          0x58            /* X */
-#define L1_BRICKTYPE_X2         0x59            /* Y */
-#define L1_BRICKTYPE_N          0x4e            /* N */
-#define L1_BRICKTYPE_PE		0x25		/* % */
-#define L1_BRICKTYPE_PX		0x23		/* # */
-#define L1_BRICKTYPE_IX		0x3d		/* = */
+#define L1_BRICKTYPE_IP41       0x35            /* 5 */
+#define L1_BRICKTYPE_TWISTER    0x36            /* 6 */ /* IP53 & ROUTER */
+#define L1_BRICKTYPE_IX         0x3d            /* = */
+#define L1_BRICKTYPE_IP34       0x61            /* a */
+#define L1_BRICKTYPE_C          0x63            /* c */
+#define L1_BRICKTYPE_I          0x69            /* i */
+#define L1_BRICKTYPE_N          0x6e            /* n */
+#define L1_BRICKTYPE_OPUS       0x6f		/* o */
+#define L1_BRICKTYPE_P          0x70            /* p */
+#define L1_BRICKTYPE_R          0x72            /* r */
+#define L1_BRICKTYPE_CHI_CG     0x76            /* v */
+#define L1_BRICKTYPE_X          0x78            /* x */
+#define L1_BRICKTYPE_X2         0x79            /* y */
+#define L1_BRICKTYPE_BU		0x88            /* ^ */ /* TIO bringup brick */
+#define L1_BRICKTYPE_IA		0x0             /* habeck: what will this be */
+#define L1_BRICKTYPE_PA		0x1             /* habeck: what will this be */
 
 /* EEPROM codes (for the "read EEPROM" request) */
 /* c brick */
@@ -133,4 +141,4 @@
 int	iobrick_module_get( nasid_t nasid );
 
 
-#endif /* _ASM_SN_KSYS_L1_H */
+#endif /* _ASM_IA64_SN_KSYS_L1_H */
diff -Nru a/include/asm-ia64/sn/module.h b/include/asm-ia64/sn/module.h
--- a/include/asm-ia64/sn/module.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/module.h	Thu Nov  6 13:42:35 2003
@@ -138,6 +138,11 @@
 #define MODULE_PEBRICK          8
 #define MODULE_PXBRICK          9
 #define MODULE_IXBRICK          10
+#define MODULE_CGBRICK		11
+#define MODULE_OPUSBRICK        12
+#define MODULE_BUBRICK		13	/* TIO BringUp Brick */
+#define MODULE_IABRICK		14
+#define MODULE_PABRICK		15
 
 /*
  * Moduleid_t comparison macros
@@ -155,12 +160,13 @@
     spinlock_t		lock;		/* Lock for this structure	   */
 
     /* List of nodes in this module */
-    cnodeid_t		nodes[MODULE_MAX_NODES];
-    geoid_t		geoid[MODULE_MAX_NODES];
+    cnodeid_t		nodes[MAX_SLABS + 1];
+    geoid_t		geoid[MAX_SLABS + 1];
     struct {
-		char	moduleid[8];
-    } io[MODULE_MAX_NODES];
-    int			nodecnt;	/* Number of nodes in array        */
+		char	 moduleid[8];
+		uint64_t iobrick_type;
+    } io[MAX_SLABS + 1];
+
     /* Fields for Module System Controller */
     int			mesgpend;	/* Message pending                 */
     int			shutdown;	/* Shutdown in progress            */
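
The new MODULE_* brick identifiers above line up with the L1 brick-type response codes added to ksys/l1.h. A hypothetical sketch of the kind of translation a consumer such as iobrick_module_get() might perform; the function is illustrative only and covers just the codes visible in this patch:

#include <asm/sn/ksys/l1.h>
#include <asm/sn/module.h>

/*
 * Illustrative only: map an L1 brick-type response byte to a
 * MODULE_* brick identifier; unknown codes return -1.
 */
static int
l1_bricktype_to_module(int bt)
{
	switch (bt) {
	case L1_BRICKTYPE_PX:		return MODULE_PXBRICK;
	case L1_BRICKTYPE_IX:		return MODULE_IXBRICK;
	case L1_BRICKTYPE_OPUS:		return MODULE_OPUSBRICK;
	case L1_BRICKTYPE_CHI_CG:	return MODULE_CGBRICK;
	case L1_BRICKTYPE_BU:		return MODULE_BUBRICK;
	default:			return -1;
	}
}
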
diff -Nru a/include/asm-ia64/sn/nodepda.h b/include/asm-ia64/sn/nodepda.h
--- a/include/asm-ia64/sn/nodepda.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/nodepda.h	Thu Nov  6 13:42:35 2003
@@ -12,7 +12,6 @@
 #include <linux/config.h>
 #include <asm/sn/sgi.h>
 #include <asm/irq.h>
-#include <asm/topology.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/router.h>
 #include <asm/sn/pda.h>
@@ -51,7 +50,9 @@
 	geoid_t		geoid;
 	module_t	*module;	/* Pointer to containing module */
 	xwidgetnum_t 	basew_id;
+	xwidgetnum_t 	tio_basew_id[TIO_WIDGET_ID_MAX + 1];
 	vertex_hdl_t 	basew_xc;
+	vertex_hdl_t 	tio_basew_xc[TIO_WIDGET_ID_MAX + 1];
 	int		hubticks;
 	int		num_routers;	/* XXX not setup! Total routers in the system */
 
@@ -65,6 +66,8 @@
 	nodepda_router_info_t	*npda_rip_first;
 	nodepda_router_info_t	**npda_rip_last;
 
+
+	spinlock_t		bist_lock;
 
 	/*
 	 * The BTEs on this node are shared by the local cpus
diff -Nru a/include/asm-ia64/sn/pci/pci_defs.h b/include/asm-ia64/sn/pci/pci_defs.h
--- a/include/asm-ia64/sn/pci/pci_defs.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/pci/pci_defs.h	Thu Nov  6 13:42:35 2003
@@ -1,13 +1,12 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (c) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
  */
-#ifndef _ASM_SN_PCI_PCI_DEFS_H
-#define _ASM_SN_PCI_PCI_DEFS_H
+#ifndef _ASM_IA64_SN_PCI_PCI_DEFS_H
+#define _ASM_IA64_SN_PCI_PCI_DEFS_H
 
 #include <linux/config.h>
 
@@ -123,13 +122,35 @@
 #define PCI_CFG_PPB_MEMPFLIMHI		0x2C	/* PfMEM Limit Addr bits 32..63 */
 #define PCI_CFG_PPB_IOBASEHI		0x30	/* IO Base Addr bits 16..31 */
 #define PCI_CFG_PPB_IOLIMHI		0x32	/* IO Limit Addr bits 16..31 */
+
+#define PCI_CFG_PPB_BASE_ADDRS		2	/* Only 2 bars in type 1 hdrs */
+
+/* ###maule:  0x34 should be the capability ptr; 0x35-0x37 are reserved */
+
 #define	PCI_CFG_PPB_SUB_VENDOR		0x34	/* Subsystem Vendor ID */
 #define	PCI_CFG_PPB_SUB_DEVICE		0x36	/* Subsystem Device ID */
 #define	PCI_CFG_PPB_ROM_BASE		0x38	/* ROM base address */
 #define	PCI_CFG_PPB_INT_LINE		0x3C	/* Interrupt Line */
 #define	PCI_CFG_PPB_INT_PIN		0x3D	/* Interrupt Pin */
 #define	PCI_CFG_PPB_BRIDGE_CTRL		0x3E	/* Bridge Control */
-     /* XXX- these might be DEC 21152 specific */
+
+/*
+ * PPB base/limit alignment restrictions
+ */
+
+#define PCI_PPB_IO_ALIGN	0x10000		/* I/O Base alignment */
+#define PCI_PPB_MEM_ALIGN	0x100000	/* MEM Base alignment */
+#define PCI_PPB_MEMPF_ALIGN	0x100000	/* Prefetch MEM Base align */
+
+#define PCI_PPB_IO_AD16		0x0		/* 16 bit I/O base/lim reg */
+#define PCI_PPB_IO_AD32		0x1		/* 32 bit I/O base/lim reg */
+#define PCI_PPB_MEMPF_AD32	0x0		/* 32 bit PFMEM base/lim reg */
+#define PCI_PPB_MEMPF_AD64	0x1		/* 64 bit PFMEM base/lim reg */
+
+/*
+ * XXX  These might be DEC 21152 specific.  Regardless, they are vendor-specific
+ * XXX  and should really be defined somewhere else ...
+ */
 #define	PCI_CFG_PPB_CHIP_CTRL		0x40
 #define	PCI_CFG_PPB_DIAG_CTRL		0x41
 #define	PCI_CFG_PPB_ARB_CTRL		0x42
@@ -230,6 +251,7 @@
 #define	PCI_CAP_HS		0x06		/* CompactPCI Hot Swap */
 #define	PCI_CAP_PCIX		0x07		/* PCI-X */
 #define PCI_CAP_ID_HT		0x08		/* HyperTransport */
+#define PCI_CAP_MSIX		0x0d		/* Message Signaled Intr X */
 
 
 /* PIO interface macros */
@@ -321,8 +343,6 @@
 
 #ifndef __ASSEMBLY__
 
-#ifdef LITTLE_ENDIAN
-
 /*
  * PCI config space definition
  */
@@ -425,111 +445,86 @@
 	cap_pcix_stat_reg_t	pcix_type0_status;
 } cap_pcix_type0_t;
 
-#else
-
 /*
- * PCI config space definition
+ * Message Signaled Interrupt (MSI) Capability
  */
-typedef volatile struct pci_cfg_s {
-	uint16_t	dev_id;
-	uint16_t	vendor_id;
-	uint16_t	status;
-	uint16_t	cmd;
-	uchar_t		class;
-	uchar_t		sub_class;
-	uchar_t		prog_if;
-	uchar_t		rev;
-	uchar_t		bist;
-	uchar_t		hdr_type;
-	uchar_t		lt;
-	uchar_t		line_size;
-	uint32_t	bar[6];
-	uint32_t	cardbus;
-	uint16_t	subsys_dev_id;
-	uint16_t	subsys_vendor_id;
-	uint32_t	exp_rom;
-	uint32_t	res[2];
-	uchar_t		max_lat;
-	uchar_t		min_gnt;
-	uchar_t		int_pin;
-	uchar_t		int_line;
-} pci_cfg_t;
+typedef volatile struct cap_msi_control_reg_s {
+      uint16_t        msi_enable:             1,/* use msi         RW */
+                      msi_multi_msg_cap:      3,/* msgs capable    RO */
+                      msi_multi_msg_en:       3,/* msgs requested  RW */
+                      msi_64bit:              1,/* 64-addr capable RO */
+                      reserved1:              8;
+} cap_msi_control_reg_t;
+
+typedef volatile struct cap_msi64_s {
+       uint32_t            msi_addr_lsb;     /* 0x5c */
+       uint32_t            msi_addr_msb;     /* 0x60 */
+       uint16_t              msi_data;         /* 0x66 */
+       uint16_t              msi_unused;       /* 0x64 */
+} cap_msi64_t;
+
+typedef volatile struct cap_msi32_s {
+      uint32_t        msi_addr;               /* 0x5c */
+      uint16_t        msi_data;               /* 0x62 */
+      uint16_t        msi_unused;             /* 0x60 */
+} cap_msi32_t;
+
+typedef union cap_msi_u {
+      cap_msi64_t     msi64;
+      cap_msi32_t     msi32;
+} cap_msi_u_t;
+ 
+typedef volatile struct cap_msi_s {
+     uchar_t                 msi_cap_id; /* 0x5b */
+     uchar_t                 msi_cap_nxt;
+     cap_msi_control_reg_t   msi_control;
+     cap_msi_u_t             msi_ad;
+} cap_msi_t;
+
 
 /*
- * PCI Type 1 config space definition for PCI to PCI Bridges (PPBs)
+ * Message Signaled Interrupt (MSIX) Capability
  */
-typedef volatile struct pci_cfg1_s {
-	uint16_t	dev_id;
-	uint16_t	vendor_id;
-	uint16_t	status;
-	uint16_t	cmd;
-	uchar_t		class;
-	uchar_t		sub_class;
-	uchar_t		prog_if;
-	uchar_t		rev;
-	uchar_t		bist;
-	uchar_t		hdr_type;
-	uchar_t		lt;
-	uchar_t		line_size;
-	uint32_t	bar[2];
-	uchar_t		slt;
-	uchar_t		sub_bus_num;
-	uchar_t		snd_bus_num;
-	uchar_t		pri_bus_num;
-	uint16_t	snd_status;
-	uchar_t		io_limit;
-	uchar_t		io_base;
-	uint16_t	mem_limit;
-	uint16_t	mem_base;
-	uint16_t	pmem_limit;
-	uint16_t	pmem_base;
-	uint32_t	pmem_limit_upper;
-	uint32_t	pmem_base_upper;
-	uint16_t	io_limit_upper;
-	uint16_t	io_base_upper;
-	uint32_t	res;
-	uint32_t	exp_rom;
-	uint16_t	ppb_control;
-	uchar_t		int_pin;
-	uchar_t		int_line;
-} pci_cfg1_t;
+typedef volatile struct cap_msix_control_reg_s {
+      uint16_t	      msix_table_size:	     11,
+		      msix_reserved:          3,
+		      msix_func_mask:         1,
+                      msix_enable:            1;
+} cap_msix_control_reg_t;
+
+typedef volatile struct cap_msix_table_s {
+      uint32_t        msix_table_bir:         3,
+                      msix_table_offset:     29;
+} cap_msix_table_t;
+
+typedef volatile struct cap_msix_pba_s {
+      uint32_t        msix_pba_bir:           3,
+                      msix_pba_offset:       29;
+} cap_msix_pba_t;
+
+typedef volatile struct cap_msix_s {
+     uchar_t                 msix_cap_id; /* 0x0d */
+     uchar_t                 msix_cap_nxt;
+     cap_msix_control_reg_t  msix_control;
+     cap_msix_table_t        msix_table;
+     cap_msix_pba_t          msix_pba;
+} cap_msix_t;
+
+typedef volatile struct msix_vec_cntl_s {
+      uint32_t        mask_bit:		1,
+		      reserved:		31;
+} msix_vec_cntl_t;
+
+typedef volatile struct cap_msix_table_entry_s {
+     uint32_t		     msix_addr_lsb;
+     uint32_t		     msix_addr_msb;
+     uint32_t                msix_data;
+     msix_vec_cntl_t         msix_vec_cntl;
+} cap_msix_table_entry_t;
 
 
 
-/*
- * PCI-X Capability
- */
-typedef volatile struct cap_pcix_cmd_reg_s {
-	uint16_t	reserved1:              9,
-			max_split:		3,
-			max_mem_read_cnt:	2,
-			enable_relaxed_order:	1,
-			data_parity_enable:	1;
-} cap_pcix_cmd_reg_t;
-
-typedef volatile struct cap_pcix_stat_reg_s {
-	uint32_t	reserved1:		2,
-			split_complt_err:	1,
-			max_cum_read:		3,
-			max_out_split:		3,
-			max_mem_read_cnt:	2,
-			device_complex:		1,
-			unexpect_split_complt:	1,
-			split_complt_discard:	1,
-			mhz133_capable:		1,
-			bit64_device:		1,
-			bus_num:		8,
-			dev_num:		5,
-			func_num:		3;
-} cap_pcix_stat_reg_t;
 
-typedef volatile struct cap_pcix_type0_s {
-	cap_pcix_cmd_reg_t	pcix_type0_command;
-	uchar_t			pcix_cap_nxt;
-	uchar_t			pcix_cap_id;
-	cap_pcix_stat_reg_t	pcix_type0_status;
-} cap_pcix_type0_t;
 
-#endif
 #endif	/* __ASSEMBLY__ */
-#endif /* _ASM_SN_PCI_PCI_DEFS_H */
+#endif /* _ASM_IA64_SN_PCI_PCI_DEFS_H */
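
The MSI and MSI-X capability layouts added above are found by walking the standard PCI capability list. A sketch of that walk using pcibr_config_get() (declared in pcibr.h below); the 0x06 status offset, 0x10 capability-list bit and 0x34 capability pointer are the generic PCI config values rather than names defined in this header, and the function itself is hypothetical:

#include <asm/sn/pci/pci_defs.h>
#include <asm/sn/pci/pcibr.h>

/*
 * Hypothetical helper: return the config-space offset of the MSI-X
 * capability (PCI_CAP_MSIX) for a device, or 0 if it has none.
 */
static unsigned
find_msix_cap(vertex_hdl_t conn)
{
	unsigned cap;

	/* Bit 4 of the PCI status register: capability list present. */
	if (!(pcibr_config_get(conn, 0x06, 2) & 0x10))
		return 0;

	cap = pcibr_config_get(conn, 0x34, 1) & 0xfc;
	while (cap) {
		if ((pcibr_config_get(conn, cap, 1) & 0xff) == PCI_CAP_MSIX)
			return cap;
		cap = pcibr_config_get(conn, cap + 1, 1) & 0xfc;	/* next ptr */
	}
	return 0;
}
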
diff -Nru a/include/asm-ia64/sn/pci/pcibr.h b/include/asm-ia64/sn/pci/pcibr.h
--- a/include/asm-ia64/sn/pci/pcibr.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/pci/pcibr.h	Thu Nov  6 13:42:35 2003
@@ -1,13 +1,12 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
  */
-#ifndef _ASM_SN_PCI_PCIBR_H
-#define _ASM_SN_PCI_PCIBR_H
+#ifndef _ASM_IA64_SN_PCI_PCIBR_H
+#define _ASM_IA64_SN_PCI_PCIBR_H
 
 #if defined(__KERNEL__)
 
@@ -17,7 +16,7 @@
 #include <asm/sn/pio.h>
 
 #include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/bridge.h>
+#include <asm/sn/pci/pcibr_asic.h>
 
 /* =====================================================================
  *    symbolic constants used by pcibr's xtalk bus provider
@@ -42,26 +41,6 @@
 typedef struct pcibr_intr_s *pcibr_intr_t;
 
 /* =====================================================================
- *    primary entry points: Bridge (pcibr) device driver
- *
- *	These functions are normal device driver entry points
- *	and are called along with the similar entry points from
- *	other device drivers. They are included here as documentation
- *	of their existence and purpose.
- *
- *	pcibr_init() is called to inform us that there is a pcibr driver
- *	configured into the kernel; it is responsible for registering
- *	as a crosstalk widget and providing a routine to be called
- *	when a widget with the proper part number is observed.
- *
- *	pcibr_attach() is called for each vertex in the hardware graph
- *	corresponding to a crosstalk widget with the manufacturer
- *	code and part number registered by pcibr_init().
- */
-
-extern int		pcibr_attach(vertex_hdl_t);
-
-/* =====================================================================
  *    bus provider function table
  *
  *	Normally, this table is only handed off explicitly
@@ -73,8 +52,9 @@
  *	pcibr, we can go directly to this ops table.
  */
 
-extern pciio_provider_t pcibr_provider;
+extern pciio_provider_t pci_xbridge_provider;
 extern pciio_provider_t pci_pic_provider;
+extern pciio_provider_t pci_tiocp_provider;
 
 /* =====================================================================
  *    secondary entry points: pcibr PCI bus provider
@@ -108,6 +88,11 @@
 
 extern void		pcibr_piomap_done(pcibr_piomap_t piomap);
 
+extern int		pcibr_piomap_probe(pcibr_piomap_t piomap,
+					   off_t offset,
+					   int len,
+					   void *valp);
+
 extern caddr_t		pcibr_piotrans_addr(vertex_hdl_t dev,
 					    device_desc_t dev_desc,
 					    pciio_space_t space,
@@ -194,19 +179,18 @@
 
 extern int		pcibr_reset(vertex_hdl_t dev);
 
-extern int              pcibr_write_gather_flush(vertex_hdl_t dev);
-
 extern pciio_endian_t	pcibr_endian_set(vertex_hdl_t dev,
 					 pciio_endian_t device_end,
 					 pciio_endian_t desired_end);
 
-extern pciio_priority_t pcibr_priority_set(vertex_hdl_t dev,
-					   pciio_priority_t device_prio);
-
 extern uint64_t		pcibr_config_get(vertex_hdl_t conn,
 					 unsigned reg,
 					 unsigned size);
 
+extern uint64_t		pcibr_config_get_safe(vertex_hdl_t conn,
+					 unsigned reg,
+					 unsigned size);
+
 extern void		pcibr_config_set(vertex_hdl_t conn,
 					 unsigned reg,
 					 unsigned size,
@@ -215,6 +199,10 @@
 extern int		pcibr_error_devenable(vertex_hdl_t pconn_vhdl,
 					      int error_code);
 
+extern pciio_slot_t	pcibr_error_extract(vertex_hdl_t pcibr_vhdl,
+					    pciio_space_t *spacep,
+					    iopaddr_t *addrp);
+
 extern int		pcibr_wrb_flush(vertex_hdl_t pconn_vhdl);
 extern int		pcibr_rrb_check(vertex_hdl_t pconn_vhdl,
 					int *count_vchan0,
@@ -238,7 +226,16 @@
 					       rrb_alloc_funct_f *func);
 
 extern int		pcibr_device_unregister(vertex_hdl_t);
-extern int		pcibr_dma_enabled(vertex_hdl_t);
+extern void             pcibr_driver_reg_callback(vertex_hdl_t, int, int, int);
+extern void             pcibr_driver_unreg_callback(vertex_hdl_t,
+                                                    int, int, int);
+extern pciio_businfo_t	pcibr_businfo_get(vertex_hdl_t);
+
+extern void *		pcibr_bridge_ptr_get(vertex_hdl_t, int);
+
+extern uint64_t		pcibr_disable_mst_timeout(vertex_hdl_t pcibr_vhdl);
+extern int		pcibr_enable_mst_timeout(vertex_hdl_t pcibr_vhdl);
+
 /*
  * Bridge-specific flags that can be set via pcibr_device_flags_set
  * and cleared via pcibr_device_flags_clear.  Other flags are
@@ -328,9 +325,6 @@
  * the allocation time in the current implementation of PCI bridge.
  */
 extern iopaddr_t	pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
-
-extern xwidget_intr_preset_f pcibr_xintr_preset;
-
 extern void		pcibr_hints_fix_rrbs(vertex_hdl_t);
 extern void		pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
 extern void		pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, ulong);
@@ -395,7 +389,7 @@
         pcibr_slot_up_resp_t     up;
         pcibr_slot_down_resp_t   down;
         pcibr_slot_info_resp_t   query;
-        void                    *any;
+        void                    *any;                             
     }                       req_respp;
     int                     req_size;
 };
@@ -422,7 +416,7 @@
     int                     resp_bss_ninfo;
     char                    resp_bss_devio_bssd_space[16];
     iopaddr_t               resp_bss_devio_bssd_base; 
-    bridgereg_t             resp_bss_device;
+    uint64_t		    resp_bss_device;
     int                     resp_bss_pmu_uctr;
     int                     resp_bss_d32_uctr;
     int                     resp_bss_d64_uctr;
@@ -430,7 +424,7 @@
     unsigned                resp_bss_d64_flags;
     iopaddr_t               resp_bss_d32_base;
     unsigned                resp_bss_d32_flags;
-    atomic_t                resp_bss_ext_ates_active;
+    int			    resp_bss_ext_ates_active;
     volatile unsigned      *resp_bss_cmd_pointer;
     unsigned                resp_bss_cmd_shadow;
     int                     resp_bs_rrb_valid;
@@ -438,12 +432,11 @@
     int                     resp_bs_rrb_valid_v2;
     int                     resp_bs_rrb_valid_v3;
     int                     resp_bs_rrb_res;
-    bridgereg_t             resp_b_resp;
-    bridgereg_t             resp_b_int_device;
-    bridgereg_t             resp_b_int_enable;
-    bridgereg_t             resp_b_int_host;
-    picreg_t		    resp_p_int_enable;
-    picreg_t		    resp_p_int_host;
+    uint64_t		    resp_b_resp;
+    uint64_t		    resp_b_int_device;
+    uint64_t		    resp_b_int_enable;
+    uint64_t		    resp_b_int_host;
+
     struct pcibr_slot_func_info_resp_s {
         int                     resp_f_status;
         char                    resp_f_slot_name[MAXDEVNAME];
@@ -469,8 +462,8 @@
         int                     resp_f_att_det_error;
 
     } resp_func[8];
-};
 
+};
 
 /*
  * PCI specific errors, interpreted by pciconfig command
@@ -508,4 +501,4 @@
 /* ERANGE                        34    */
 /* EUNATCH                       42    */
 
-#endif				/* _ASM_SN_PCI_PCIBR_H */
+#endif				/* _ASM_IA64_SN_PCI_PCIBR_H */
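
For orientation, here is a minimal usage sketch of the pcibr config-space
accessors declared above.  The connection vertex, the register offset and the
bit value are placeholder assumptions for illustration, not taken from this
patch, and the fourth argument of pcibr_config_set() is assumed to be the
value to write:

	/* Hypothetical caller of the pcibr secondary entry points.  'conn'
	 * is assumed to be a valid PCI connection vertex obtained from the
	 * hwgraph; offset 0x4 is the standard PCI command register.
	 */
	static void example_enable_bus_master(vertex_hdl_t conn)
	{
		uint64_t cmd;

		/* read the 16-bit command register */
		cmd = pcibr_config_get(conn, 0x4, 2);

		/* set the Bus Master enable bit (bit 2) and write it back */
		pcibr_config_set(conn, 0x4, 2, cmd | 0x4);
	}
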
diff -Nru a/include/asm-ia64/sn/pci/pcibr_asic.h b/include/asm-ia64/sn/pci/pcibr_asic.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/include/asm-ia64/sn/pci/pcibr_asic.h	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,511 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/* 
+ * This header file contains #defines that are common across the different
+ * PCI BRIDGE ASICs (PIC, TIOCP).
+ *
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIBR_ASIC_H
+#define _ASM_IA64_SN_PCI_PCIBR_ASIC_H
+
+#include <asm/sn/pci/pic.h>
+#include <asm/sn/pci/tiocp.h>
+
+/* Generic Bridge MMR structure pointer used for all PCI BRIDGE ASICS */
+typedef void pci_bridge_t;
+
+/*
+ * All PCI Bridge ASICS have the "id" register as the first register in
+ * the MMR space.  On BRIDGE & XBRIDGE this register is a 32 bit register,
+ * On PIC & TIOCP it's a 64bit register but can be read as a 32bit reg.
+ */
+typedef volatile struct pci_bridge_id {
+    uint32_t		id;			/* 0x000004 */
+    uint32_t		pad_000000;		/* 0x000000 */
+} pci_bridge_id_t;
+
+/* bridgetype macros given a pci_bridge_t pointer. */
+#define is_pic(bridge) IS_PIC_BRIDGE(((pci_bridge_id_t *)bridge)->id)
+#define is_tiocp(bridge) IS_TIOCP_BRIDGE(((pci_bridge_id_t *)bridge)->id)
+
+/* busmode macros given a pci_bridge_t pointer. */
+#define is_pcix(bridge) pcireg_mode_get((void *)bridge)
+#define is_pci(bridge) !is_pcix(bridge)
+
+/* 
+ * The different PCI Bridge types supported on the SGI platforms
+ */
+#define PCIBR_BRIDGETYPE_UNKNOWN       -1
+#define PCIBR_BRIDGETYPE_BRIDGE         0
+#define PCIBR_BRIDGETYPE_XBRIDGE        1
+#define PCIBR_BRIDGETYPE_PIC            2
+#define PCIBR_BRIDGETYPE_TIOCP          3
+
+/*****************************************************************************
+ *************************** PCI BRIDGE MMR DEFINES **************************
+ *****************************************************************************/
+
+/*
+ * STATUS register		offset 0x00000008
+ */
+#define PCIBR_STAT_TX_CREDIT_SHFT	8
+#define PCIBR_STAT_TX_CREDIT		(0xF << PCIBR_STAT_TX_CREDIT_SHFT)
+#define PCIBR_STAT_RX_CREDIT_SHFT	12
+#define PCIBR_STAT_RX_CREDIT		(0xF << PCIBR_STAT_RX_CREDIT_SHFT)
+#define PCIBR_STAT_LLP_TX_CNT_SHFT	16
+#define PCIBR_STAT_LLP_TX_CNT		(0xFF << PCIBR_STAT_LLP_TX_CNT_SHFT)
+#define PCIBR_STAT_LLP_RX_CNT_SHFT	24
+#define PCIBR_STAT_LLP_RX_CNT		(0xFF << PCIBR_STAT_LLP_RX_CNT_SHFT)
+#define PCIBR_STAT_PCIX_ACTIVE_SHFT	33
+#define PCIBR_STAT_PCIX_ACTIVE		(0x1ull << PCIBR_STAT_PCIX_ACTIVE_SHFT)
+#define PCIBR_STAT_PCIX_SPEED_SHFT	34
+#define PCIBR_STAT_PCIX_SPEED		(0x3ull << PCIBR_STAT_PCIX_SPEED_SHFT)
+
+/*
+ * CONTROL register		offset 0x00000020
+ */
+#define PCIBR_CTRL_PCI_SPEED_SHFT	4
+#define PCIBR_CTRL_PCI_SPEED		(0x3 << PCIBR_CTRL_PCI_SPEED_SHFT)
+#define PCIBR_CTRL_SYS_END_SHFT		9
+#define PCIBR_CTRL_SYS_END		(0x1 << PCIBR_CTRL_SYS_END_SHFT)
+#define PCIBR_CTRL_CLR_TLLP_SHFT	10
+#define PCIBR_CTRL_CLR_TLLP		(0x1 << PCIBR_CTRL_CLR_TLLP_SHFT)
+#define PCIBR_CTRL_CLR_RLLP_SHFT	11
+#define PCIBR_CTRL_CLR_RLLP		(0x1 << PCIBR_CTRL_CLR_RLLP_SHFT)
+#define PCIBR_CTRL_LLP_XBOW_CRD_SHFT	12
+#define PCIBR_CTRL_CRED_LIM		(0xF << PCIBR_CTRL_LLP_XBOW_CRD_SHFT)
+#define PCIBR_CTRL_F_BAD_PKT_SHFT	16
+#define PCIBR_CTRL_F_BAD_PKT		(0x1 << PCIBR_CTRL_F_BAD_PKT_SHFT)
+#define PCIBR_CTRL_PAGE_SIZE_SHFT	21
+#define PCIBR_CTRL_PAGE_SIZE		(0x1 << PCIBR_CTRL_PAGE_SIZE_SHFT)
+#define PCIBR_CTRL_MEM_SWAP_SHFT	22
+#define PCIBR_CTRL_MEM_SWAP		(0x1 << PCIBR_CTRL_MEM_SWAP_SHFT)
+#define PCIBR_CTRL_RST_SHFT		24
+#define PCIBR_CTRL_RST_PIN(x)		(PCIBR_CTRL_RST(0x1 << (x)))
+#define PCIBR_CTRL_RST(n)		((n) << PCIBR_CTRL_RST_SHFT)
+#define PCIBR_CTRL_RST_MASK		(PCIBR_CTRL_RST(0xF))
+#define PCIBR_CTRL_PAR_EN_REQ_SHFT	29
+#define PCIBR_CTRL_PAR_EN_REQ		(0x1 << PCIBR_CTRL_PAR_EN_REQ_SHFT)
+#define PCIBR_CTRL_PAR_EN_RESP_SHFT	30
+#define PCIBR_CTRL_PAR_EN_RESP		(0x1 << PCIBR_CTRL_PAR_EN_RESP_SHFT)
+#define PCIBR_CTRL_PAR_EN_ATE_SHFT	31
+#define PCIBR_CTRL_PAR_EN_ATE		(0x1ull << PCIBR_CTRL_PAR_EN_ATE_SHFT)
+#define PCIBR_CTRL_FUN_NUM_MASK		(PCIBR_CTRL_FUN_NUM(0x7))
+#define PCIBR_CTRL_FUN_NUM(x)		((unsigned long long)(x) << 40)
+#define PCIBR_CTRL_DEV_NUM_MASK		(PCIBR_CTRL_DEV_NUM(0x1f))
+#define PCIBR_CTRL_DEV_NUM(x)		((unsigned long long)(x) << 43)
+#define PCIBR_CTRL_BUS_NUM_MASK		(PCIBR_CTRL_BUS_NUM(0xff))
+#define PCIBR_CTRL_BUS_NUM(x)		((unsigned long long)(x) << 48)
+#define PCIBR_CTRL_RELAX_ORDER_SHFT	61
+#define PCIBR_CTRL_RELAX_ORDER		(0x1ull << PCIBR_CTRL_RELAX_ORDER_SHFT)
+#define PCIBR_CTRL_NO_SNOOP_SHFT	62
+#define PCIBR_CTRL_NO_SNOOP		(0x1ull << PCIBR_CTRL_NO_SNOOP_SHFT)
+
+/*
+ * PCI DIRECT Mapping		offset 0x00000080
+ */
+#define PCIBR_DIRMAP_DIROFF_SHFT	0
+#define PCIBR_DIRMAP_DIROFF		(0x1FFFF << PCIBR_DIRMAP_DIROFF_SHFT)
+#define PCIBR_DIRMAP_ADD512_SHFT	17
+#define PCIBR_DIRMAP_ADD512		(0x1 << PCIBR_DIRMAP_ADD512_SHFT)
+
+#define PCIBR_DIRMAP_OFF_ADDRSHFT	(31)	/* lsbit of DIR_OFF */
+
+/*
+ * Interrupt Status register		offset 0x00000100
+ */
+#define PCIBR_ISR_PCIX_SPLIT_MSG_PE	(0x1ull << 45)
+#define PCIBR_ISR_PCIX_SPLIT_EMSG	(0x1ull << 44)
+#define PCIBR_ISR_PCIX_SPLIT_TO		(0x1ull << 43)
+#define PCIBR_ISR_PCIX_UNEX_COMP	(0x1ull << 42)
+#define PCIBR_ISR_INT_RAM_PERR		(0x1ull << 41)
+#define PCIBR_ISR_PCIX_ARB_ERR		(0x1ull << 40)
+#define PCIBR_ISR_PCIX_REQ_TOUT		(0x1ull << 39)
+#define PCIBR_ISR_PCIX_TABORT		(0x1ull << 38)
+#define PCIBR_ISR_PCIX_PERR		(0x1ull << 37)
+#define PCIBR_ISR_PCIX_SERR		(0x1ull << 36)
+#define PCIBR_ISR_PCIX_MRETRY		(0x1ull << 35)
+#define PCIBR_ISR_PCIX_MTOUT		(0x1ull << 34)
+#define PCIBR_ISR_PCIX_DA_PARITY	(0x1ull << 33)
+#define PCIBR_ISR_PCIX_AD_PARITY	(0x1ull << 32)
+#define PCIBR_ISR_PMU_PAGE_FAULT	(0x1ull << 30)
+#define PCIBR_ISR_UNEXP_RESP		(0x1ull << 29)
+#define PCIBR_ISR_BAD_XRESP_PKT		(0x1ull << 28)
+#define PCIBR_ISR_BAD_XREQ_PKT		(0x1ull << 27)
+#define PCIBR_ISR_RESP_XTLK_ERR		(0x1ull << 26)
+#define PCIBR_ISR_REQ_XTLK_ERR		(0x1ull << 25)
+#define PCIBR_ISR_INVLD_ADDR		(0x1ull << 24)
+#define PCIBR_ISR_UNSUPPORTED_XOP	(0x1ull << 23)
+#define PCIBR_ISR_XREQ_FIFO_OFLOW	(0x1ull << 22)
+#define PCIBR_ISR_LLP_REC_SNERR		(0x1ull << 21)
+#define PCIBR_ISR_LLP_REC_CBERR		(0x1ull << 20)
+#define PCIBR_ISR_LLP_RCTY		(0x1ull << 19)
+#define PCIBR_ISR_LLP_TX_RETRY		(0x1ull << 18)
+#define PCIBR_ISR_LLP_TCTY		(0x1ull << 17)
+#define PCIBR_ISR_PCI_ABORT		(0x1ull << 15)
+#define PCIBR_ISR_PCI_PARITY		(0x1ull << 14)
+#define PCIBR_ISR_PCI_SERR		(0x1ull << 13)
+#define PCIBR_ISR_PCI_PERR		(0x1ull << 12)
+#define PCIBR_ISR_PCI_MST_TIMEOUT	(0x1ull << 11)
+#define PCIBR_ISR_PCI_RETRY_CNT		(0x1ull << 10)
+#define PCIBR_ISR_XREAD_REQ_TIMEOUT	(0x1ull << 9)
+#define PCIBR_ISR_INT_MSK		(0xffull << 0)
+#define PCIBR_ISR_INT(x)		(0x1ull << (x))
+
+#define PCIBR_ISR_LINK_ERROR		\
+                (PCIBR_ISR_LLP_REC_SNERR|PCIBR_ISR_LLP_REC_CBERR|	\
+                 PCIBR_ISR_LLP_RCTY|PCIBR_ISR_LLP_TX_RETRY|		\
+                 PCIBR_ISR_LLP_TCTY)
+#define PCIBR_ISR_PCIBUS_PIOERR		\
+                (PCIBR_ISR_PCI_MST_TIMEOUT|PCIBR_ISR_PCI_ABORT|		\
+                 PCIBR_ISR_PCIX_MTOUT|PCIBR_ISR_PCIX_TABORT)
+#define PCIBR_ISR_PCIBUS_ERROR		\
+		(PCIBR_ISR_PCIBUS_PIOERR|PCIBR_ISR_PCI_PERR|		\
+		 PCIBR_ISR_PCI_SERR|PCIBR_ISR_PCI_RETRY_CNT|		\
+		 PCIBR_ISR_PCI_PARITY|PCIBR_ISR_PCIX_PERR|		\
+		 PCIBR_ISR_PCIX_SERR|PCIBR_ISR_PCIX_MRETRY|		\
+		 PCIBR_ISR_PCIX_AD_PARITY|PCIBR_ISR_PCIX_DA_PARITY|	\
+		 PCIBR_ISR_PCIX_REQ_TOUT|PCIBR_ISR_PCIX_UNEX_COMP|	\
+		 PCIBR_ISR_PCIX_SPLIT_TO|PCIBR_ISR_PCIX_SPLIT_EMSG|	\
+		 PCIBR_ISR_PCIX_SPLIT_MSG_PE)
+#define PCIBR_ISR_XTALK_ERROR		\
+		(PCIBR_ISR_XREAD_REQ_TIMEOUT|PCIBR_ISR_XREQ_FIFO_OFLOW|	\
+		 PCIBR_ISR_UNSUPPORTED_XOP|PCIBR_ISR_INVLD_ADDR|	\
+		 PCIBR_ISR_REQ_XTLK_ERR|PCIBR_ISR_RESP_XTLK_ERR|	\
+		 PCIBR_ISR_BAD_XREQ_PKT|PCIBR_ISR_BAD_XRESP_PKT|	\
+		 PCIBR_ISR_UNEXP_RESP|TIOCP_ISR_CTALK_PROT_ERR)
+#define PCIBR_ISR_ERRORS		\
+		(PCIBR_ISR_LINK_ERROR|PCIBR_ISR_PCIBUS_ERROR|		\
+		 PCIBR_ISR_XTALK_ERROR|					\
+		 PCIBR_ISR_PMU_PAGE_FAULT|PCIBR_ISR_INT_RAM_PERR)
+
+#define PCIBR_ISR_ERROR_FATAL		\
+		((PCIBR_ISR_XTALK_ERROR & ~PCIBR_ISR_XREAD_REQ_TIMEOUT)|\
+		 PCIBR_ISR_PCI_SERR|PCIBR_ISR_PCI_PARITY|		\
+		 PCIBR_ISR_PCIX_SERR|PCIBR_ISR_PCIX_AD_PARITY|		\
+		 PCIBR_ISR_PCIX_DA_PARITY|PCIBR_ISR_INT_RAM_PERR|	\
+		 PCIBR_ISR_PCIX_SPLIT_MSG_PE)
+
+#define PCIBR_ISR_ERROR_DUMP		\
+		(PCIBR_ISR_PCIBUS_ERROR|PCIBR_ISR_PMU_PAGE_FAULT|	\
+		 PCIBR_ISR_XTALK_ERROR|PCIBR_ISR_PCIX_ARB_ERR|		\
+		 PCIBR_ISR_INT_RAM_PERR)
+
+#define PCIBR_ISR_ERR_START		8
+#define PCIBR_ISR_MAX_ERRS_PIC          45
+#define PCIBR_ISR_MAX_ERRS_TIOCP	47
+#define PCIBR_ISR_MAX_ERRS              PCIBR_ISR_MAX_ERRS_TIOCP
+
+/*
+ * Interrupt Enable register		offset 0x00000108
+ */
+#define PCIBR_IER_PCIX_SPLIT_MSG_PE	(0x1ull << 45)
+#define PCIBR_IER_PCIX_SPLIT_EMSG	(0x1ull << 44)
+#define PCIBR_IER_PCIX_SPLIT_TO		(0x1ull << 43)
+#define PCIBR_IER_PCIX_UNEX_COMP	(0x1ull << 42)
+#define PCIBR_IER_INT_RAM_PERR		(0x1ull << 41)
+#define PCIBR_IER_PCIX_ARB_ERR		(0x1ull << 40)
+#define PCIBR_IER_PCIX_REQ_TOUT		(0x1ull << 39)
+#define PCIBR_IER_PCIX_TABORT		(0x1ull << 38)
+#define PCIBR_IER_PCIX_PERR		(0x1ull << 37)
+#define PCIBR_IER_PCIX_SERR		(0x1ull << 36)
+#define PCIBR_IER_PCIX_MRETRY		(0x1ull << 35)
+#define PCIBR_IER_PCIX_MTOUT		(0x1ull << 34)
+#define PCIBR_IER_PCIX_DA_PARITY	(0x1ull << 33)
+#define PCIBR_IER_PCIX_AD_PARITY	(0x1ull << 32)
+#define PCIBR_IER_PMU_PAGE_FAULT	(0x1ull << 30)
+#define PCIBR_IER_UNEXP_RESP		(0x1ull << 29)
+#define PCIBR_IER_BAD_XRESP_PKT		(0x1ull << 28)
+#define PCIBR_IER_BAD_XREQ_PKT		(0x1ull << 27)
+#define PCIBR_IER_RESP_XTLK_ERR		(0x1ull << 26)
+#define PCIBR_IER_REQ_XTLK_ERR		(0x1ull << 25)
+#define PCIBR_IER_INVLD_ADDR		(0x1ull << 24)
+#define PCIBR_IER_UNSUPPORTED_XOP	(0x1ull << 23)
+#define PCIBR_IER_XREQ_FIFO_OFLOW	(0x1ull << 22)
+#define PCIBR_IER_LLP_REC_SNERR		(0x1ull << 21)
+#define PCIBR_IER_LLP_REC_CBERR		(0x1ull << 20)
+#define PCIBR_IER_LLP_RCTY		(0x1ull << 19)
+#define PCIBR_IER_LLP_TX_RETRY		(0x1ull << 18)
+#define PCIBR_IER_LLP_TCTY		(0x1ull << 17)
+#define PCIBR_IER_PCI_ABORT		(0x1ull << 15)
+#define PCIBR_IER_PCI_PARITY		(0x1ull << 14)
+#define PCIBR_IER_PCI_SERR		(0x1ull << 13)
+#define PCIBR_IER_PCI_PERR		(0x1ull << 12)
+#define PCIBR_IER_PCI_MST_TIMEOUT	(0x1ull << 11)
+#define PCIBR_IER_PCI_RETRY_CNT		(0x1ull << 10)
+#define PCIBR_IER_XREAD_REQ_TIMEOUT	(0x1ull << 9)
+#define PCIBR_IER_INT_MSK		(0xffull << 0)
+#define PCIBR_IER_INT(x)		(0x1ull << (x))
+
+/*
+ * Reset Interrupt register		offset 0x00000110
+ */
+#define PCIBR_IRR_PCIX_SPLIT_MSG_PE	(0x1ull << 45)
+#define PCIBR_IRR_PCIX_SPLIT_EMSG	(0x1ull << 44)
+#define PCIBR_IRR_PCIX_SPLIT_TO		(0x1ull << 43)
+#define PCIBR_IRR_PCIX_UNEX_COMP	(0x1ull << 42)
+#define PCIBR_IRR_INT_RAM_PERR		(0x1ull << 41)
+#define PCIBR_IRR_PCIX_ARB_ERR		(0x1ull << 40)
+#define PCIBR_IRR_PCIX_REQ_TOUT		(0x1ull << 39)
+#define PCIBR_IRR_PCIX_TABORT		(0x1ull << 38)
+#define PCIBR_IRR_PCIX_PERR		(0x1ull << 37)
+#define PCIBR_IRR_PCIX_SERR		(0x1ull << 36)
+#define PCIBR_IRR_PCIX_MRETRY		(0x1ull << 35)
+#define PCIBR_IRR_PCIX_MTOUT		(0x1ull << 34)
+#define PCIBR_IRR_PCIX_DA_PARITY	(0x1ull << 33)
+#define PCIBR_IRR_PCIX_AD_PARITY	(0x1ull << 32)
+#define PCIBR_IRR_PMU_PAGE_FAULT	(0x1ull << 30)
+#define PCIBR_IRR_UNEXP_RESP		(0x1ull << 29)
+#define PCIBR_IRR_BAD_XRESP_PKT		(0x1ull << 28)
+#define PCIBR_IRR_BAD_XREQ_PKT		(0x1ull << 27)
+#define PCIBR_IRR_RESP_XTLK_ERR		(0x1ull << 26)
+#define PCIBR_IRR_REQ_XTLK_ERR		(0x1ull << 25)
+#define PCIBR_IRR_INVLD_ADDR		(0x1ull << 24)
+#define PCIBR_IRR_UNSUPPORTED_XOP	(0x1ull << 23)
+#define PCIBR_IRR_XREQ_FIFO_OFLOW	(0x1ull << 22)
+#define PCIBR_IRR_LLP_REC_SNERR		(0x1ull << 21)
+#define PCIBR_IRR_LLP_REC_CBERR		(0x1ull << 20)
+#define PCIBR_IRR_LLP_RCTY		(0x1ull << 19)
+#define PCIBR_IRR_LLP_TX_RETRY		(0x1ull << 18)
+#define PCIBR_IRR_LLP_TCTY		(0x1ull << 17)
+#define PCIBR_IRR_PCI_ABORT		(0x1ull << 15)
+#define PCIBR_IRR_PCI_PARITY		(0x1ull << 14)
+#define PCIBR_IRR_PCI_SERR		(0x1ull << 13)
+#define PCIBR_IRR_PCI_PERR		(0x1ull << 12)
+#define PCIBR_IRR_PCI_MST_TIMEOUT	(0x1ull << 11)
+#define PCIBR_IRR_PCI_RETRY_CNT		(0x1ull << 10)
+#define PCIBR_IRR_XREAD_REQ_TIMEOUT	(0x1ull << 9)
+#define PCIBR_IRR_MULTI_CLR		(0x1ull << 6)
+#define PCIBR_IRR_CRP_GRP_CLR		(0x1ull << 5)
+#define PCIBR_IRR_RESP_BUF_GRP_CLR	(0x1ull << 4)
+#define PCIBR_IRR_REQ_DSP_GRP_CLR	(0x1ull << 3)
+#define PCIBR_IRR_LLP_GRP_CLR		(0x1ull << 2)
+#define PCIBR_IRR_SSRAM_GRP_CLR		(0x1ull << 1)
+#define PCIBR_IRR_PCI_GRP_CLR		(0x1ull << 0)
+#define PCIBR_IRR_GIO_GRP_CLR		(0x1ull << 0)
+#define PCIBR_IRR_ALL_CLR		0xffffffffffffffff
+
+
+/*
+ * Intr Device Select register	offset 0x00000120
+ */
+#define PCIBR_INT_DEV_SHFT(n)		((n)*3)
+#define PCIBR_INT_DEV_MASK(n)		(0x7 << PCIBR_INT_DEV_SHFT(n))
+
+
+/*
+ * DEVICE(x) register		offset 0x00000200
+ */
+#define PCIBR_DEV_OFF_ADDR_SHFT		20
+#define PCIBR_DEV_OFF_MASK		0x00000fff
+#define PCIBR_DEV_DEV_IO_MEM		(1ull << 12)
+#define PCIBR_DEV_DEV_SWAP		(1ull << 13)
+#define PCIBR_DEV_GBR			(1ull << 14)
+#define PCIBR_DEV_BARRIER		(1ull << 15)
+#define PCIBR_DEV_COH			(1ull << 16)
+#define PCIBR_DEV_PRECISE		(1ull << 17)
+#define PCIBR_DEV_PREF			(1ull << 18)
+#define PCIBR_DEV_SWAP_DIR		(1ull << 19)
+#define PCIBR_DEV_RT			(1ull << 21)
+#define PCIBR_DEV_DEV_SIZE		(1ull << 22)
+#define PCIBR_DEV_DIR_WRGA_EN		(1ull << 23)
+#define PCIBR_DEV_VIRTUAL_EN		(1ull << 25)
+#define PCIBR_DEV_FORCE_PCI_PAR		(1ull << 26)
+#define PCIBR_DEV_PAGE_CHK_DIS		(1ull << 27)
+#define PCIBR_DEV_ERR_LOCK_EN		(1ull << 28)
+
+/* habeck fix this */
+#define XBRIDGE_DEV_PMU_BITS		(0)
+#define BRIDGE_DEV_PMU_BITS		(0)
+#define BRIDGE_DEV_D32_BITS		(PCIBR_DEV_DIR_WRGA_EN		| \
+					 PCIBR_DEV_SWAP_DIR		| \
+					 PCIBR_DEV_PREF			| \
+					 PCIBR_DEV_PRECISE		| \
+					 PCIBR_DEV_COH			| \
+					 PCIBR_DEV_BARRIER)
+#define XBRIDGE_DEV_D64_BITS		(PCIBR_DEV_DIR_WRGA_EN		| \
+					 PCIBR_DEV_COH			| \
+					 PCIBR_DEV_BARRIER)
+#define BRIDGE_DEV_D64_BITS		(PCIBR_DEV_DIR_WRGA_EN		| \
+					 PCIBR_DEV_SWAP_DIR		| \
+					 PCIBR_DEV_COH			| \
+					 PCIBR_DEV_BARRIER)
+/* end: habeck fix this */
+
+/*
+ * Even & Odd RRB registers	offset 0x000000280 & 0x000000288
+ */
+/* Individual RRB masks after shifting down */
+#define PCIBR_RRB_EN			0x8
+#define PCIBR_RRB_DEV  			0x7
+#define PCIBR_RRB_VDEV 			0x6
+#define PCIBR_RRB_PDEV 			0x1
+
+/* RRB Virtual Channels (note VCHAN0 is the normal channel) */
+#define VCHAN0				0
+#define VCHAN1				1
+#define VCHAN2				2
+#define VCHAN3				3
+
+/* 
+ * RRB status register		offset 0x00000290
+ */
+#define PCIBR_RRB_VALID(r)		(0x00010000 << (r))
+#define PCIBR_RRB_INUSE(r)		(0x00000001 << (r))
+
+/* 
+ * RRB clear register 		offset 0x00000298
+ */
+#define PCIBR_RRB_CLEAR(r)		(0x00000001 << (r))
+
+
+/*****************************************************************************
+ *************************** PCI BRIDGE DMA DEFINES **************************
+ *****************************************************************************/
+typedef uint64_t			bridge_ate_t;
+typedef volatile bridge_ate_t	       *bridge_ate_p;
+
+/*
+ * PMU Address Translation Entry defines
+ */
+#define PCIBR_ATE_V			(0x1 << 0)
+#define PCIBR_ATE_CO			(0x1 << 1)
+#define PCIBR_ATE_PREC			(0x1 << 2)
+#define PCIBR_ATE_PREF			(0x1 << 3)
+#define PCIBR_ATE_BAR			(0x1 << 4)
+#define PCIBR_ATE_ADDR_SHFT		12
+
+/* bit 29 of the pci address is the SWAP bit */
+#define ATE_SWAPSHIFT			29
+#define ATE_SWAP_ON(x)			((x) |= (1 << ATE_SWAPSHIFT))
+#define ATE_SWAP_OFF(x)			((x) &= ~(1 << ATE_SWAPSHIFT))
+
+/* 
+ * Bridge 32bit Bus DMA addresses 
+ */
+#define PCIBR_LOCAL_BASE		0
+#define PCIBR_DMA_MAPPED_BASE		0x40000000
+#define PCIBR_DMA_MAPPED_SIZE		0x40000000      /* 1G Bytes */
+#define PCIBR_DMA_DIRECT_BASE		0x80000000
+#define PCIBR_DMA_DIRECT_SIZE		0x80000000      /* 2G Bytes */
+
+#define PCI32_LOCAL_BASE		PCIBR_LOCAL_BASE
+#define PCI32_MAPPED_BASE		PCIBR_DMA_MAPPED_BASE
+#define PCI32_DIRECT_BASE		PCIBR_DMA_DIRECT_BASE
+
+#define IS_PCI32_LOCAL(x)		((uint64_t)(x) < PCI32_MAPPED_BASE)
+#define IS_PCI32_MAPPED(x)		((uint64_t)(x) < PCI32_DIRECT_BASE && \
+					 (uint64_t)(x) >= PCI32_MAPPED_BASE)
+#define IS_PCI32_DIRECT(x)		((uint64_t)(x) >= PCI32_MAPPED_BASE)
+
+
+/* 
+ * Bridge 64bit Direct Map Attributes
+ */
+/* habeck:  this needs to be updated for TIO-CP support */
+#define PCI64_ATTR_TARG_MASK		0xf000000000000000
+#define PCI64_ATTR_TARG_SHFT		60
+#define PCI64_ATTR_PREF			(1ull << 59)
+#define PCI64_ATTR_PREC			(1ull << 58)
+#define PCI64_ATTR_VIRTUAL		(1ull << 57)
+#define PCI64_ATTR_BAR			(1ull << 56)
+#define PCI64_ATTR_SWAP			(1ull << 55)
+
+
+/*****************************************************************************
+ *************************** PCI BRIDGE PIO DEFINES **************************
+ *****************************************************************************/
+
+/*
+ * PCI Device Config space offsets
+ */
+#define PCIBR_CONFIG_BASE		0x20000
+#define PCIBR_CONFIG_TYPE1_BASE		0x28000
+#define PCIBR_CONFIG_END		0x30000
+#define PCIBR_CONFIG_SLOT_SIZE		0x1000
+#define PCIBR_CONFIG_FUNC_SIZE		0x100
+
+#define PCIBRIDGE_TYPE0_CFG_DEV0(busnum) \
+ 		((busnum) ? PCIBR_CONFIG_BASE + PIC_BUS1_OFFSET : \
+ 			    PCIBR_CONFIG_BASE)
+#define PCIBRIDGE_TYPE1_CFG(busnum) \
+		((busnum) ? PCIBR_CONFIG_TYPE1_BASE + PIC_BUS1_OFFSET : \
+			    PCIBR_CONFIG_TYPE1_BASE)
+#define PCIBRIDGE_TYPE0_CFG_DEV(busnum, s) \
+        	(PCIBRIDGE_TYPE0_CFG_DEV0(busnum) + \
+        	(s) * PCIBR_CONFIG_SLOT_SIZE)
+#define PCIBRIDGE_TYPE0_CFG_DEVF(busnum, s, f) \
+        	(PCIBRIDGE_TYPE0_CFG_DEV0(busnum) + \
+        	(s) * PCIBR_CONFIG_SLOT_SIZE+\
+        	(f) * PCIBR_CONFIG_FUNC_SIZE)
+
+#define PCIBR_TYPE1_CFG(ps)		PCIBRIDGE_TYPE1_CFG((ps)->bs_busnum)
+
+/* NOTE: 's' is the internal device number, not the external slot number */
+#define PCIBR_BUS_TYPE0_CFG_DEV(ps, s) \
+		PCIBRIDGE_TYPE0_CFG_DEV((ps)->bs_busnum, s+1)
+#define PCIBR_BUS_TYPE0_CFG_DEVF(ps, s, f) \
+		PCIBRIDGE_TYPE0_CFG_DEVF((ps)->bs_busnum, s+1, f)
+
+/* NOTE: 's' is the external slot number, not the internal device number */
+#define PCIBR_TYPE0_CFG_SLOT(s)		(PCIBR_CONFIG_BASE + \
+					 (PCIBR_CONFIG_SLOT_SIZE * (s)))
+#define PCIBR_TYPE0_CFG_SLOTF(s, f)	(PCIBR_CONFIG_BASE + \
+					 (PCIBR_CONFIG_SLOT_SIZE * (s)) + \
+					 (PCIBR_CONFIG_FUNC_SIZE * (f)))
+
+/*
+ * PCI Device Space macros 
+ */
+#define PCIBR_DEV_CNT			4		/* Up to 4 devices */
+#define PCIBR_DEVIO0			0x00200000 
+#define PCIBR_DEVIO1			0x00400000 
+#define PCIBR_DEVIO2			0x00600000
+#define PCIBR_DEVIO3			0x00700000
+#define PCIBR_DEVIO_OFF			0x00100000
+
+/* NOTE: Devices 0 & 1 have 2MByte spaces, Devices 2 & 3 have 1MByte spaces */
+#define PCIBR_DEVIO_2MB			0x00200000 
+#define PCIBR_DEVIO_1MB			0x00100000
+
+#define PCIBRIDGE_DEVIO0(busnum) ((busnum) ? \
+        (PCIBR_DEVIO0 + PIC_BUS1_OFFSET) : PCIBR_DEVIO0)
+#define PCIBRIDGE_DEVIO2(busnum) ((busnum) ? \
+        (PCIBR_DEVIO2 + PIC_BUS1_OFFSET) : PCIBR_DEVIO2)
+
+#define PCIBRIDGE_DEVIO(busnum, x) \
+	    ((x)<=1 ? PCIBRIDGE_DEVIO0(busnum)+(x)*PCIBR_DEVIO_2MB : \
+		PCIBRIDGE_DEVIO2(busnum)+((x)-2)*PCIBR_DEVIO_1MB)
+
+#define PCIBR_BRIDGE_DEVIO(ps, s)	PCIBRIDGE_DEVIO((ps)->bs_busnum, s)
+
+
+/*****************************************************************************
+ *************************** PCI BRIDGE MISC DEFINES *************************
+ *****************************************************************************/
+
+/* 
+ * PCI-X Read Buffer Attribute Register (RBAR) 
+ */
+#define NUM_RBAR 16     /* number of RBAR registers */
+
+/* 
+ * I/O page size 
+ */
+#if PAGE_SIZE == 4096
+#define IOPFNSHIFT			12      /* 4K per mapped page */
+#else
+#define IOPFNSHIFT			14      /* 16K per mapped page */
+#endif
+
+#define IOPGSIZE			(1 << IOPFNSHIFT)
+#define IOPG(x)				((x) >> IOPFNSHIFT)
+#define IOPGOFF(x)			((x) & (IOPGSIZE-1))
+
+#endif /* _ASM_IA64_SN_PCI_PCIBR_ASIC_H */
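
As a quick illustration of the address-carving macros above, a small sketch
follows; the slot, function and address values are arbitrary examples:

	/* Illustrative use of the pcibr_asic.h offset macros. */
	static void example_offsets(void)
	{
		/* type-0 config offset of slot 2, function 1:
		 * 0x20000 + 2*0x1000 + 1*0x100 = 0x22100
		 */
		unsigned long cfg_off = PCIBR_TYPE0_CFG_SLOTF(2, 1);

		/* split a 32-bit mapped DMA address into I/O page and offset */
		uint64_t addr = 0x40012345;
		uint64_t pg   = IOPG(addr);	/* page number (4K or 16K pages) */
		uint64_t off  = IOPGOFF(addr);	/* byte offset within the page */

		(void)cfg_off; (void)pg; (void)off;
	}
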
diff -Nru a/include/asm-ia64/sn/pci/pcibr_private.h b/include/asm-ia64/sn/pci/pcibr_private.h
--- a/include/asm-ia64/sn/pci/pcibr_private.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/pci/pcibr_private.h	Thu Nov  6 13:42:35 2003
@@ -1,13 +1,12 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
  */
-#ifndef _ASM_SN_PCI_PCIBR_PRIVATE_H
-#define _ASM_SN_PCI_PCIBR_PRIVATE_H
+#ifndef _ASM_IA64_SN_PCI_PCIBR_PRIVATE_H
+#define _ASM_IA64_SN_PCI_PCIBR_PRIVATE_H
 
 /*
  * pcibr_private.h -- private definitions for pcibr
@@ -35,18 +34,19 @@
 typedef struct pcibr_intr_cbuf_s *pcibr_intr_cbuf_t;
 
 typedef volatile unsigned *cfg_p;
-typedef volatile bridgereg_t *reg_p;
 
 /*
  * extern functions
  */
-cfg_p pcibr_slot_config_addr(bridge_t *, pciio_slot_t, int);
-cfg_p pcibr_func_config_addr(bridge_t *, pciio_bus_t bus, pciio_slot_t, pciio_function_t, int);
-unsigned pcibr_slot_config_get(bridge_t *, pciio_slot_t, int);
-unsigned pcibr_func_config_get(bridge_t *, pciio_slot_t, pciio_function_t, int);
+cfg_p pcibr_slot_config_addr(pci_bridge_t *, pciio_slot_t, int);
+cfg_p pcibr_func_config_addr(pci_bridge_t *, pciio_bus_t,
+				pciio_slot_t, pciio_function_t, int);
+unsigned pcibr_slot_config_get(pci_bridge_t *, pciio_slot_t, int);
+unsigned pcibr_func_config_get(pci_bridge_t *, pciio_slot_t, 
+				pciio_function_t, int);
 void pcibr_debug(uint32_t, vertex_hdl_t, char *, ...);
-void pcibr_slot_config_set(bridge_t *, pciio_slot_t, int, unsigned);
-void pcibr_func_config_set(bridge_t *, pciio_slot_t, pciio_function_t, int, 
+void pcibr_slot_config_set(pci_bridge_t *, pciio_slot_t, int, unsigned);
+void pcibr_func_config_set(pci_bridge_t *, pciio_slot_t, pciio_function_t, int, 
 								unsigned);
 /*
  * PCIBR_DEBUG() macro and debug bitmask defines
@@ -70,6 +70,7 @@
 #define PCIBR_DEBUG_HOTPLUG	0x00008000
 
 /* high freqency debug events (ie. map allocation, direct translation,...) */
+#define PCIBR_DEBUG_MMR		0x02000000  /* bridge MMR accesses */
 #define PCIBR_DEBUG_DEVREG	0x04000000  /* bridges device reg sets */
 #define PCIBR_DEBUG_PIOMAP	0x08000000  /* pcibr_piomap */
 #define PCIBR_DEBUG_PIODIR	0x10000000  /* pcibr_piotrans */
@@ -80,7 +81,7 @@
 extern char	 *pcibr_debug_module;
 extern int	  pcibr_debug_widget;
 extern int	  pcibr_debug_slot;
-extern uint32_t pcibr_debug_mask;
+extern uint32_t   pcibr_debug_mask;
 
 /* For low frequency events (ie. initialization, resource allocation,...) */
 #define PCIBR_DEBUG_ALWAYS(args) pcibr_debug args ;
@@ -90,7 +91,7 @@
  * set, then the overhead for this macro is just an extra 'if' check.
  */
 /* For high frequency events (ie. map allocation, direct translation,...) */
-#if 1 || DEBUG
+#if DEBUG
 #define PCIBR_DEBUG(args) PCIBR_DEBUG_ALWAYS(args)
 #else	/* DEBUG */
 #define PCIBR_DEBUG(args)
@@ -114,7 +115,7 @@
     xtalk_piomap_t          bp_xtalk_pio;	/* corresponding xtalk resource */
     pcibr_piomap_t	    bp_next;	/* Next piomap on the list */
     pcibr_soft_t	    bp_soft;	/* backpointer to bridge soft data */
-    atomic_t		    bp_toc[1];	/* PCI timeout counter */
+    atomic_t		    bp_toc;	/* PCI timeout counter */
 
 };
 
@@ -163,8 +164,8 @@
 #define	bi_flags	bi_pi.pi_flags	/* PCIBR_INTR flags */
 #define	bi_dev		bi_pi.pi_dev	/* associated pci card */
 #define	bi_lines	bi_pi.pi_lines	/* which PCI interrupt line(s) */
-#define bi_func		bi_pi.pi_func	/* handler function (when connected) */
-#define bi_arg		bi_pi.pi_arg	/* handler parameter (when connected) */
+#define	bi_func		bi_pi.pi_func	/* handler function (when connected) */
+#define	bi_arg		bi_pi.pi_arg	/* handler parameter (when connected) */
 #define bi_mustruncpu	bi_pi.pi_mustruncpu /* Where we must run. */
 #define bi_irq		bi_pi.pi_irq	/* IRQ assigned. */
 #define bi_cpu		bi_pi.pi_cpu	/* cpu assigned. */
@@ -200,14 +201,10 @@
 #define PCIBR_INFO_SLOT_GET_INT(info)	    (((pcibr_info_t)info)->f_dev)
 
 #define PCIBR_DEVICE_TO_SLOT(pcibr_soft, dev_num) \
-	(((dev_num) != PCIIO_SLOT_NONE) ? \
-	    (IS_PIC_SOFT((pcibr_soft)) ? ((dev_num) + 1) : (dev_num)) : \
-	    PCIIO_SLOT_NONE)
+	(((dev_num) != PCIIO_SLOT_NONE) ? ((dev_num) + 1) : PCIIO_SLOT_NONE)
 
 #define PCIBR_SLOT_TO_DEVICE(pcibr_soft, slot) \
-        (((slot) != PCIIO_SLOT_NONE) ? \
-            (IS_PIC_SOFT((pcibr_soft)) ? ((slot) - 1) : (slot)) : \
-            PCIIO_SLOT_NONE)
+        (((slot) != PCIIO_SLOT_NONE) ? ((slot) - 1) : PCIIO_SLOT_NONE)
 
 /*
  * per-connect point pcibr data, including standard pciio data in-line:
@@ -225,11 +222,11 @@
 #define	f_pops		f_c.c_pops	/* cached provider from c_master */
 #define	f_efunc		f_c.c_efunc	/* error handling function */
 #define	f_einfo		f_c.c_einfo	/* first parameter for efunc */
-#define f_window        f_c.c_window    /* state of BASE regs */
+#define	f_window	f_c.c_window	/* state of BASE regs */
 #define	f_rwindow	f_c.c_rwindow	/* expansion ROM BASE regs */
 #define	f_rbase		f_c.c_rbase	/* expansion ROM base */
 #define	f_rsize		f_c.c_rsize	/* expansion ROM size */
-#define f_piospace      f_c.c_piospace  /* additional I/O spaces allocated */
+#define	f_piospace	f_c.c_piospace	/* additional I/O spaces allocated */
 
     /* pcibr-specific connection state */
     int			    f_ibit[4];	/* Bridge bit for each INTx */
@@ -246,7 +243,8 @@
 struct pcibr_intr_list_s {
     pcibr_intr_list_t       il_next;
     pcibr_intr_t            il_intr;
-    volatile bridgereg_t   *il_wrbf;	/* ptr to b_wr_req_buf[] */
+    pcibr_soft_t	    il_soft;
+    pciio_slot_t	    il_slot;
 };
 
 /* =====================================================================
@@ -254,47 +252,30 @@
  */
 struct pcibr_intr_wrap_s {
     pcibr_soft_t            iw_soft;	/* which bridge */
-    volatile bridgereg_t   *iw_stat;	/* ptr to b_int_status */
-    bridgereg_t             iw_ibit;	/* bit in b_int_status */
+    int			    iw_ibit;	/* bit in b_int_status */
     pcibr_intr_list_t       iw_list;	/* ghostbusters! */
     int			    iw_hdlrcnt;	/* running handler count */
     int			    iw_shared;  /* if Bridge bit is shared */
     int			    iw_connected; /* if already connected */
 };
 
-#define	PCIBR_ISR_ERR_START		8
-#define PCIBR_ISR_MAX_ERRS_BRIDGE 	32
-#define PCIBR_ISR_MAX_ERRS_PIC		45
-#define PCIBR_ISR_MAX_ERRS	PCIBR_ISR_MAX_ERRS_PIC
-
 /*
  * PCI Base Address Register window allocation constants.
  * To reduce the size of the internal resource mapping structures, do
  * not use the entire PCI bus I/O address space
  */ 
-#define PCIBR_BUS_IO_BASE      0x100000
+#define PCIBR_BUS_IO_BASE      0x200000
 #define PCIBR_BUS_IO_MAX       0x0FFFFFFF
 #define PCIBR_BUS_IO_PAGE      0x100000
 
-#define PCIBR_BUS_SWIN_BASE    _PAGESZ
+#define PCIBR_BUS_SWIN_BASE    PAGE_SIZE
 #define PCIBR_BUS_SWIN_MAX     0x000FFFFF
-#define PCIBR_BUS_SWIN_PAGE    _PAGESZ
+#define PCIBR_BUS_SWIN_PAGE    PAGE_SIZE
 
 #define PCIBR_BUS_MEM_BASE     0x200000
 #define PCIBR_BUS_MEM_MAX      0x3FFFFFFF
 #define PCIBR_BUS_MEM_PAGE     0x100000
 
-/* defines for pcibr_soft_s->bs_bridge_type */
-#define PCIBR_BRIDGETYPE_BRIDGE		0
-#define PCIBR_BRIDGETYPE_XBRIDGE	1
-#define PCIBR_BRIDGETYPE_PIC		2
-#define IS_XBRIDGE_SOFT(ps) (ps->bs_bridge_type == PCIBR_BRIDGETYPE_XBRIDGE)
-#define IS_PIC_SOFT(ps)     (ps->bs_bridge_type == PCIBR_BRIDGETYPE_PIC)
-#define IS_PIC_BUSNUM_SOFT(ps, bus)	\
-		(IS_PIC_SOFT(ps) && ((ps)->bs_busnum == (bus)))
-#define IS_BRIDGE_SOFT(ps)  (ps->bs_bridge_type == PCIBR_BRIDGETYPE_BRIDGE)
-#define IS_XBRIDGE_OR_PIC_SOFT(ps) (IS_XBRIDGE_SOFT(ps) || IS_PIC_SOFT(ps))
-
 /*
  * Runtime checks for workarounds.
  */
@@ -302,11 +283,13 @@
 	((1 << XWIDGET_PART_REV_NUM_REV(pcibr_soft->bs_rev_num)) & pv)
 /*
  * Defines for individual WARs. Each is a bitmask of applicable
- * part revision numbers. (1 << 1) == rev A, (1 << 2) == rev B, etc.
+ * part revision numbers. (1 << 1) == rev A, (1 << 2) == rev B,
+ * (3 << 1) == (rev A or rev B), etc
  */
 #define PV854697 (~0)     /* PIC: write 64bit regs as 64bits. permanent */
 #define PV854827 (~0)     /* PIC: fake widget 0xf presence bit. permanent */
-#define PV855271 (1 << 1) /* PIC: PIC: use virt chan iff 64-bit device. */
+#define PV855271 (1 << 1) /* PIC: use virt chan iff 64-bit device. */
+#define PV878674 (~0)     /* PIC: Don't allow 64bit PIOs.  permanent */
 #define PV855272 (1 << 1) /* PIC: runaway interrupt WAR */
 #define PV856155 (1 << 1) /* PIC: arbitration WAR */
 #define PV856864 (1 << 1) /* PIC: lower timeout to free TNUMs quicker */
@@ -315,6 +298,12 @@
 #define PV867308 (3 << 1) /* PIC: make LLP error interrupts FATAL for PIC */
 
 
+/* Bridgetype macros given a pcibr_soft structure */
+#define IS_PIC_SOFT(ps)     (ps->bs_bridge_type == PCIBR_BRIDGETYPE_PIC)
+#define IS_TIOCP_SOFT(ps)   (ps->bs_bridge_type == PCIBR_BRIDGETYPE_TIOCP)
+#define IS_PIC_BUSNUM_SOFT(ps, bus)	\
+		(IS_PIC_SOFT(ps) && ((ps)->bs_busnum == (bus)))
+
 /* defines for pcibr_soft_s->bs_bridge_mode */
 #define PCIBR_BRIDGEMODE_PCI_33		0x0
 #define PCIBR_BRIDGEMODE_PCI_66		0x2
@@ -347,17 +336,18 @@
  */
 
 struct pcibr_soft_s {
-    vertex_hdl_t          bs_conn;		/* xtalk connection point */
-    vertex_hdl_t          bs_vhdl;		/* vertex owned by pcibr */
-    uint64_t                bs_int_enable;	/* Mask of enabled intrs */
-    bridge_t               *bs_base;		/* PIO pointer to Bridge chip */
-    char                   *bs_name;		/* hw graph name */
-    xwidgetnum_t            bs_xid;		/* Bridge's xtalk ID number */
-    vertex_hdl_t          bs_master;		/* xtalk master vertex */
-    xwidgetnum_t            bs_mxid;		/* master's xtalk ID number */
+    vertex_hdl_t            bs_conn;	/* xtalk connection point */
+    vertex_hdl_t            bs_vhdl;	/* vertex owned by pcibr */
+    pci_bridge_t	   *bs_base;	/* PIO pointer to Bridge chip */
+    char                   *bs_name;	/* hw graph name */
+    char		    bs_asic_name[16];	/* ASIC name */
+    xwidgetnum_t            bs_xid;	/* Bridge's xtalk ID number */
+    vertex_hdl_t            bs_master;	/* xtalk master vertex */
+    xwidgetnum_t            bs_mxid;	/* master's xtalk ID number */
     pciio_slot_t            bs_first_slot;      /* first existing slot */
     pciio_slot_t            bs_last_slot;       /* last existing slot */
     pciio_slot_t            bs_last_reset;      /* last slot to reset */
+    uint32_t		    bs_unused_slot;	/* unavailable slots bitmask */
     pciio_slot_t	    bs_min_slot;	/* lowest possible slot */
     pciio_slot_t	    bs_max_slot;	/* highest possible slot */
     pcibr_soft_t	    bs_peers_soft;	/* PICs other bus's soft */
@@ -367,18 +357,19 @@
     xwidgetnum_t	    bs_dir_xport;	/* xtalk port for 32-bit PCI direct map */
 
     struct resource	    bs_int_ate_resource;/* root resource for internal ATEs */
-    struct resource	    bs_ext_ate_resource;/* root resource for external ATEs */
     void	 	    *bs_allocated_ate_res;/* resource struct allocated */
     short		    bs_int_ate_size;	/* number of internal ates */
     short		    bs_bridge_type;	/* see defines above */
     short		    bs_bridge_mode;	/* see defines above */
-    int                     bs_rev_num;		/* revision number of Bridge */
+
+    int                     bs_rev_num;	/* revision number of Bridge */
 
     /* bs_dma_flags are the forced dma flags used on all DMAs. Used for
      * working around ASIC rev issues and protocol specific requirements
      */
     unsigned                bs_dma_flags;	/* forced DMA flags */
 
+    nasid_t		    bs_nasid;		/* nasid this bus is on */
     moduleid_t		    bs_moduleid;	/* io brick moduleid */
     short		    bs_bricktype;	/* io brick type */
 
@@ -388,8 +379,14 @@
      */
     spinlock_t              bs_lock;
     
+#ifdef PCI_HOTPLUG
+    /* Lock to serialize access to a PCI bus during hot-plug operations */
+    mrlock_t		    bs_bus_lock[1]; 
+#endif /* PCI_HOTPLUG */
+
     vertex_hdl_t	    bs_noslot_conn;	/* NO-SLOT connection point */
     pcibr_info_t	    bs_noslot_info;
+
     struct pcibr_soft_slot_s {
 	/* information we keep about each CFG slot */
 
@@ -445,7 +442,7 @@
 	/* Shadow value for Device(x) register,
 	 * so we don't have to go to the chip.
 	 */
-	bridgereg_t             bss_device;
+	uint64_t		bss_device;
 
 	/* Number of sets on GBR/REALTIME bit outstanding
 	 * Used by Priority I/O for tracking reservations
@@ -474,13 +471,6 @@
 	iopaddr_t		bss_d32_base;
 	unsigned		bss_d32_flags;
 
-	/* Shadow information used for implementing
-	 * Bridge Hardware WAR #484930
-	 */
-	atomic_t		bss_ext_ates_active;
-        volatile unsigned      *bss_cmd_pointer;
-	unsigned		bss_cmd_shadow;
-
     } bs_slot[8];
 
     pcibr_intr_bits_f	       *bs_intr_bits;
@@ -522,11 +512,12 @@
      *  time for the indexed slot/vchan number; array[slot][vchan]
      */
     int                     bs_rrb_fixed;
-    int                     bs_rrb_avail[2];
-    int                     bs_rrb_res[8];
-    int                     bs_rrb_res_dflt[8];
+    int			    bs_rrb_avail[2];
+    int			    bs_rrb_res[8];
+    int			    bs_rrb_res_dflt[8];
     int			    bs_rrb_valid[8][4];
     int			    bs_rrb_valid_dflt[8][4];
+
     struct {
 	/* Each Bridge interrupt bit has a single XIO
 	 * interrupt channel allocated.
@@ -537,6 +528,10 @@
 	 * Bridge interrupt bit.
 	 */
 	struct pcibr_intr_wrap_s  bsi_pcibr_intr_wrap;
+	/* The bus and interrupt bit, used for pcibr_setpciint().
+	 * The pci busnum is bit3, int_bits bit2:0
+	 */
+	uint32_t		bsi_int_bit;
 
     } bs_intr[8];
 
@@ -564,9 +559,6 @@
      */
     struct br_errintr_info {
 	int                     bserr_toutcnt;
-#ifdef LATER
-	toid_t                  bserr_toutid;	/* Timeout started by errintr */
-#endif	/* LATER */
 	iopaddr_t               bserr_addr;	/* Address where error occured */
 	uint64_t		bserr_intstat;	/* interrupts active at error dump */
     } bs_errinfo;
@@ -602,9 +594,9 @@
 #define PCIBR_BUS_ADDR_IO_FREED        2  /* Reserved PROM I/O addr freed */
 
     struct bs_errintr_stat_s {
-	uint32_t              bs_errcount_total;
-	uint32_t              bs_lasterr_timestamp;
-	uint32_t              bs_lasterr_snapshot;
+	uint32_t		bs_errcount_total;
+	uint32_t		bs_lasterr_timestamp;
+	uint32_t		bs_lasterr_snapshot;
     } bs_errintr_stat[PCIBR_ISR_MAX_ERRS];
 
     /*
@@ -620,6 +612,13 @@
      */
     unsigned		bs_pio_end_io;
     unsigned		bs_pio_end_mem;
+
+    /*
+     * Generic bus info.  Note that some/all of the information in this
+     * structure is not kept up to date in real time.  It will be filled in
+     * for each pcibr_businfo_get() call.
+     */
+    struct pciio_businfo_s	bs_businfo;
 };
 
 #define	PCIBR_ERRTIME_THRESHOLD		(100)
@@ -662,8 +661,8 @@
 /*
  * mem alloc/free macros
  */
-#define NEWAf(ptr,n,f)	(ptr = snia_kmem_zalloc((n)*sizeof (*(ptr)), (f&PCIIO_NOSLEEP)?KM_NOSLEEP:KM_SLEEP))
-#define NEWA(ptr,n)	(ptr = snia_kmem_zalloc((n)*sizeof (*(ptr)), KM_SLEEP))
+#define NEWAf(ptr,n,f)	(ptr = snia_kmem_zalloc((n)*sizeof (*(ptr))))
+#define NEWA(ptr,n)	(ptr = snia_kmem_zalloc((n)*sizeof (*(ptr))))
 #define DELA(ptr,n)	(kfree(ptr))
 
 #define NEWf(ptr,f)	NEWAf(ptr,1,f)
@@ -682,17 +681,17 @@
     size_t                  count;	/* size of PIO space */
 };
 
-/* Use io spin locks. This ensures that all the PIO writes from a particular
- * CPU to a particular IO device are synched before the start of the next
- * set of PIO operations to the same device.
- */
-#ifdef PCI_LATER
-#define pcibr_lock(pcibr_soft)		io_splock(pcibr_soft->bs_lock)
-#define pcibr_unlock(pcibr_soft, s)	io_spunlock(pcibr_soft->bs_lock,s)
-#else
-#define pcibr_lock(pcibr_soft)		1
-#define pcibr_unlock(pcibr_soft, s)	
-#endif	/* PCI_LATER */
+/* 
+ * pcibr_soft structure locking primitives
+ */
+inline static unsigned long
+pcibr_lock(pcibr_soft_t pcibr_soft)
+{
+        unsigned long flag;
+        spin_lock_irqsave(&pcibr_soft->bs_lock, flag);
+        return(flag);
+}
+#define pcibr_unlock(pcibr_soft, flag)  spin_unlock_irqrestore(&pcibr_soft->bs_lock, flag)
 
 #define PCIBR_VALID_SLOT(ps, s)     (s < PCIBR_NUM_SLOTS(ps))
 #define PCIBR_D64_BASE_UNSET    (0xFFFFFFFFFFFFFFFF)
@@ -703,14 +702,13 @@
 #if PCIBR_SOFT_LIST
 typedef struct pcibr_list_s *pcibr_list_p;
 struct pcibr_list_s {
-	pcibr_list_p            bl_next;
-	pcibr_soft_t            bl_soft;
-	vertex_hdl_t          bl_vhdl;
+    pcibr_list_p            bl_next;
+    pcibr_soft_t            bl_soft;
+    vertex_hdl_t            bl_vhdl;
 };
 #endif /* PCIBR_SOFT_LIST */
 
-
-// Devices per widget: 2 buses, 2 slots per bus, 8 functions per slot.
+/* Devices per widget: 2 buses, 2 slots per bus, 8 functions per slot. */
 #define DEV_PER_WIDGET (2*2*8)
 
 struct sn_flush_device_list {
@@ -736,4 +734,132 @@
         unsigned long        iio_itte7;
 };
 
-#endif				/* _ASM_SN_PCI_PCIBR_PRIVATE_H */
+
+/*
+ * habeck: move this section of code to "porting" specific header file...
+ * maybe something like pcibr_porting.h???
+ */
+#define ATOMIC_INC(a)	atomic_inc(a)
+#define ATOMIC_DEC(a)	atomic_dec(a)
+#define ATOMIC_READ(a)	atomic_read(a)
+
+#define K_DEBUG		KERN_DEBUG
+#define K_CONT		KERN_INFO
+#define K_NOTE		KERN_NOTICE
+#define K_WARN		KERN_WARNING
+#define K_ERR		KERN_ERR
+#define K_CRIT		KERN_CRIT
+#define K_ALERT		KERN_ALERT
+#define K_EMER		KERN_EMERG
+
+#define KERN_MSG(type, msg...)		printk(type msg)
+
+/*
+ * prototypes for the bridge asic register access routines in pcibr_reg.c
+ */
+extern uint64_t		pcireg_id_get(void *ptr);
+extern uint64_t		pcireg_stat_get(void *);
+extern uint64_t		pcireg_bus_err_get(void *);
+extern uint64_t		pcireg_control_get(void *);
+extern void		pcireg_control_set(void *, uint64_t);
+extern void		pcireg_control_bit_clr(void *, uint64_t);
+extern void		pcireg_control_bit_set(void *, uint64_t);
+extern uint64_t		pcireg_req_timeout_get(void *);
+extern void		pcireg_req_timeout_set(void *, uint64_t);
+extern uint64_t		pcireg_intr_dst_get(void *);
+extern void		pcireg_intr_dst_set(void *, uint64_t);
+extern uint64_t		pcireg_intr_dst_target_id_get(void *);
+extern void		pcireg_intr_dst_target_id_set(void *, uint64_t);
+extern uint64_t		pcireg_intr_dst_addr_get(void *);
+extern void		pcireg_intr_dst_addr_set(void *, uint64_t);
+extern uint64_t		pcireg_cmdword_err_get(void *);
+extern uint64_t		pcireg_llp_cfg_get(void *);
+extern void		pcireg_llp_cfg_set(void *, uint64_t);
+extern uint64_t		pcireg_tflush_get(void *);
+extern uint64_t		pcireg_linkside_err_get(void *);
+extern uint64_t		pcireg_resp_err_get(void *);
+extern uint64_t		pcireg_resp_err_addr_get(void *);
+extern uint64_t		pcireg_resp_err_buf_get(void *);
+extern uint64_t		pcireg_resp_err_dev_get(void *);
+extern uint64_t		pcireg_linkside_err_addr_get(void *);
+extern uint64_t		pcireg_dirmap_get(void *);
+extern void		pcireg_dirmap_set(void *, uint64_t);
+extern void		pcireg_dirmap_wid_set(void *, uint64_t);
+extern void		pcireg_dirmap_diroff_set(void *, uint64_t);
+extern void		pcireg_dirmap_add512_set(void *);
+extern void		pcireg_dirmap_add512_clr(void *);
+extern uint64_t		pcireg_map_fault_get(void *);
+extern uint64_t		pcireg_ssram_parity_get(void *);
+extern uint64_t		pcireg_arbitration_get(void *);
+extern void		pcireg_arbitration_set(void *, uint64_t);
+extern void		pcireg_arbitration_bit_clr(void *, uint64_t);
+extern void		pcireg_arbitration_bit_set(void *, uint64_t);
+extern uint64_t		pcireg_parity_err_get(void *);
+extern uint64_t		pcireg_type1_cntr_get(void *);
+extern void		pcireg_type1_cntr_set(void *, uint64_t);
+extern uint64_t		pcireg_timeout_get(void *);
+extern void		pcireg_timeout_set(void *, uint64_t);
+extern void		pcireg_timeout_bit_clr(void *, uint64_t);
+extern void		pcireg_timeout_bit_set(void *, uint64_t);
+extern uint64_t		pcireg_pci_bus_addr_get(void *);
+extern uint64_t		pcireg_pci_bus_addr_addr_get(void *);
+extern uint64_t		pcireg_intr_status_get(void *);
+extern uint64_t		pcireg_intr_enable_get(void *);
+extern void		pcireg_intr_enable_set(void *, uint64_t);
+extern void		pcireg_intr_enable_bit_clr(void *, uint64_t);
+extern void		pcireg_intr_enable_bit_set(void *, uint64_t);
+extern void		pcireg_intr_reset_set(void *, uint64_t);
+extern void		pcireg_intr_reset_bit_set(void *, uint64_t);
+extern uint64_t		pcireg_intr_mode_get(void *);
+extern void		pcireg_intr_mode_set(void *, uint64_t);
+extern void		pcireg_intr_mode_bit_set(void *, uint64_t);
+extern void		pcireg_intr_mode_bit_clr(void *, uint64_t);
+extern uint64_t		pcireg_intr_device_get(void *);
+extern void		pcireg_intr_device_set(void *, uint64_t);
+extern void		pcireg_intr_device_bit_set(void *, uint64_t);
+extern void		pcireg_intr_device_bit_clr(void *, uint64_t);
+extern uint64_t		pcireg_intr_host_err_get(void *);
+extern void		pcireg_intr_host_err_set(void *, uint64_t);
+extern uint64_t		pcireg_intr_addr_get(void *, int);
+extern void		pcireg_intr_addr_set(void *, int, uint64_t);
+extern void *		pcireg_intr_addr_addr(void *, int);
+extern uint64_t		pcireg_intr_addr_vect_get(void *, int);
+extern void		pcireg_intr_addr_vect_set(void *, int, uint64_t);
+extern uint64_t		pcireg_intr_addr_addr_get(void *, int);
+extern void		pcireg_intr_addr_addr_set(void *, int, uint64_t);
+extern uint64_t		pcireg_intr_view_get(void *);
+extern uint64_t		pcireg_intr_multiple_get(void *);
+extern void		pcireg_force_always_set(void *, int);
+extern void *		pcireg_force_always_addr_get(void *, int);
+extern void		pcireg_force_intr_set(void *, int);
+extern uint64_t		pcireg_device_get(void *, int);
+extern void		pcireg_device_set(void *, int, uint64_t);
+extern void		pcireg_device_bit_set(void *, int, uint64_t);
+extern void		pcireg_device_bit_clr(void *, int, uint64_t);
+extern uint64_t		pcireg_rrb_get(void *, int);
+extern void		pcireg_rrb_set(void *, int, uint64_t);
+extern void		pcireg_rrb_bit_set(void *, int, uint64_t);
+extern void		pcireg_rrb_bit_clr(void *, int, uint64_t);
+extern uint64_t		pcireg_rrb_status_get(void *);
+extern void		pcireg_rrb_clear_set(void *, uint64_t);
+extern uint64_t		pcireg_wrb_flush_get(void *, int);
+extern uint64_t		pcireg_pcix_bus_err_addr_get(void *);
+extern uint64_t		pcireg_pcix_bus_err_attr_get(void *);
+extern uint64_t		pcireg_pcix_bus_err_data_get(void *);
+extern uint64_t		pcireg_pcix_req_err_attr_get(void *);
+extern uint64_t		pcireg_pcix_req_err_addr_get(void *);
+extern uint64_t		pcireg_pcix_pio_split_addr_get(void *);
+extern uint64_t		pcireg_pcix_pio_split_attr_get(void *);
+
+extern cfg_p		pcireg_type1_cfg_addr(void *, pciio_function_t,
+					      int);
+extern cfg_p		pcireg_type0_cfg_addr(void *, pciio_slot_t,
+					      pciio_function_t, int);
+extern bridge_ate_t	pcireg_int_ate_get(void *, int);
+extern void		pcireg_int_ate_set(void *, int, bridge_ate_t);
+extern bridge_ate_p	pcireg_int_ate_addr(void *, int);
+
+extern uint64_t		pcireg_speed_get(void *);
+extern uint64_t		pcireg_mode_get(void *);
+
+#endif				/* _ASM_IA64_SN_PCI_PCIBR_PRIVATE_H */
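
A minimal sketch of the intended pairing for the pcibr_lock()/pcibr_unlock()
helpers defined above; the soft-structure pointer and the slot/bit values are
assumptions for illustration:

	/* pcibr_lock() returns the saved interrupt flags from
	 * spin_lock_irqsave(); they must be handed back to pcibr_unlock()
	 * on the same code path.
	 */
	static void example_update_device_shadow(pcibr_soft_t pcibr_soft,
						 int slot, uint64_t bits)
	{
		unsigned long flags;

		flags = pcibr_lock(pcibr_soft);

		/* update the Device(x) shadow while bs_lock is held */
		pcibr_soft->bs_slot[slot].bss_device |= bits;

		pcibr_unlock(pcibr_soft, flags);
	}
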
diff -Nru a/include/asm-ia64/sn/pci/pciio.h b/include/asm-ia64/sn/pci/pciio.h
--- a/include/asm-ia64/sn/pci/pciio.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/pci/pciio.h	Thu Nov  6 13:42:35 2003
@@ -1,29 +1,40 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
  */
-#ifndef _ASM_SN_PCI_PCIIO_H
-#define _ASM_SN_PCI_PCIIO_H
+#ifndef _ASM_IA64_SN_PCI_PCIIO_H
+#define _ASM_IA64_SN_PCI_PCIIO_H
 
 /*
  * pciio.h -- platform-independent PCI interface
  */
 
+#ifdef __KERNEL__
 #include <linux/config.h>
 #include <linux/ioport.h>
 #include <asm/sn/ioerror.h>
 #include <asm/sn/driver.h>
 #include <asm/sn/hcl.h>
-
+#else
+#include <linux/config.h>
+#include <linux/ioport.h>
+#include <ioerror.h>
+#include <driver.h>
+#include <hcl.h>
+#endif
 
 #ifndef __ASSEMBLY__
 
+#ifdef __KERNEL__
 #include <asm/sn/dmamap.h>
 #include <asm/sn/alenlist.h>
+#else
+#include <dmamap.h>
+#include <alenlist.h>
+#endif
 
 typedef int pciio_vendor_id_t;
 
@@ -40,6 +51,7 @@
 #define	PCIIO_SLOTS		((pciio_slot_t)32)
 #define	PCIIO_FUNCS		((pciio_function_t)8)
 
+#define PCIIO_BUS_NONE		((pciio_bus_t)255)	/* bus 255 reserved */
 #define	PCIIO_SLOT_NONE		((pciio_slot_t)255)
 #define	PCIIO_FUNC_NONE		((pciio_function_t)255)
 
@@ -141,6 +153,16 @@
  *	addresses xor-ed with 3 relative to what the
  *	device expects.
  *
+ * PCIIO_AGP_FASTWRITE
+ * PCIIO_AGP_NOFASTWRITE:
+ *	For AGP devices, enable or disable fast writes.  If FASTWRITE is to
+ *	be enabled, both the bus provider hardware and the AGP card must
+ *	support AGP fast writes.  It is an error to select both of these
+ * 	at the same time.  If neither is selected, the default mode is
+ *	undetermined.  If PCIIO_AGP_FASTWRITE is selected, but the card 
+ *	and/or provider hardware do not support fast writes, an error will
+ *	be returned at map allocation time.
+ *
  * NOTE: any "provider specific" flags that
  * conflict with the generic flags will
  * override the generic flags, locally
@@ -173,6 +195,12 @@
 #define	PCIIO_WORD_VALUES	0x2000	/* set BYTE SWAP for "word values" */
 
 /*
+ * Select AGP fast writes
+ */
+#define PCIIO_AGP_FASTWRITE	0x4000	/* select fast writes for PIO */
+#define PCIIO_AGP_NOFASTWRITE	0x8000	/* No fast writes */
+
+/*
  * Interface to deal with PCI endianness.
  * The driver calls pciio_endian_set once, supplying the actual endianness of
  * the device and the desired endianness.  On SGI systems, only use LITTLE if
@@ -188,18 +216,49 @@
 } pciio_endian_t;
 
 /*
- * Interface to set PCI arbitration priority for devices that require
- * realtime characteristics.  pciio_priority_set is used to switch a
- * device between the PCI high-priority arbitration ring and the low
- * priority arbitration ring.
- *
- * (Note: this is strictly for the PCI arbitrary priority.  It has
- * no direct relationship to GBR.)
- */
-typedef enum pciio_priority_e {
-    PCI_PRIO_LOW,
-    PCI_PRIO_HIGH
-} pciio_priority_t;
+ * Generic PCI bus information
+ */
+typedef enum pciio_asic_type_e {
+    PCIIO_ASIC_TYPE_UNKNOWN,
+    PCIIO_ASIC_TYPE_MACE,
+    PCIIO_ASIC_TYPE_BRIDGE,
+    PCIIO_ASIC_TYPE_XBRIDGE,
+    PCIIO_ASIC_TYPE_PIC,
+    PCIIO_ASIC_TYPE_TIOCP
+} pciio_asic_type_t;
+
+typedef enum pciio_bus_type_e {
+    PCIIO_BUS_TYPE_UNKNOWN,
+    PCIIO_BUS_TYPE_PCI,
+    PCIIO_BUS_TYPE_PCIX
+} pciio_bus_type_t;
+
+typedef enum pciio_bus_speed_e {
+    PCIIO_BUS_SPEED_UNKNOWN,
+    PCIIO_BUS_SPEED_33,
+    PCIIO_BUS_SPEED_66,
+    PCIIO_BUS_SPEED_100,
+    PCIIO_BUS_SPEED_133
+} pciio_bus_speed_t;
+
+#define PCIIO_GET_MULTI_MASTER(bi)   (pciio_businfo_multi_master_get(bi))
+#define PCIIO_GET_ASIC_TYPE(bi)      (pciio_businfo_asic_type_get(bi))
+#define PCIIO_GET_BUS_TYPE(bi)       (pciio_businfo_bus_type_get(bi))
+#define PCIIO_GET_BUS_SPEED(bi)      (pciio_businfo_bus_speed_get(bi))
+#define PCIIO_IS_BUS_TYPE_PCI(bi)    (PCIIO_BUS_TYPE_PCI == \
+                                      (pciio_businfo_bus_type_get(bi)))
+#define PCIIO_IS_BUS_TYPE_PCIX(bi)   (PCIIO_BUS_TYPE_PCIX == \
+                                      (pciio_businfo_bus_type_get(bi)))
+#define PCIIO_IS_BUS_SPEED_33(bi)    (PCIIO_BUS_SPEED_33 == \
+                                      (pciio_businfo_bus_speed_get(bi)))
+#define PCIIO_IS_BUS_SPEED_66(bi)    (PCIIO_BUS_SPEED_66 == \
+                                      (pciio_businfo_bus_speed_get(bi)))
+#define PCIIO_IS_BUS_SPEED_100(bi)   (PCIIO_BUS_SPEED_100 == \
+                                      (pciio_businfo_bus_speed_get(bi)))
+#define PCIIO_IS_BUS_SPEED_133(bi)   (PCIIO_BUS_SPEED_133 == \
+                                      (pciio_businfo_bus_speed_get(bi)))
+
+
 
 /*
  * handles of various sorts
@@ -209,9 +268,12 @@
 typedef struct pciio_intr_s *pciio_intr_t;
 typedef struct pciio_info_s *pciio_info_t;
 typedef struct pciio_piospace_s *pciio_piospace_t;
+typedef struct pciio_ppb_s *pciio_ppb_t;
 typedef struct pciio_win_info_s *pciio_win_info_t;
 typedef struct pciio_win_map_s *pciio_win_map_t;
 typedef struct pciio_win_alloc_s *pciio_win_alloc_t;
+typedef struct pciio_bus_map_s *pciio_bus_map_t;
+typedef struct pciio_businfo_s *pciio_businfo_t;
 
 /* PIO MANAGEMENT */
 
@@ -396,18 +458,11 @@
 typedef int	
 pciio_reset_f		(vertex_hdl_t conn);	/* pci connection point */
 
-typedef int
-pciio_write_gather_flush_f (vertex_hdl_t dev);    /* Device flushing buffers */
-
 typedef pciio_endian_t			/* actual endianness */
 pciio_endian_set_f      (vertex_hdl_t dev,	/* specify endianness for this device */
 			 pciio_endian_t device_end,	/* endianness of device */
 			 pciio_endian_t desired_end);	/* desired endianness */
 
-typedef pciio_priority_t
-pciio_priority_set_f    (vertex_hdl_t pcicard,
-			 pciio_priority_t device_prio);
-
 typedef uint64_t
 pciio_config_get_f	(vertex_hdl_t conn,	/* pci connection point */
 			 unsigned reg,		/* register byte offset */
@@ -442,8 +497,8 @@
 typedef int
 pciio_device_unregister_f	(vertex_hdl_t conn);
 
-typedef int
-pciio_dma_enabled_f		(vertex_hdl_t conn);
+typedef pciio_businfo_t
+pciio_businfo_get_f		(vertex_hdl_t conn);
 
 /*
  * Adapters that provide a PCI interface adhere to this software interface.
@@ -479,9 +534,7 @@
     pciio_provider_startup_f *provider_startup;
     pciio_provider_shutdown_f *provider_shutdown;
     pciio_reset_f	   *reset;
-    pciio_write_gather_flush_f *write_gather_flush;
     pciio_endian_set_f     *endian_set;
-    pciio_priority_set_f   *priority_set;
     pciio_config_get_f	   *config_get;
     pciio_config_set_f	   *config_set;
 
@@ -493,7 +546,9 @@
     pciio_driver_reg_callback_f *driver_reg_callback;
     pciio_driver_unreg_callback_f *driver_unreg_callback;
     pciio_device_unregister_f 	*device_unregister;
-    pciio_dma_enabled_f		*dma_enabled;
+
+    /* GENERIC BUS INFO */
+    pciio_businfo_get_f *businfo_get;
 } pciio_provider_t;
 
 /* PCI devices use these standard PCI provider interfaces */
@@ -521,13 +576,9 @@
 extern pciio_provider_startup_f pciio_provider_startup;
 extern pciio_provider_shutdown_f pciio_provider_shutdown;
 extern pciio_reset_f pciio_reset;
-extern pciio_write_gather_flush_f pciio_write_gather_flush;
 extern pciio_endian_set_f pciio_endian_set;
-extern pciio_priority_set_f pciio_priority_set;
 extern pciio_config_get_f pciio_config_get;
 extern pciio_config_set_f pciio_config_set;
-extern pciio_error_devenable_f pciio_error_devenable;
-extern pciio_error_extract_f pciio_error_extract;
 
 /* Widgetdev in the IOERROR structure is encoded as follows.
  *	+---------------------------+
@@ -693,10 +744,15 @@
 extern size_t		pciio_info_rom_size_get(pciio_info_t);
 extern int		pciio_info_type1_get(pciio_info_t);
 extern int              pciio_error_handler(vertex_hdl_t, int, ioerror_mode_t, ioerror_t *);
-extern int		pciio_dma_enabled(vertex_hdl_t);
+extern pciio_businfo_t	pciio_businfo_get(vertex_hdl_t);
+extern int               pciio_businfo_multi_master_get(pciio_businfo_t businfo);
+extern pciio_asic_type_t pciio_businfo_asic_type_get(pciio_businfo_t businfo);
+extern pciio_bus_type_t  pciio_businfo_bus_type_get(pciio_businfo_t businfo);
+extern pciio_bus_speed_t pciio_businfo_bus_speed_get(pciio_businfo_t businfo);
+extern void		pciio_latency_set(vertex_hdl_t, uchar_t);
 
 /**
- * sn_pci_set_vchan - Set the requested Virtual Channel bits into the mapped DMA
+ * sn_pci_set_vchan - Set the requested Virtual Channel bits into the mapped DMA 
  *                    address.
  * @pci_dev: pci device pointer
  * @addr: mapped dma address
@@ -704,24 +760,23 @@
  *
  * Set the Virtual Channel bit in the mapped dma address.
  */
-
 static inline int
 sn_pci_set_vchan(struct pci_dev *pci_dev,
-		 dma_addr_t *addr,
-		 int vchan)
+	dma_addr_t *addr,
+	int vchan)
 {
+
 	if (vchan > 1) {
 		return -1;
 	}
 
-	if (!(*addr >> 32))     /* Using a mask here would be cleaner */
-		return 0;       /* but this generates better code */
+	if (!(*addr >> 32))	/* Using a mask here would be cleaner */
+		return 0;	/* but this generates better code */
 
 	if (vchan == 1) {
 		/* Set Bit 57 */
 		*addr |= (1UL << 57);
-	}
-	else {
+	} else {
 		/* Clear Bit 57 */
 		*addr &= ~(1UL << 57);
 	}
@@ -730,4 +785,30 @@
 }
 
 #endif				/* C or C++ */
-#endif				/* _ASM_SN_PCI_PCIIO_H */
+
+
+/*
+ * Prototypes
+ */
+
+int snia_badaddr_val(volatile void *addr, int len, volatile void *ptr);
+nasid_t snia_get_console_nasid(void);
+nasid_t snia_get_master_baseio_nasid(void);
+void snia_ioerror_dump(char *name, int error_code, int error_mode, ioerror_t *ioerror);
+/* XXX: should probably be called __sn2_pci_rrb_alloc */
+int snia_pcibr_rrb_alloc(struct pci_dev *pci_dev, int *count_vchan0, int *count_vchan1);
+pciio_endian_t snia_pciio_endian_set(struct pci_dev *pci_dev,
+	pciio_endian_t device_end, pciio_endian_t desired_end);
+iopaddr_t snia_pciio_dmatrans_addr(struct pci_dev *pci_dev, device_desc_t dev_desc,
+	paddr_t paddr, size_t byte_count, unsigned flags);
+pciio_dmamap_t snia_pciio_dmamap_alloc(struct pci_dev *pci_dev,
+	device_desc_t dev_desc, size_t byte_count_max, unsigned flags);
+void snia_pciio_dmamap_free(pciio_dmamap_t pciio_dmamap);
+iopaddr_t snia_pciio_dmamap_addr(pciio_dmamap_t pciio_dmamap, paddr_t paddr,
+	size_t byte_count);
+void snia_pciio_dmamap_done(pciio_dmamap_t pciio_dmamap);
+void *snia_kmem_zalloc(size_t size);
+unsigned int snia_msi_alloc(struct pci_dev *pci_dev, int number_requested, unsigned int *irqs);
+unsigned int snia_msix_alloc(struct pci_dev *pci_dev, int number_requested, unsigned int *irqs);
+
+#endif				/* _ASM_IA64_SN_PCI_PCIIO_H */
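The new snia_* prototypes above pair naturally with the sn_pci_set_vchan() helper defined earlier in this header. Below is a minimal, illustrative sketch (not part of the patch) of a driver mapping a buffer for DMA and requesting virtual channel 1; the NULL device descriptor, the flags value of 0, and the example_ function name are assumptions for illustration only.

static dma_addr_t example_map_on_vchan1(struct pci_dev *pdev,
					paddr_t paddr, size_t len)
{
	/* Direct (non-ATE) DMA translation of a physical address. */
	dma_addr_t dma = (dma_addr_t) snia_pciio_dmatrans_addr(pdev,
					(device_desc_t) 0, paddr, len, 0);

	if (!dma)
		return 0;		/* translation failed */

	/*
	 * Ask for virtual channel 1.  Only 64-bit addresses carry the
	 * vchan bit (bit 57); 32-bit addresses come back unchanged.
	 */
	if (sn_pci_set_vchan(pdev, &dma, 1) < 0)
		return 0;

	return dma;
}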
diff -Nru a/include/asm-ia64/sn/pci/pciio_private.h b/include/asm-ia64/sn/pci/pciio_private.h
--- a/include/asm-ia64/sn/pci/pciio_private.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/pci/pciio_private.h	Thu Nov  6 13:42:35 2003
@@ -1,13 +1,12 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
  */
-#ifndef _ASM_SN_PCI_PCIIO_PRIVATE_H
-#define _ASM_SN_PCI_PCIIO_PRIVATE_H
+#ifndef _ASM_IA64_SN_PCI_PCIIO_PRIVATE_H
+#define _ASM_IA64_SN_PCI_PCIIO_PRIVATE_H
 
 #include <asm/sn/pci/pciio.h>
 #include <asm/sn/pci/pci_defs.h>
@@ -17,8 +16,6 @@
  * PCI drivers should NOT include this file.
  */
 
-#ident "sys/PCI/pciio_private: $Revision: 1.13 $"
-
 /*
  * All PCI providers set up PIO using this information.
  */
@@ -62,6 +59,70 @@
 #define PCIIO_INTR_NOTHREAD	2	/* interrupt handler wants to be called at interrupt level */
 
 /*
+ * Generic PCI bus information
+ */
+struct pciio_businfo_s {
+    int                 bi_multi_master;/* Bus provider supports multiple */
+                                        /* dma masters behind a single slot. */
+                                        /* Needed to work around a thrashing */
+                                        /* issue in SGI Bridge ASIC and */
+                                        /* its derivatives. */
+    pciio_asic_type_t   bi_asic_type;   /* PCI ASIC type */
+    pciio_bus_type_t    bi_bus_type;    /* PCI bus type */
+    pciio_bus_speed_t   bi_bus_speed;   /* PCI bus speed */
+};
+
+/*
+ * PPB specific parameters
+ *
+ * Note:  This is currently only relevant for Bridge and its descendants.  Mace
+ * PCI handles PPB's differently.
+ */
+
+typedef struct pciio_ppbspace_s {
+	uint64_t	base;
+	uint64_t	size;
+	uint64_t	next;
+} pciio_ppbspace_t;
+
+struct pciio_ppb_s {
+    /* vhdl, linkages, etc. */
+
+    vertex_hdl_t	    b_vhdl;
+    pciio_info_t	    b_pciio;	/* back ptr to pciio info */
+    pciio_info_t	    b_fns;	/* funs on secondary bus */
+
+    /* bus numbering information */
+
+    pciio_bus_t		    b_primary;
+    pciio_bus_t		    b_secondary;
+    pciio_bus_t		    b_subordinate;
+
+    /* base/limit information */
+
+    pciio_ppbspace_t	    b_io;
+    pciio_ppbspace_t	    b_mem;
+    pciio_ppbspace_t	    b_mempf;
+
+    /* Misc info */
+
+    uint16_t		    b_master_sec;	/* # funcs with DMA master */
+						/* ability on secondary bus */
+    uint16_t		    b_master_sub;	/* ditto for all subordinate */
+					  	/* busses. */
+    struct pciio_businfo_s  b_businfo;		/* generic secondary bus info */
+};
+
+/*
+ * Structure to keep track of PCI bus numbers which are available for
+ * assignment to secondary busses of PPB's per provider.
+ */
+
+struct pciio_bus_map_s {
+	uint32_t	bm_map[8];	/* bus number allocation map */
+};
+
+/*
  * Some PCI provider implementations keep track of PCI window Base Address
  * Register (BAR) address range assignment via the rmalloc()/rmfree() arena
  * management routines.  These implementations use the following data
@@ -101,6 +162,7 @@
 struct pciio_info_s {
     char                   *c_fingerprint;
     vertex_hdl_t            c_vertex;	/* back pointer to vertex */
+    vertex_hdl_t	    c_hostvertex;/* top most device in tree */
     pciio_bus_t             c_bus;	/* which bus the card is in */
     pciio_slot_t            c_slot;	/* which slot the card is in */
     pciio_function_t        c_func;	/* which func (on multi-func cards) */
@@ -114,6 +176,8 @@
 
     struct pciio_win_info_s {           /* state of BASE regs */
         pciio_space_t           w_space;
+        char                    w_code;		/* low 4 bits of MEM BAR */
+						/* low 2 bits of IO BAR */
         iopaddr_t               w_base;
         size_t                  w_size;
         int                     w_devio_index;   /* DevIO[] register used to
@@ -124,7 +188,18 @@
 #define c_rbase		c_rwindow.w_base		/* EXPANSION ROM base addr */
 #define c_rsize		c_rwindow.w_size		/* EXPANSION ROM size (bytes) */
     pciio_piospace_t	    c_piospace;	/* additional I/O spaces allocated */
+
+
+
+    pciio_info_t	    c_forw;	/* next function on this bus */
+					/* sorted by slot/func */
+    pciio_info_t	    c_back;	/* prev function on this bus */
+
+    pciio_ppb_t		    c_ppb;	/* ppb info if dev is a ppb */
+    pciio_ppb_t		    c_parent_ppb;	/* ppb dev is under if */
+						/* c_bus != 0 */
+    int			    c_type1;	/* use type1 addressing */
 };
 
 extern char             pciio_info_fingerprint[];
-#endif				/* _ASM_SN_PCI_PCIIO_PRIVATE_H */
+#endif				/* _ASM_IA64_SN_PCI_PCIIO_PRIVATE_H */
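A short, illustrative sketch (not part of the patch) of how a caller might consume the generic bus information introduced above, through the pciio_businfo accessors declared in pciio.h; the example_ function name and the printk formatting are assumptions.

static void example_dump_businfo(vertex_hdl_t conn)
{
	pciio_businfo_t binfo = pciio_businfo_get(conn);

	if (!binfo)
		return;

	printk("multi-master behind one slot: %d\n",
	       pciio_businfo_multi_master_get(binfo));
	printk("asic type %d, bus type %d, bus speed %d\n",
	       (int) pciio_businfo_asic_type_get(binfo),
	       (int) pciio_businfo_bus_type_get(binfo),
	       (int) pciio_businfo_bus_speed_get(binfo));
}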
diff -Nru a/include/asm-ia64/sn/pci/pic.h b/include/asm-ia64/sn/pci/pic.h
--- a/include/asm-ia64/sn/pci/pic.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/pci/pic.h	Thu Nov  6 13:42:35 2003
@@ -1,22 +1,12 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
  */
-#ifndef _ASM_SN_PCI_PIC_H
-#define _ASM_SN_PCI_PIC_H
-
-
-/*
- * The PIC ASIC is a follow-on to the Bridge and Xbridge ASICs.
- * It shares many of the same registers as those chips and therefore
- * the primary structure for the PIC will be bridge_s as defined
- * in irix/kern/sys/PCI/bridge.h.   This file is intended as a complement
- * to bridge.h, which includes this file.  
- */
+#ifndef _ASM_IA64_SN_PCI_PIC_H
+#define _ASM_IA64_SN_PCI_PIC_H
 
 /*
  * PIC AS DEVICE ZERO
@@ -66,1936 +56,593 @@
  */
 
 
-#ifndef __ASSEMBLY__
-
-#ifdef __cplusplus
-extern "C" {
+#ifdef __KERNEL__
+#include <linux/config.h>
+#include <asm/sn/xtalk/xwidget.h>	/* generic widget header */
+#else
+#include <linux/config.h>
+#include <xtalk/xwidget.h>
 #endif
 
-// #include <sys/types.h>
-#include <asm/sn/pci/pciio.h>
-
-
-/*********************************************************************
- *    bus provider function table
- *
- *	Normally, this table is only handed off explicitly
- *	during provider initialization, and the PCI generic
- *	layer will stash a pointer to it in the vertex; however,
- *	exporting it explicitly enables a performance hack in
- *	the generic PCI provider where if we know at compile
- *	time that the only possible PCI provider is a
- *	pcibr, we can go directly to this ops table.
- */
-
-extern pciio_provider_t pci_pic_provider;
-
+/*****************************************************************************
+ *************************** PIC PART & REV DEFINES **************************
+ *****************************************************************************/
 
-/*********************************************************************
- * misc defines
- *
- */
 #define PIC_WIDGET_PART_NUM_BUS0 0xd102
 #define PIC_WIDGET_PART_NUM_BUS1 0xd112
 #define PIC_WIDGET_MFGR_NUM 0x24
 #define PIC_WIDGET_REV_A  0x1
+#define PIC_WIDGET_REV_B  0x2
+#define PIC_WIDGET_REV_C  0x3
+
+#define IS_PIC_BUS0(wid) (XWIDGET_PART_NUM(wid) == PIC_WIDGET_PART_NUM_BUS0 && \
+			XWIDGET_MFG_NUM(wid) == PIC_WIDGET_MFGR_NUM)
+#define IS_PIC_BUS1(wid) (XWIDGET_PART_NUM(wid) == PIC_WIDGET_PART_NUM_BUS1 && \
+			XWIDGET_MFG_NUM(wid) == PIC_WIDGET_MFGR_NUM)
+#define IS_PIC_BRIDGE(wid) (IS_PIC_BUS0(wid) || IS_PIC_BUS1(wid))
+
 
 #define IS_PIC_PART_REV_A(rev) \
 	((rev == (PIC_WIDGET_PART_NUM_BUS0 << 4 | PIC_WIDGET_REV_A)) || \
 	(rev == (PIC_WIDGET_PART_NUM_BUS1 << 4 | PIC_WIDGET_REV_A)))
+#define IS_PIC_PART_REV_B(rev) \
+	((rev == (PIC_WIDGET_PART_NUM_BUS0 << 4 | PIC_WIDGET_REV_B)) || \
+	(rev == (PIC_WIDGET_PART_NUM_BUS1 << 4 | PIC_WIDGET_REV_B)))
+#define IS_PIC_PART_REV_C(rev) \
+	((rev == (PIC_WIDGET_PART_NUM_BUS0 << 4 | PIC_WIDGET_REV_C)) || \
+	(rev == (PIC_WIDGET_PART_NUM_BUS1 << 4 | PIC_WIDGET_REV_C)))
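The rev value tested by the IS_PIC_PART_REV_* macros above is the 16-bit widget part number shifted left by four with the 4-bit revision in the low nibble. An illustrative helper that composes such a value (not part of the patch; the example_ name is an assumption):

static int example_pic_rev_id(int part_num, int rev_nibble)
{
	/* Build the value expected by IS_PIC_PART_REV_A/B/C(). */
	return (part_num << 4) | rev_nibble;
}

/* e.g. IS_PIC_PART_REV_B(example_pic_rev_id(PIC_WIDGET_PART_NUM_BUS0,
 *				PIC_WIDGET_REV_B)) evaluates to true. */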
 
-/*********************************************************************
- * register offset defines
- *
- */
-	/* Identification Register  -- read-only */
-#define PIC_IDENTIFICATION 0x00000000
-
-	/* Status Register  -- read-only */
-#define PIC_STATUS 0x00000008
-
-	/* Upper Address Holding Register Bus Side Errors  -- read-only */
-#define PIC_UPPER_ADDR_REG_BUS_SIDE_ERRS 0x00000010
-
-	/* Lower Address Holding Register Bus Side Errors  -- read-only */
-#define PIC_LOWER_ADDR_REG_BUS_SIDE_ERRS 0x00000018
-
-	/* Control Register  -- read/write */
-#define PIC_CONTROL 0x00000020
-
-	/* PCI Request Time-out Value Register  -- read/write */
-#define PIC_PCI_REQ_TIME_OUT_VALUE 0x00000028
-
-	/* Interrupt Destination Upper Address Register  -- read/write */
-#define PIC_INTR_DEST_UPPER_ADDR 0x00000030
-
-	/* Interrupt Destination Lower Address Register  -- read/write */
-#define PIC_INTR_DEST_LOWER_ADDR 0x00000038
-
-	/* Command Word Holding Register Bus Side  -- read-only */
-#define PIC_CMD_WORD_REG_BUS_SIDE 0x00000040
-
-	/* LLP Configuration Register (Bus 0 Only)  -- read/write */
-#define PIC_LLP_CFG_REG_(BUS_0_ONLY) 0x00000048
-
-	/* PCI Target Flush Register  -- read-only */
-#define PIC_PCI_TARGET_FLUSH 0x00000050
-
-	/* Command Word Holding Register Link Side  -- read-only */
-#define PIC_CMD_WORD_REG_LINK_SIDE 0x00000058
-
-	/* Response Buffer Error Upper Address Holding  -- read-only */
-#define PIC_RESP_BUF_ERR_UPPER_ADDR_ 0x00000060
-
-	/* Response Buffer Error Lower Address Holding  -- read-only */
-#define PIC_RESP_BUF_ERR_LOWER_ADDR_ 0x00000068
-
-	/* Test Pin Control Register  -- read/write */
-#define PIC_TEST_PIN_CONTROL 0x00000070
-
-	/* Address Holding Register Link Side Errors  -- read-only */
-#define PIC_ADDR_REG_LINK_SIDE_ERRS 0x00000078
-
-	/* Direct Map Register  -- read/write */
-#define PIC_DIRECT_MAP 0x00000080
-
-	/* PCI Map Fault Address Register  -- read-only */
-#define PIC_PCI_MAP_FAULT_ADDR 0x00000090
-
-	/* Arbitration Priority Register  -- read/write */
-#define PIC_ARBITRATION_PRIORITY 0x000000A0
-
-	/* Internal Ram Parity Error Register  -- read-only */
-#define PIC_INTERNAL_RAM_PARITY_ERR 0x000000B0
-
-	/* PCI Time-out Register  -- read/write */
-#define PIC_PCI_TIME_OUT 0x000000C0
-
-	/* PCI Type 1 Configuration Register  -- read/write */
-#define PIC_PCI_TYPE_1_CFG 0x000000C8
-
-	/* PCI Bus Error Upper Address Holding Register  -- read-only */
-#define PIC_PCI_BUS_ERR_UPPER_ADDR_ 0x000000D0
-
-	/* PCI Bus Error Lower Address Holding Register  -- read-only */
-#define PIC_PCI_BUS_ERR_LOWER_ADDR_ 0x000000D8
-
-	/* PCIX Error Address Register  -- read-only */
-#define PIC_PCIX_ERR_ADDR 0x000000E0
-
-	/* PCIX Error Attribute Register  -- read-only */
-#define PIC_PCIX_ERR_ATTRIBUTE 0x000000E8
-
-	/* PCIX Error Data Register  -- read-only */
-#define PIC_PCIX_ERR_DATA 0x000000F0
-
-	/* PCIX Read Request Timeout Error Register  -- read-only */
-#define PIC_PCIX_READ_REQ_TIMEOUT_ERR 0x000000F8
-
-	/* Interrupt Status Register  -- read-only */
-#define PIC_INTR_STATUS 0x00000100
-
-	/* Interrupt Enable Register  -- read/write */
-#define PIC_INTR_ENABLE 0x00000108
-
-	/* Reset Interrupt Status Register  -- write-only */
-#define PIC_RESET_INTR_STATUS 0x00000110
-
-	/* Interrupt Mode Register  -- read/write */
-#define PIC_INTR_MODE 0x00000118
-
-	/* Interrupt Device Register  -- read/write */
-#define PIC_INTR_DEVICE 0x00000120
-
-	/* Host Error Field Register  -- read/write */
-#define PIC_HOST_ERR_FIELD 0x00000128
-
-	/* Interrupt Pin 0 Host Address Register  -- read/write */
-#define PIC_INTR_PIN_0_HOST_ADDR 0x00000130
-
-	/* Interrupt Pin 1 Host Address Register  -- read/write */
-#define PIC_INTR_PIN_1_HOST_ADDR 0x00000138
-
-	/* Interrupt Pin 2 Host Address Register  -- read/write */
-#define PIC_INTR_PIN_2_HOST_ADDR 0x00000140
-
-	/* Interrupt Pin 3 Host Address Register  -- read/write */
-#define PIC_INTR_PIN_3_HOST_ADDR 0x00000148
-
-	/* Interrupt Pin 4 Host Address Register  -- read/write */
-#define PIC_INTR_PIN_4_HOST_ADDR 0x00000150
-
-	/* Interrupt Pin 5 Host Address Register  -- read/write */
-#define PIC_INTR_PIN_5_HOST_ADDR 0x00000158
-
-	/* Interrupt Pin 6 Host Address Register  -- read/write */
-#define PIC_INTR_PIN_6_HOST_ADDR 0x00000160
-
-	/* Interrupt Pin 7 Host Address Register  -- read/write */
-#define PIC_INTR_PIN_7_HOST_ADDR 0x00000168
-
-	/* Error Interrupt View Register  -- read-only */
-#define PIC_ERR_INTR_VIEW 0x00000170
-
-	/* Multiple Interrupt Register  -- read-only */
-#define PIC_MULTIPLE_INTR 0x00000178
-
-	/* Force Always Interrupt 0 Register  -- write-only */
-#define PIC_FORCE_ALWAYS_INTR_0 0x00000180
-
-	/* Force Always Interrupt 1 Register  -- write-only */
-#define PIC_FORCE_ALWAYS_INTR_1 0x00000188
-
-	/* Force Always Interrupt 2 Register  -- write-only */
-#define PIC_FORCE_ALWAYS_INTR_2 0x00000190
-
-	/* Force Always Interrupt 3 Register  -- write-only */
-#define PIC_FORCE_ALWAYS_INTR_3 0x00000198
-
-	/* Force Always Interrupt 4 Register  -- write-only */
-#define PIC_FORCE_ALWAYS_INTR_4 0x000001A0
-
-	/* Force Always Interrupt 5 Register  -- write-only */
-#define PIC_FORCE_ALWAYS_INTR_5 0x000001A8
-
-	/* Force Always Interrupt 6 Register  -- write-only */
-#define PIC_FORCE_ALWAYS_INTR_6 0x000001B0
-
-	/* Force Always Interrupt 7 Register  -- write-only */
-#define PIC_FORCE_ALWAYS_INTR_7 0x000001B8
-
-	/* Force w/Pin Interrupt 0 Register  -- write-only */
-#define PIC_FORCE_PIN_INTR_0 0x000001C0
-
-	/* Force w/Pin Interrupt 1 Register  -- write-only */
-#define PIC_FORCE_PIN_INTR_1 0x000001C8
-
-	/* Force w/Pin Interrupt 2 Register  -- write-only */
-#define PIC_FORCE_PIN_INTR_2 0x000001D0
-
-	/* Force w/Pin Interrupt 3 Register  -- write-only */
-#define PIC_FORCE_PIN_INTR_3 0x000001D8
-
-	/* Force w/Pin Interrupt 4 Register  -- write-only */
-#define PIC_FORCE_PIN_INTR_4 0x000001E0
-
-	/* Force w/Pin Interrupt 5 Register  -- write-only */
-#define PIC_FORCE_PIN_INTR_5 0x000001E8
-
-	/* Force w/Pin Interrupt 6 Register  -- write-only */
-#define PIC_FORCE_PIN_INTR_6 0x000001F0
-
-	/* Force w/Pin Interrupt 7 Register  -- write-only */
-#define PIC_FORCE_PIN_INTR_7 0x000001F8
-
-	/* Device 0 Register  -- read/write */
-#define PIC_DEVICE_0 0x00000200
-
-	/* Device 1 Register  -- read/write */
-#define PIC_DEVICE_1 0x00000208
-
-	/* Device 2 Register  -- read/write */
-#define PIC_DEVICE_2 0x00000210
-
-	/* Device 3 Register  -- read/write */
-#define PIC_DEVICE_3 0x00000218
-
-	/* Device 0 Write Request Buffer Register  -- read-only */
-#define PIC_DEVICE_0_WRITE_REQ_BUF 0x00000240
-
-	/* Device 1 Write Request Buffer Register  -- read-only */
-#define PIC_DEVICE_1_WRITE_REQ_BUF 0x00000248
-
-	/* Device 2 Write Request Buffer Register  -- read-only */
-#define PIC_DEVICE_2_WRITE_REQ_BUF 0x00000250
-
-	/* Device 3 Write Request Buffer Register  -- read-only */
-#define PIC_DEVICE_3_WRITE_REQ_BUF 0x00000258
-
-	/* Even Device Response Buffer Register  -- read/write */
-#define PIC_EVEN_DEVICE_RESP_BUF 0x00000280
-
-	/* Odd Device Response Buffer Register  -- read/write */
-#define PIC_ODD_DEVICE_RESP_BUF 0x00000288
-
-	/* Read Response Buffer Status Register  -- read-only */
-#define PIC_READ_RESP_BUF_STATUS 0x00000290
-
-	/* Read Response Buffer Clear Register  -- write-only */
-#define PIC_READ_RESP_BUF_CLEAR 0x00000298
-
-	/* PCI RR 0 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_0_UPPER_ADDR_MATCH 0x00000300
-
-	/* PCI RR 0 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_0_LOWER_ADDR_MATCH 0x00000308
-
-	/* PCI RR 1 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_1_UPPER_ADDR_MATCH 0x00000310
-
-	/* PCI RR 1 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_1_LOWER_ADDR_MATCH 0x00000318
-
-	/* PCI RR 2 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_2_UPPER_ADDR_MATCH 0x00000320
-
-	/* PCI RR 2 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_2_LOWER_ADDR_MATCH 0x00000328
-
-	/* PCI RR 3 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_3_UPPER_ADDR_MATCH 0x00000330
-
-	/* PCI RR 3 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_3_LOWER_ADDR_MATCH 0x00000338
-
-	/* PCI RR 4 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_4_UPPER_ADDR_MATCH 0x00000340
-
-	/* PCI RR 4 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_4_LOWER_ADDR_MATCH 0x00000348
-
-	/* PCI RR 5 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_5_UPPER_ADDR_MATCH 0x00000350
-
-	/* PCI RR 5 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_5_LOWER_ADDR_MATCH 0x00000358
-
-	/* PCI RR 6 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_6_UPPER_ADDR_MATCH 0x00000360
-
-	/* PCI RR 6 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_6_LOWER_ADDR_MATCH 0x00000368
-
-	/* PCI RR 7 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_7_UPPER_ADDR_MATCH 0x00000370
-
-	/* PCI RR 7 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_7_LOWER_ADDR_MATCH 0x00000378
-
-	/* PCI RR 8 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_8_UPPER_ADDR_MATCH 0x00000380
-
-	/* PCI RR 8 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_8_LOWER_ADDR_MATCH 0x00000388
-
-	/* PCI RR 9 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_9_UPPER_ADDR_MATCH 0x00000390
-
-	/* PCI RR 9 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_9_LOWER_ADDR_MATCH 0x00000398
-
-	/* PCI RR 10 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_10_UPPER_ADDR_MATCH 0x000003A0
-
-	/* PCI RR 10 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_10_LOWER_ADDR_MATCH 0x000003A8
-
-	/* PCI RR 11 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_11_UPPER_ADDR_MATCH 0x000003B0
-
-	/* PCI RR 11 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_11_LOWER_ADDR_MATCH 0x000003B8
-
-	/* PCI RR 12 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_12_UPPER_ADDR_MATCH 0x000003C0
-
-	/* PCI RR 12 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_12_LOWER_ADDR_MATCH 0x000003C8
-
-	/* PCI RR 13 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_13_UPPER_ADDR_MATCH 0x000003D0
-
-	/* PCI RR 13 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_13_LOWER_ADDR_MATCH 0x000003D8
-
-	/* PCI RR 14 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_14_UPPER_ADDR_MATCH 0x000003E0
-
-	/* PCI RR 14 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_14_LOWER_ADDR_MATCH 0x000003E8
-
-	/* PCI RR 15 Upper Address Match Register  -- read-only */
-#define PIC_PCI_RR_15_UPPER_ADDR_MATCH 0x000003F0
-
-	/* PCI RR 15 Lower Address Match Register  -- read-only */
-#define PIC_PCI_RR_15_LOWER_ADDR_MATCH 0x000003F8
-
-	/* Buffer 0 Flush Count with Data Touch Register  -- read/write */
-#define PIC_BUF_0_FLUSH_CNT_WITH_DATA_TOUCH 0x00000400
-
-	/* Buffer 0 Flush Count w/o Data Touch Register  -- read/write */
-#define PIC_BUF_0_FLUSH_CNT_W_O_DATA_TOUCH 0x00000408
-
-	/* Buffer 0 Request in Flight Count Register  -- read/write */
-#define PIC_BUF_0_REQ_IN_FLIGHT_CNT 0x00000410
-
-	/* Buffer 0 Prefetch Request Count Register  -- read/write */
-#define PIC_BUF_0_PREFETCH_REQ_CNT 0x00000418
-
-	/* Buffer 0 Total PCI Retry Count Register  -- read/write */
-#define PIC_BUF_0_TOTAL_PCI_RETRY_CNT 0x00000420
-
-	/* Buffer 0 Max PCI Retry Count Register  -- read/write */
-#define PIC_BUF_0_MAX_PCI_RETRY_CNT 0x00000428
-
-	/* Buffer 0 Max Latency Count Register  -- read/write */
-#define PIC_BUF_0_MAX_LATENCY_CNT 0x00000430
-
-	/* Buffer 0 Clear All Register  -- read/write */
-#define PIC_BUF_0_CLEAR_ALL 0x00000438
-
-	/* Buffer 2 Flush Count with Data Touch Register  -- read/write */
-#define PIC_BUF_2_FLUSH_CNT_WITH_DATA_TOUCH 0x00000440
-
-	/* Buffer 2 Flush Count w/o Data Touch Register  -- read/write */
-#define PIC_BUF_2_FLUSH_CNT_W_O_DATA_TOUCH 0x00000448
-
-	/* Buffer 2 Request in Flight Count Register  -- read/write */
-#define PIC_BUF_2_REQ_IN_FLIGHT_CNT 0x00000450
-
-	/* Buffer 2 Prefetch Request Count Register  -- read/write */
-#define PIC_BUF_2_PREFETCH_REQ_CNT 0x00000458
-
-	/* Buffer 2 Total PCI Retry Count Register  -- read/write */
-#define PIC_BUF_2_TOTAL_PCI_RETRY_CNT 0x00000460
-
-	/* Buffer 2 Max PCI Retry Count Register  -- read/write */
-#define PIC_BUF_2_MAX_PCI_RETRY_CNT 0x00000468
-
-	/* Buffer 2 Max Latency Count Register  -- read/write */
-#define PIC_BUF_2_MAX_LATENCY_CNT 0x00000470
-
-	/* Buffer 2 Clear All Register  -- read/write */
-#define PIC_BUF_2_CLEAR_ALL 0x00000478
-
-	/* Buffer 4 Flush Count with Data Touch Register  -- read/write */
-#define PIC_BUF_4_FLUSH_CNT_WITH_DATA_TOUCH 0x00000480
-
-	/* Buffer 4 Flush Count w/o Data Touch Register  -- read/write */
-#define PIC_BUF_4_FLUSH_CNT_W_O_DATA_TOUCH 0x00000488
-
-	/* Buffer 4 Request in Flight Count Register  -- read/write */
-#define PIC_BUF_4_REQ_IN_FLIGHT_CNT 0x00000490
-
-	/* Buffer 4 Prefetch Request Count Register  -- read/write */
-#define PIC_BUF_4_PREFETCH_REQ_CNT 0x00000498
-
-	/* Buffer 4 Total PCI Retry Count Register  -- read/write */
-#define PIC_BUF_4_TOTAL_PCI_RETRY_CNT 0x000004A0
-
-	/* Buffer 4 Max PCI Retry Count Register  -- read/write */
-#define PIC_BUF_4_MAX_PCI_RETRY_CNT 0x000004A8
-
-	/* Buffer 4 Max Latency Count Register  -- read/write */
-#define PIC_BUF_4_MAX_LATENCY_CNT 0x000004B0
-
-	/* Buffer 4 Clear All Register  -- read/write */
-#define PIC_BUF_4_CLEAR_ALL 0x000004B8
-
-	/* Buffer 6 Flush Count with Data Touch Register  -- read/write */
-#define PIC_BUF_6_FLUSH_CNT_WITH_DATA_TOUCH 0x000004C0
-
-	/* Buffer 6 Flush Count w/o Data Touch Register  -- read/write */
-#define PIC_BUF_6_FLUSH_CNT_W_O_DATA_TOUCH 0x000004C8
-
-	/* Buffer 6 Request in Flight Count Register  -- read/write */
-#define PIC_BUF_6_REQ_IN_FLIGHT_CNT 0x000004D0
-
-	/* Buffer 6 Prefetch Request Count Register  -- read/write */
-#define PIC_BUF_6_PREFETCH_REQ_CNT 0x000004D8
-
-	/* Buffer 6 Total PCI Retry Count Register  -- read/write */
-#define PIC_BUF_6_TOTAL_PCI_RETRY_CNT 0x000004E0
-
-	/* Buffer 6 Max PCI Retry Count Register  -- read/write */
-#define PIC_BUF_6_MAX_PCI_RETRY_CNT 0x000004E8
-
-	/* Buffer 6 Max Latency Count Register  -- read/write */
-#define PIC_BUF_6_MAX_LATENCY_CNT 0x000004F0
-
-	/* Buffer 6 Clear All Register  -- read/write */
-#define PIC_BUF_6_CLEAR_ALL 0x000004F8
-
-	/* Buffer 8 Flush Count with Data Touch Register  -- read/write */
-#define PIC_BUF_8_FLUSH_CNT_WITH_DATA_TOUCH 0x00000500
-
-	/* Buffer 8 Flush Count w/o Data Touch Register  -- read/write */
-#define PIC_BUF_8_FLUSH_CNT_W_O_DATA_TOUCH 0x00000508
-
-	/* Buffer 8 Request in Flight Count Register  -- read/write */
-#define PIC_BUF_8_REQ_IN_FLIGHT_CNT 0x00000510
-
-	/* Buffer 8 Prefetch Request Count Register  -- read/write */
-#define PIC_BUF_8_PREFETCH_REQ_CNT 0x00000518
-
-	/* Buffer 8 Total PCI Retry Count Register  -- read/write */
-#define PIC_BUF_8_TOTAL_PCI_RETRY_CNT 0x00000520
-
-	/* Buffer 8 Max PCI Retry Count Register  -- read/write */
-#define PIC_BUF_8_MAX_PCI_RETRY_CNT 0x00000528
-
-	/* Buffer 8 Max Latency Count Register  -- read/write */
-#define PIC_BUF_8_MAX_LATENCY_CNT 0x00000530
-
-	/* Buffer 8 Clear All Register  -- read/write */
-#define PIC_BUF_8_CLEAR_ALL 0x00000538
-
-	/* Buffer 10 Flush Count with Data Touch Register  -- read/write */
-#define PIC_BUF_10_FLUSH_CNT_WITH_DATA_TOUCH 0x00000540
-
-	/* Buffer 10 Flush Count w/o Data Touch Register  -- read/write */
-#define PIC_BUF_10_FLUSH_CNT_W_O_DATA_TOUCH 0x00000548
-
-	/* Buffer 10 Request in Flight Count Register  -- read/write */
-#define PIC_BUF_10_REQ_IN_FLIGHT_CNT 0x00000550
-
-	/* Buffer 10 Prefetch Request Count Register  -- read/write */
-#define PIC_BUF_10_PREFETCH_REQ_CNT 0x00000558
-
-	/* Buffer 10 Total PCI Retry Count Register  -- read/write */
-#define PIC_BUF_10_TOTAL_PCI_RETRY_CNT 0x00000560
-
-	/* Buffer 10 Max PCI Retry Count Register  -- read/write */
-#define PIC_BUF_10_MAX_PCI_RETRY_CNT 0x00000568
-
-	/* Buffer 10 Max Latency Count Register  -- read/write */
-#define PIC_BUF_10_MAX_LATENCY_CNT 0x00000570
-
-	/* Buffer 10 Clear All Register  -- read/write */
-#define PIC_BUF_10_CLEAR_ALL 0x00000578
-
-	/* Buffer 12 Flush Count with Data Touch Register  -- read/write */
-#define PIC_BUF_12_FLUSH_CNT_WITH_DATA_TOUCH 0x00000580
-
-	/* Buffer 12 Flush Count w/o Data Touch Register  -- read/write */
-#define PIC_BUF_12_FLUSH_CNT_W_O_DATA_TOUCH 0x00000588
-
-	/* Buffer 12 Request in Flight Count Register  -- read/write */
-#define PIC_BUF_12_REQ_IN_FLIGHT_CNT 0x00000590
-
-	/* Buffer 12 Prefetch Request Count Register  -- read/write */
-#define PIC_BUF_12_PREFETCH_REQ_CNT 0x00000598
-
-	/* Buffer 12 Total PCI Retry Count Register  -- read/write */
-#define PIC_BUF_12_TOTAL_PCI_RETRY_CNT 0x000005A0
-
-	/* Buffer 12 Max PCI Retry Count Register  -- read/write */
-#define PIC_BUF_12_MAX_PCI_RETRY_CNT 0x000005A8
-
-	/* Buffer 12 Max Latency Count Register  -- read/write */
-#define PIC_BUF_12_MAX_LATENCY_CNT 0x000005B0
-
-	/* Buffer 12 Clear All Register  -- read/write */
-#define PIC_BUF_12_CLEAR_ALL 0x000005B8
-
-	/* Buffer 14 Flush Count with Data Touch Register  -- read/write */
-#define PIC_BUF_14_FLUSH_CNT_WITH_DATA_TOUCH 0x000005C0
-
-	/* Buffer 14 Flush Count w/o Data Touch Register  -- read/write */
-#define PIC_BUF_14_FLUSH_CNT_W_O_DATA_TOUCH 0x000005C8
-
-	/* Buffer 14 Request in Flight Count Register  -- read/write */
-#define PIC_BUF_14_REQ_IN_FLIGHT_CNT 0x000005D0
-
-	/* Buffer 14 Prefetch Request Count Register  -- read/write */
-#define PIC_BUF_14_PREFETCH_REQ_CNT 0x000005D8
-
-	/* Buffer 14 Total PCI Retry Count Register  -- read/write */
-#define PIC_BUF_14_TOTAL_PCI_RETRY_CNT 0x000005E0
-
-	/* Buffer 14 Max PCI Retry Count Register  -- read/write */
-#define PIC_BUF_14_MAX_PCI_RETRY_CNT 0x000005E8
-
-	/* Buffer 14 Max Latency Count Register  -- read/write */
-#define PIC_BUF_14_MAX_LATENCY_CNT 0x000005F0
-
-	/* Buffer 14 Clear All Register  -- read/write */
-#define PIC_BUF_14_CLEAR_ALL 0x000005F8
-
-	/* PCIX Read Buffer 0 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_0_ADDR 0x00000A00
-
-	/* PCIX Read Buffer 0 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_0_ATTRIBUTE 0x00000A08
-
-	/* PCIX Read Buffer 1 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_1_ADDR 0x00000A10
-
-	/* PCIX Read Buffer 1 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_1_ATTRIBUTE 0x00000A18
-
-	/* PCIX Read Buffer 2 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_2_ADDR 0x00000A20
-
-	/* PCIX Read Buffer 2 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_2_ATTRIBUTE 0x00000A28
-
-	/* PCIX Read Buffer 3 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_3_ADDR 0x00000A30
-
-	/* PCIX Read Buffer 3 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_3_ATTRIBUTE 0x00000A38
-
-	/* PCIX Read Buffer 4 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_4_ADDR 0x00000A40
-
-	/* PCIX Read Buffer 4 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_4_ATTRIBUTE 0x00000A48
-
-	/* PCIX Read Buffer 5 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_5_ADDR 0x00000A50
-
-	/* PCIX Read Buffer 5 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_5_ATTRIBUTE 0x00000A58
-
-	/* PCIX Read Buffer 6 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_6_ADDR 0x00000A60
-
-	/* PCIX Read Buffer 6 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_6_ATTRIBUTE 0x00000A68
-
-	/* PCIX Read Buffer 7 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_7_ADDR 0x00000A70
-
-	/* PCIX Read Buffer 7 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_7_ATTRIBUTE 0x00000A78
-
-	/* PCIX Read Buffer 8 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_8_ADDR 0x00000A80
-
-	/* PCIX Read Buffer 8 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_8_ATTRIBUTE 0x00000A88
-
-	/* PCIX Read Buffer 9 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_9_ADDR 0x00000A90
-
-	/* PCIX Read Buffer 9 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_9_ATTRIBUTE 0x00000A98
-
-	/* PCIX Read Buffer 10 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_10_ADDR 0x00000AA0
-
-	/* PCIX Read Buffer 10 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_10_ATTRIBUTE 0x00000AA8
-
-	/* PCIX Read Buffer 11 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_11_ADDR 0x00000AB0
-
-	/* PCIX Read Buffer 11 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_11_ATTRIBUTE 0x00000AB8
-
-	/* PCIX Read Buffer 12 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_12_ADDR 0x00000AC0
-
-	/* PCIX Read Buffer 12 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_12_ATTRIBUTE 0x00000AC8
-
-	/* PCIX Read Buffer 13 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_13_ADDR 0x00000AD0
-
-	/* PCIX Read Buffer 13 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_13_ATTRIBUTE 0x00000AD8
-
-	/* PCIX Read Buffer 14 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_14_ADDR 0x00000AE0
-
-	/* PCIX Read Buffer 14 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_14_ATTRIBUTE 0x00000AE8
-
-	/* PCIX Read Buffer 15 Address Register  -- read-only */
-#define PIC_PCIX_READ_BUF_15_ADDR 0x00000AF0
-
-	/* PCIX Read Buffer 15 Attribute Register  -- read-only */
-#define PIC_PCIX_READ_BUF_15_ATTRIBUTE 0x00000AF8
-
-	/* PCIX Write Buffer 0 Address Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_0_ADDR 0x00000B00
-
-	/* PCIX Write Buffer 0 Attribute Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_0_ATTRIBUTE 0x00000B08
-
-	/* PCIX Write Buffer 0 Valid Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_0_VALID 0x00000B10
-
-	/* PCIX Write Buffer 1 Address Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_1_ADDR 0x00000B20
-
-	/* PCIX Write Buffer 1 Attribute Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_1_ATTRIBUTE 0x00000B28
-
-	/* PCIX Write Buffer 1 Valid Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_1_VALID 0x00000B30
-
-	/* PCIX Write Buffer 2 Address Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_2_ADDR 0x00000B40
-
-	/* PCIX Write Buffer 2 Attribute Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_2_ATTRIBUTE 0x00000B48
-
-	/* PCIX Write Buffer 2 Valid Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_2_VALID 0x00000B50
-
-	/* PCIX Write Buffer 3 Address Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_3_ADDR 0x00000B60
-
-	/* PCIX Write Buffer 3 Attribute Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_3_ATTRIBUTE 0x00000B68
-
-	/* PCIX Write Buffer 3 Valid Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_3_VALID 0x00000B70
-
-	/* PCIX Write Buffer 4 Address Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_4_ADDR 0x00000B80
-
-	/* PCIX Write Buffer 4 Attribute Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_4_ATTRIBUTE 0x00000B88
-
-	/* PCIX Write Buffer 4 Valid Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_4_VALID 0x00000B90
-
-	/* PCIX Write Buffer 5 Address Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_5_ADDR 0x00000BA0
-
-	/* PCIX Write Buffer 5 Attribute Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_5_ATTRIBUTE 0x00000BA8
-
-	/* PCIX Write Buffer 5 Valid Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_5_VALID 0x00000BB0
-
-	/* PCIX Write Buffer 6 Address Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_6_ADDR 0x00000BC0
-
-	/* PCIX Write Buffer 6 Attribute Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_6_ATTRIBUTE 0x00000BC8
-
-	/* PCIX Write Buffer 6 Valid Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_6_VALID 0x00000BD0
-
-	/* PCIX Write Buffer 7 Address Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_7_ADDR 0x00000BE0
-
-	/* PCIX Write Buffer 7 Attribute Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_7_ATTRIBUTE 0x00000BE8
-
-	/* PCIX Write Buffer 7 Valid Register  -- read-only */
-#define PIC_PCIX_WRITE_BUF_7_VALID 0x00000BF0
+#ifndef __ASSEMBLY__
 
-/*********************************************************************
- * misc typedefs
- *
- */
 typedef uint64_t picreg_t;
+typedef uint64_t picate_t;
 
-/*********************************************************************
- * PIC register structures
- *
- */
+/*****************************************************************************
+ *********************** PIC MMR structure mapping ***************************
+ *****************************************************************************/
+
+/* NOTE: PIC WAR. PV#854697.  PIC does not allow writes just to [31:0]
+ * of a 64-bit register.  When writing PIC registers, always write the 
+ * entire 64 bits.
+ */
+
+typedef volatile struct pic_s {
+
+    /* 0x000000-0x00FFFF -- Local Registers */
+
+    /* 0x000000-0x000057 -- Standard Widget Configuration */
+    picreg_t		p_wid_id;			/* 0x000000 */
+    picreg_t		p_wid_stat;			/* 0x000008 */
+    picreg_t		p_wid_err_upper;		/* 0x000010 */
+    picreg_t		p_wid_err_lower;		/* 0x000018 */
+    #define p_wid_err p_wid_err_lower
+    picreg_t		p_wid_control;			/* 0x000020 */
+    picreg_t		p_wid_req_timeout;		/* 0x000028 */
+    picreg_t		p_wid_int_upper;		/* 0x000030 */
+    picreg_t		p_wid_int_lower;		/* 0x000038 */
+    #define p_wid_int p_wid_int_lower
+    picreg_t		p_wid_err_cmdword;		/* 0x000040 */
+    picreg_t		p_wid_llp;			/* 0x000048 */
+    picreg_t		p_wid_tflush;			/* 0x000050 */
+
+    /* 0x000058-0x00007F -- Bridge-specific Widget Configuration */
+    picreg_t		p_wid_aux_err;			/* 0x000058 */
+    picreg_t		p_wid_resp_upper;		/* 0x000060 */
+    picreg_t		p_wid_resp_lower;		/* 0x000068 */
+    #define p_wid_resp p_wid_resp_lower
+    picreg_t		p_wid_tst_pin_ctrl;		/* 0x000070 */
+    picreg_t		p_wid_addr_lkerr;		/* 0x000078 */
+
+    /* 0x000080-0x00008F -- PMU & MAP */
+    picreg_t		p_dir_map;			/* 0x000080 */
+    picreg_t		_pad_000088;			/* 0x000088 */
+
+    /* 0x000090-0x00009F -- SSRAM */
+    picreg_t		p_map_fault;			/* 0x000090 */
+    picreg_t		_pad_000098;			/* 0x000098 */
+
+    /* 0x0000A0-0x0000AF -- Arbitration */
+    picreg_t		p_arb;				/* 0x0000A0 */
+    picreg_t		_pad_0000A8;			/* 0x0000A8 */
+
+    /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
+    picreg_t		p_ate_parity_err;		/* 0x0000B0 */
+    picreg_t		_pad_0000B8;			/* 0x0000B8 */
+
+    /* 0x0000C0-0x0000FF -- PCI/GIO */
+    picreg_t		p_bus_timeout;			/* 0x0000C0 */
+    picreg_t		p_pci_cfg;			/* 0x0000C8 */
+    picreg_t		p_pci_err_upper;		/* 0x0000D0 */
+    picreg_t		p_pci_err_lower;		/* 0x0000D8 */
+    #define p_pci_err p_pci_err_lower
+    picreg_t		_pad_0000E0[4];			/* 0x0000{E0..F8} */
+
+    /* 0x000100-0x0001FF -- Interrupt */
+    picreg_t		p_int_status;			/* 0x000100 */
+    picreg_t		p_int_enable;			/* 0x000108 */
+    picreg_t		p_int_rst_stat;			/* 0x000110 */
+    picreg_t		p_int_mode;			/* 0x000118 */
+    picreg_t		p_int_device;			/* 0x000120 */
+    picreg_t		p_int_host_err;			/* 0x000128 */
+    picreg_t		p_int_addr[8];			/* 0x0001{30,,,68} */
+    picreg_t		p_err_int_view;			/* 0x000170 */
+    picreg_t		p_mult_int;			/* 0x000178 */
+    picreg_t		p_force_always[8];		/* 0x0001{80,,,B8} */
+    picreg_t		p_force_pin[8];			/* 0x0001{C0,,,F8} */
+
+    /* 0x000200-0x000298 -- Device */
+    picreg_t		p_device[4];			/* 0x0002{00,,,18} */
+    picreg_t		_pad_000220[4];			/* 0x0002{20,,,38} */
+    picreg_t		p_wr_req_buf[4];		/* 0x0002{40,,,58} */
+    picreg_t		_pad_000260[4];			/* 0x0002{60,,,78} */
+    picreg_t		p_rrb_map[2];			/* 0x0002{80,,,88} */
+    #define p_even_resp p_rrb_map[0]			/* 0x000280 */
+    #define p_odd_resp  p_rrb_map[1]			/* 0x000288 */
+    picreg_t		p_resp_status;			/* 0x000290 */
+    picreg_t		p_resp_clear;			/* 0x000298 */
+
+    picreg_t		_pad_0002A0[12];		/* 0x0002{A0..F8} */
+
+    /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
+    struct {
+	picreg_t	upper;				/* 0x0003{00,,,F0} */
+	picreg_t	lower;				/* 0x0003{08,,,F8} */
+    } p_buf_addr_match[16];
+
+    /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
+    struct {
+	picreg_t	flush_w_touch;			/* 0x000{400,,,5C0} */
+	picreg_t	flush_wo_touch;			/* 0x000{408,,,5C8} */
+	picreg_t	inflight;			/* 0x000{410,,,5D0} */
+	picreg_t	prefetch;			/* 0x000{418,,,5D8} */
+	picreg_t	total_pci_retry;		/* 0x000{420,,,5E0} */
+	picreg_t	max_pci_retry;			/* 0x000{428,,,5E8} */
+	picreg_t	max_latency;			/* 0x000{430,,,5F0} */
+	picreg_t	clear_all;			/* 0x000{438,,,5F8} */
+    } p_buf_count[8];
+
+    
+    /* 0x000600-0x0009FF -- PCI/X registers */
+    picreg_t		p_pcix_bus_err_addr;		/* 0x000600 */
+    picreg_t		p_pcix_bus_err_attr;		/* 0x000608 */
+    picreg_t		p_pcix_bus_err_data;		/* 0x000610 */
+    picreg_t		p_pcix_pio_split_addr;		/* 0x000618 */
+    picreg_t		p_pcix_pio_split_attr;		/* 0x000620 */
+    picreg_t		p_pcix_dma_req_err_attr;	/* 0x000628 */
+    picreg_t		p_pcix_dma_req_err_addr;	/* 0x000630 */
+    picreg_t		p_pcix_timeout;			/* 0x000638 */
+
+    picreg_t		_pad_000640[120];		/* 0x000{640,,,9F8} */
+
+    /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
+    struct {
+	picreg_t	p_buf_addr;			/* 0x000{A00,,,AF0} */
+	picreg_t	p_buf_attr;			/* 0X000{A08,,,AF8} */
+    } p_pcix_read_buf_64[16];
+
+    struct {
+	picreg_t	p_buf_addr;			/* 0x000{B00,,,BE0} */
+	picreg_t	p_buf_attr;			/* 0x000{B08,,,BE8} */
+	picreg_t	p_buf_valid;			/* 0x000{B10,,,BF0} */
+	picreg_t	__pad1;				/* 0x000{B18,,,BF8} */
+    } p_pcix_write_buf_64[8];
+
+    /* End of Local Registers -- Start of Address Map space */
+
+    char		_pad_000c00[0x010000 - 0x000c00];
+
+    /* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */
+    picate_t		p_int_ate_ram[1024];		/* 0x010000-0x011fff */
+
+    /* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */
+    picate_t		p_int_ate_ram_mp[1024];		/* 0x012000-0x013fff */
+
+    char		_pad_014000[0x18000 - 0x014000];
+
+    /* 0x18000-0x197F8 -- PIC Write Request Ram */
+    picreg_t		p_wr_req_lower[256];		/* 0x18000 - 0x187F8 */
+    picreg_t		p_wr_req_upper[256];		/* 0x18800 - 0x18FF8 */
+    picreg_t		p_wr_req_parity[256];		/* 0x19000 - 0x197F8 */
+
+    char		_pad_019800[0x20000 - 0x019800];
+
+    /* 0x020000-0x027FFF -- PCI Device Configuration Spaces */
+    union {
+	uchar_t		c[0x1000 / 1];			/* 0x02{0000,,,7FFF} */
+	uint16_t	s[0x1000 / 2];			/* 0x02{0000,,,7FFF} */
+	uint32_t	l[0x1000 / 4];			/* 0x02{0000,,,7FFF} */
+	uint64_t	d[0x1000 / 8];			/* 0x02{0000,,,7FFF} */
+	union {
+	    uchar_t	c[0x100 / 1];
+	    uint16_t	s[0x100 / 2];
+	    uint32_t	l[0x100 / 4];
+	    uint64_t	d[0x100 / 8];
+	} f[8];
+    } p_type0_cfg_dev[8];				/* 0x02{0000,,,7FFF} */
+
+    /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
+    union {
+	uchar_t		c[0x1000 / 1];			/* 0x028000-0x029000 */
+	uint16_t	s[0x1000 / 2];			/* 0x028000-0x029000 */
+	uint32_t	l[0x1000 / 4];			/* 0x028000-0x029000 */
+	uint64_t	d[0x1000 / 8];			/* 0x028000-0x029000 */
+	union {
+	    uchar_t	c[0x100 / 1];
+	    uint16_t	s[0x100 / 2];
+	    uint32_t	l[0x100 / 4];
+	    uint64_t	d[0x100 / 8];
+	} f[8];
+    } p_type1_cfg;					/* 0x028000-0x029000 */
+
+    char		_pad_029000[0x030000-0x029000];
+
+    /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
+    union {
+	uchar_t		c[8 / 1];
+	uint16_t	s[8 / 2];
+	uint32_t	l[8 / 4];
+	uint64_t	d[8 / 8];
+    } p_pci_iack;					/* 0x030000-0x030007 */
+
+    char		_pad_030007[0x040000-0x030008];
+
+    /* 0x040000-0x040007 -- PCIX Special Cycle */
+    union {
+	uchar_t		c[8 / 1];
+	uint16_t	s[8 / 2];
+	uint32_t	l[8 / 4];
+	uint64_t	d[8 / 8];
+    } p_pcix_cycle;					/* 0x040000-0x040007 */
+} pic_t;
+
+
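Per the PIC WAR noted above (PV#854697), PIC registers must always be written as full 64-bit quantities. An illustrative read-modify-write of the interrupt enable register follows (not part of the patch; the example_ name and the assumption that pic points at the memory-mapped pic_t are for illustration only):

static void example_pic_int_enable(pic_t *pic, picreg_t bits)
{
	picreg_t val;

	val = pic->p_int_enable;	/* read the whole 64-bit register */
	val |= bits;
	pic->p_int_enable = val;	/* write back all 64 bits at once */
}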
+/*****************************************************************************
+ *************************** PIC BRIDGE MMR DEFINES **************************
+ *****************************************************************************/
+
+/*
+ * PIC STATUS register		offset 0x00000008
+ */
+#define PIC_STAT_TX_CREDIT_SHFT		PCIBR_STAT_TX_CREDIT_SHFT
+#define PIC_STAT_TX_CREDIT		PCIBR_STAT_TX_CREDIT
+#define PIC_STAT_RX_REQ_CNT_SHFT	PCIBR_STAT_RX_CREDIT_SHFT
+#define PIC_STAT_RX_REQ_CNT		PCIBR_STAT_RX_CREDIT
+#define PIC_STAT_LLP_TX_CNT_SHFT	PCIBR_STAT_LLP_TX_CNT_SHFT
+#define PIC_STAT_LLP_TX_CNT		PCIBR_STAT_LLP_TX_CNT
+#define PIC_STAT_LLP_RX_CNT_SHFT	PCIBR_STAT_LLP_RX_CNT_SHFT
+#define PIC_STAT_LLP_RX_CNT		PCIBR_STAT_LLP_RX_CNT
+#define PIC_STAT_PCIX_ACTIVE_SHFT	PCIBR_STAT_PCIX_ACTIVE_SHFT
+#define PIC_STAT_PCIX_ACTIVE		PCIBR_STAT_PCIX_ACTIVE
+#define PIC_STAT_PCIX_SPEED_SHFT	PCIBR_STAT_PCIX_SPEED_SHFT
+#define PIC_STAT_PCIX_SPEED		PCIBR_STAT_PCIX_SPEED
+
+/*
+ * PIC CONTROL register		offset 0x00000020
+ */
+#define PIC_CTRL_WIDGET_ID_SHFT		0
+#define PIC_CTRL_WIDGET_ID		(0xF << PIC_CTRL_WIDGET_ID_SHFT)
+#define PIC_CTRL_PCI_SPEED_SHFT		PCIBR_CTRL_PCI_SPEED_SHFT
+#define PIC_CTRL_PCI_SPEED		PCIBR_CTRL_PCI_SPEED
+#define PIC_CTRL_SYS_END_SHFT		PCIBR_CTRL_SYS_END_SHFT
+#define PIC_CTRL_SYS_END		PCIBR_CTRL_SYS_END
+#define PIC_CTRL_CLR_TLLP_SHFT		PCIBR_CTRL_CLR_TLLP_SHFT
+#define PIC_CTRL_CLR_TLLP		PCIBR_CTRL_CLR_TLLP
+#define PIC_CTRL_CLR_RLLP_SHFT		PCIBR_CTRL_CLR_RLLP_SHFT
+#define PIC_CTRL_CLR_RLLP		PCIBR_CTRL_CLR_RLLP
+#define PIC_CTRL_LLP_XBOW_CRD_SHFT	PCIBR_CTRL_LLP_XBOW_CRD_SHFT
+#define PIC_CTRL_CRED_LIM		PCIBR_CTRL_CRED_LIM
+#define PIC_CTRL_F_BAD_PKT_SHFT		PCIBR_CTRL_F_BAD_PKT_SHFT
+#define PIC_CTRL_F_BAD_PKT		PCIBR_CTRL_F_BAD_PKT
+#define PIC_CTRL_PAGE_SIZE_SHFT		PCIBR_CTRL_PAGE_SIZE_SHFT
+#define PIC_CTRL_PAGE_SIZE		PCIBR_CTRL_PAGE_SIZE
+#define PIC_CTRL_MEM_SWAP_SHFT		PCIBR_CTRL_MEM_SWAP_SHFT
+#define PIC_CTRL_MEM_SWAP		PCIBR_CTRL_MEM_SWAP
+#define PIC_CTRL_RST_SHFT		PCIBR_CTRL_RST_SHFT
+#define PIC_CTRL_RST_PIN(x)		PCIBR_CTRL_RST_PIN(x)
+#define PIC_CTRL_RST(n)			PCIBR_CTRL_RST(n)
+#define PIC_CTRL_RST_MASK		PCIBR_CTRL_RST_MASK
+#define PIC_CTRL_PAR_EN_REQ_SHFT	PCIBR_CTRL_PAR_EN_REQ_SHFT
+#define PIC_CTRL_PAR_EN_REQ		PCIBR_CTRL_PAR_EN_REQ
+#define PIC_CTRL_PAR_EN_RESP_SHFT	PCIBR_CTRL_PAR_EN_RESP_SHFT
+#define PIC_CTRL_PAR_EN_RESP		PCIBR_CTRL_PAR_EN_RESP
+#define PIC_CTRL_PAR_EN_ATE_SHFT	PCIBR_CTRL_PAR_EN_ATE_SHFT
+#define PIC_CTRL_PAR_EN_ATE		PCIBR_CTRL_PAR_EN_ATE
+#define PIC_CTRL_FUN_NUM_MASK		PCIBR_CTRL_FUN_NUM_MASK
+#define PIC_CTRL_FUN_NUM(x)		PCIBR_CTRL_FUN_NUM(x)
+#define PIC_CTRL_DEV_NUM_MASK		PCIBR_CTRL_BUS_NUM_MASK
+#define PIC_CTRL_DEV_NUM(x)		PCIBR_CTRL_DEV_NUM(x)
+#define PIC_CTRL_BUS_NUM_MASK		PCIBR_CTRL_BUS_NUM_MASK
+#define PIC_CTRL_BUS_NUM(x)		PCIBR_CTRL_BUS_NUM(x)
+#define PIC_CTRL_RELAX_ORDER_SHFT	PCIBR_CTRL_RELAX_ORDER_SHFT
+#define PIC_CTRL_RELAX_ORDER		PCIBR_CTRL_RELAX_ORDER
+#define PIC_CTRL_NO_SNOOP_SHFT		PCIBR_CTRL_NO_SNOOP_SHFT
+#define PIC_CTRL_NO_SNOOP		PCIBR_CTRL_NO_SNOOP
+
+/*
+ * PIC Intr Destination Addr	offset 0x00000038 
+ */
+#define PIC_INTR_DEST_ADDR		PIC_XTALK_ADDR_MASK
+#define PIC_INTR_DEST_TID_SHFT		48
+#define PIC_INTR_DEST_TID		(0xFull << PIC_INTR_DEST_TID_SHFT)
+
+/*
+ * PIC PCI Response Buffer	offset 0x00000068
+ */
+#define PIC_RSP_BUF_ADDR		PIC_XTALK_ADDR_MASK
+#define PIC_RSP_BUF_NUM_SHFT		48
+#define PIC_RSP_BUF_NUM			(0xFull << PIC_RSP_BUF_NUM_SHFT)
+#define PIC_RSP_BUF_DEV_NUM_SHFT	52
+#define PIC_RSP_BUF_DEV_NUM		(0x3ull << PIC_RSP_BUF_DEV_NUM_SHFT)
+
+/*
+ * PIC PCI DIRECT MAP register	offset 0x00000080
+ */
+#define PIC_DIRMAP_DIROFF_SHFT		PCIBR_DIRMAP_DIROFF_SHFT
+#define PIC_DIRMAP_DIROFF		PCIBR_DIRMAP_DIROFF
+#define PIC_DIRMAP_ADD512_SHFT		PCIBR_DIRMAP_ADD512_SHFT
+#define PIC_DIRMAP_ADD512		PCIBR_DIRMAP_ADD512
+#define PIC_DIRMAP_WID_SHFT		20
+#define PIC_DIRMAP_WID			(0xF << PIC_DIRMAP_WID_SHFT)
+
+#define PIC_DIRMAP_OFF_ADDRSHFT		PCIBR_DIRMAP_OFF_ADDRSHFT
+
+/* 
+ * PIC INTR STATUS register	offset 0x00000100
+ */
+#define PIC_ISR_PCIX_SPLIT_MSG_PE	PCIBR_ISR_PCIX_SPLIT_MSG_PE
+#define PIC_ISR_PCIX_SPLIT_EMSG		PCIBR_ISR_PCIX_SPLIT_EMSG
+#define PIC_ISR_PCIX_SPLIT_TO		PCIBR_ISR_PCIX_SPLIT_TO
+#define PIC_ISR_PCIX_UNEX_COMP		PCIBR_ISR_PCIX_UNEX_COMP
+#define PIC_ISR_INT_RAM_PERR		PCIBR_ISR_INT_RAM_PERR
+#define PIC_ISR_PCIX_ARB_ERR		PCIBR_ISR_PCIX_ARB_ERR
+#define PIC_ISR_PCIX_REQ_TOUT		PCIBR_ISR_PCIX_REQ_TOUT
+#define PIC_ISR_PCIX_TABORT		PCIBR_ISR_PCIX_TABORT
+#define PIC_ISR_PCIX_PERR		PCIBR_ISR_PCIX_PERR
+#define PIC_ISR_PCIX_SERR		PCIBR_ISR_PCIX_SERR
+#define PIC_ISR_PCIX_MRETRY		PCIBR_ISR_PCIX_MRETRY
+#define PIC_ISR_PCIX_MTOUT		PCIBR_ISR_PCIX_MTOUT
+#define PIC_ISR_PCIX_DA_PARITY		PCIBR_ISR_PCIX_DA_PARITY
+#define PIC_ISR_PCIX_AD_PARITY		PCIBR_ISR_PCIX_AD_PARITY
+#define PIC_ISR_PMU_PAGE_FAULT		PCIBR_ISR_PMU_PAGE_FAULT
+#define PIC_ISR_UNEXP_RESP		PCIBR_ISR_UNEXP_RESP
+#define PIC_ISR_BAD_XRESP_PKT		PCIBR_ISR_BAD_XRESP_PKT
+#define PIC_ISR_BAD_XREQ_PKT		PCIBR_ISR_BAD_XREQ_PKT
+#define PIC_ISR_RESP_XTLK_ERR		PCIBR_ISR_RESP_XTLK_ERR
+#define PIC_ISR_REQ_XTLK_ERR		PCIBR_ISR_REQ_XTLK_ERR
+#define PIC_ISR_INVLD_ADDR		PCIBR_ISR_INVLD_ADDR
+#define PIC_ISR_UNSUPPORTED_XOP		PCIBR_ISR_UNSUPPORTED_XOP
+#define PIC_ISR_XREQ_FIFO_OFLOW		PCIBR_ISR_XREQ_FIFO_OFLOW
+#define PIC_ISR_LLP_REC_SNERR		PCIBR_ISR_LLP_REC_SNERR
+#define PIC_ISR_LLP_REC_CBERR		PCIBR_ISR_LLP_REC_CBERR
+#define PIC_ISR_LLP_RCTY		PCIBR_ISR_LLP_RCTY
+#define PIC_ISR_LLP_TX_RETRY		PCIBR_ISR_LLP_TX_RETRY
+#define PIC_ISR_LLP_TCTY		PCIBR_ISR_LLP_TCTY
+#define PIC_ISR_PCI_ABORT		PCIBR_ISR_PCI_ABORT
+#define PIC_ISR_PCI_PARITY		PCIBR_ISR_PCI_PARITY
+#define PIC_ISR_PCI_SERR		PCIBR_ISR_PCI_SERR
+#define PIC_ISR_PCI_PERR		PCIBR_ISR_PCI_PERR
+#define PIC_ISR_PCI_MST_TIMEOUT		PCIBR_ISR_PCI_MST_TIMEOUT
+#define PIC_ISR_PCI_RETRY_CNT		PCIBR_ISR_PCI_RETRY_CNT
+#define PIC_ISR_XREAD_REQ_TIMEOUT	PCIBR_ISR_XREAD_REQ_TIMEOUT
+#define PIC_ISR_INT_MSK			PCIBR_ISR_INT_MSK
+#define PIC_ISR_INT(x)			PCIBR_ISR_INT(x)
+
+/*
+ * PIC ENABLE INTR register	offset 0x00000108
+ */
+#define PIC_IER_PCIX_SPLIT_MSG_PE	PCIBR_IER_PCIX_SPLIT_MSG_PE
+#define PIC_IER_PCIX_SPLIT_EMSG		PCIBR_IER_PCIX_SPLIT_EMSG
+#define PIC_IER_PCIX_SPLIT_TO		PCIBR_IER_PCIX_SPLIT_TO
+#define PIC_IER_PCIX_UNEX_COMP		PCIBR_IER_PCIX_UNEX_COMP
+#define PIC_IER_INT_RAM_PERR		PCIBR_IER_INT_RAM_PERR
+#define PIC_IER_PCIX_ARB_ERR		PCIBR_IER_PCIX_ARB_ERR
+#define PIC_IER_PCIX_REQ_TOUT		PCIBR_IER_PCIX_REQ_TOUT
+#define PIC_IER_PCIX_TABORT		PCIBR_IER_PCIX_TABORT
+#define PIC_IER_PCIX_PERR		PCIBR_IER_PCIX_PERR
+#define PIC_IER_PCIX_SERR		PCIBR_IER_PCIX_SERR
+#define PIC_IER_PCIX_MRETRY		PCIBR_IER_PCIX_MRETRY
+#define PIC_IER_PCIX_MTOUT		PCIBR_IER_PCIX_MTOUT
+#define PIC_IER_PCIX_DA_PARITY		PCIBR_IER_PCIX_DA_PARITY
+#define PIC_IER_PCIX_AD_PARITY		PCIBR_IER_PCIX_AD_PARITY
+#define PIC_IER_PMU_PAGE_FAULT		PCIBR_IER_PMU_PAGE_FAULT
+#define PIC_IER_UNEXP_RESP		PCIBR_IER_UNEXP_RESP
+#define PIC_IER_BAD_XRESP_PKT		PCIBR_IER_BAD_XRESP_PKT
+#define PIC_IER_BAD_XREQ_PKT		PCIBR_IER_BAD_XREQ_PKT
+#define PIC_IER_RESP_XTLK_ERR		PCIBR_IER_RESP_XTLK_ERR
+#define PIC_IER_REQ_XTLK_ERR		PCIBR_IER_REQ_XTLK_ERR
+#define PIC_IER_INVLD_ADDR		PCIBR_IER_INVLD_ADDR
+#define PIC_IER_UNSUPPORTED_XOP		PCIBR_IER_UNSUPPORTED_XOP
+#define PIC_IER_XREQ_FIFO_OFLOW		PCIBR_IER_XREQ_FIFO_OFLOW
+#define PIC_IER_LLP_REC_SNERR		PCIBR_IER_LLP_REC_SNERR
+#define PIC_IER_LLP_REC_CBERR		PCIBR_IER_LLP_REC_CBERR
+#define PIC_IER_LLP_RCTY		PCIBR_IER_LLP_RCTY
+#define PIC_IER_LLP_TX_RETRY		PCIBR_IER_LLP_TX_RETRY
+#define PIC_IER_LLP_TCTY		PCIBR_IER_LLP_TCTY
+#define PIC_IER_PCI_ABORT		PCIBR_IER_PCI_ABORT
+#define PIC_IER_PCI_PARITY		PCIBR_IER_PCI_PARITY
+#define PIC_IER_PCI_SERR		PCIBR_IER_PCI_SERR
+#define PIC_IER_PCI_PERR		PCIBR_IER_PCI_PERR
+#define PIC_IER_PCI_MST_TIMEOUT		PCIBR_IER_PCI_MST_TIMEOUT
+#define PIC_IER_PCI_RETRY_CNT		PCIBR_IER_PCI_RETRY_CNT
+#define PIC_IER_XREAD_REQ_TIMEOUT	PCIBR_IER_XREAD_REQ_TIMEOUT
+#define PIC_IER_INT_MSK			PCIBR_IER_INT_MSK
+#define PIC_IER_INT(x)			PCIBR_IER_INT(x)
+
+/*
+ * PIC RESET INTR register	offset 0x00000110
+ */
+#define PIC_IRR_PCIX_SPLIT_MSG_PE	PCIBR_IRR_PCIX_SPLIT_MSG_PE
+#define PIC_IRR_PCIX_SPLIT_EMSG		PCIBR_IRR_PCIX_SPLIT_EMSG
+#define PIC_IRR_PCIX_SPLIT_TO		PCIBR_IRR_PCIX_SPLIT_TO
+#define PIC_IRR_PCIX_UNEX_COMP		PCIBR_IRR_PCIX_UNEX_COMP
+#define PIC_IRR_INT_RAM_PERR		PCIBR_IRR_INT_RAM_PERR
+#define PIC_IRR_PCIX_ARB_ERR		PCIBR_IRR_PCIX_ARB_ERR
+#define PIC_IRR_PCIX_REQ_TOUT		PCIBR_IRR_PCIX_REQ_TOUT
+#define PIC_IRR_PCIX_TABORT		PCIBR_IRR_PCIX_TABORT
+#define PIC_IRR_PCIX_PERR		PCIBR_IRR_PCIX_PERR
+#define PIC_IRR_PCIX_SERR		PCIBR_IRR_PCIX_SERR
+#define PIC_IRR_PCIX_MRETRY		PCIBR_IRR_PCIX_MRETRY
+#define PIC_IRR_PCIX_MTOUT		PCIBR_IRR_PCIX_MTOUT
+#define PIC_IRR_PCIX_DA_PARITY		PCIBR_IRR_PCIX_DA_PARITY
+#define PIC_IRR_PCIX_AD_PARITY		PCIBR_IRR_PCIX_AD_PARITY
+#define PIC_IRR_PMU_PAGE_FAULT		PCIBR_IRR_PMU_PAGE_FAULT
+#define PIC_IRR_UNEXP_RESP		PCIBR_IRR_UNEXP_RESP
+#define PIC_IRR_BAD_XRESP_PKT		PCIBR_IRR_BAD_XRESP_PKT
+#define PIC_IRR_BAD_XREQ_PKT		PCIBR_IRR_BAD_XREQ_PKT
+#define PIC_IRR_RESP_XTLK_ERR		PCIBR_IRR_RESP_XTLK_ERR
+#define PIC_IRR_REQ_XTLK_ERR		PCIBR_IRR_REQ_XTLK_ERR
+#define PIC_IRR_INVLD_ADDR		PCIBR_IRR_INVLD_ADDR
+#define PIC_IRR_UNSUPPORTED_XOP		PCIBR_IRR_UNSUPPORTED_XOP
+#define PIC_IRR_XREQ_FIFO_OFLOW		PCIBR_IRR_XREQ_FIFO_OFLOW
+#define PIC_IRR_LLP_REC_SNERR		PCIBR_IRR_LLP_REC_SNERR
+#define PIC_IRR_LLP_REC_CBERR		PCIBR_IRR_LLP_REC_CBERR
+#define PIC_IRR_LLP_RCTY		PCIBR_IRR_LLP_RCTY
+#define PIC_IRR_LLP_TX_RETRY		PCIBR_IRR_LLP_TX_RETRY
+#define PIC_IRR_LLP_TCTY		PCIBR_IRR_LLP_TCTY
+#define PIC_IRR_PCI_ABORT		PCIBR_IRR_PCI_ABORT
+#define PIC_IRR_PCI_PARITY		PCIBR_IRR_PCI_PARITY
+#define PIC_IRR_PCI_SERR		PCIBR_IRR_PCI_SERR
+#define PIC_IRR_PCI_PERR		PCIBR_IRR_PCI_PERR
+#define PIC_IRR_PCI_MST_TIMEOUT		PCIBR_IRR_PCI_MST_TIMEOUT
+#define PIC_IRR_PCI_RETRY_CNT		PCIBR_IRR_PCI_RETRY_CNT
+#define PIC_IRR_XREAD_REQ_TIMEOUT	PCIBR_IRR_XREAD_REQ_TIMEOUT
+#define PIC_IRR_MULTI_CLR		PCIBR_IRR_MULTI_CLR
+#define PIC_IRR_CRP_GRP_CLR		PCIBR_IRR_CRP_GRP_CLR
+#define PIC_IRR_RESP_BUF_GRP_CLR	PCIBR_IRR_RESP_BUF_GRP_CLR
+#define PIC_IRR_REQ_DSP_GRP_CLR		PCIBR_IRR_REQ_DSP_GRP_CLR
+#define PIC_IRR_LLP_GRP_CLR		PCIBR_IRR_LLP_GRP_CLR
+#define PIC_IRR_SSRAM_GRP_CLR		PCIBR_IRR_SSRAM_GRP_CLR
+#define PIC_IRR_PCI_GRP_CLR		PCIBR_IRR_PCI_GRP_CLR
+#define PIC_IRR_GIO_GRP_CLR		PCIBR_IRR_GIO_GRP_CLR
+#define PIC_IRR_ALL_CLR			PCIBR_IRR_ALL_CLR
+
+/*
+ * PIC Intr Dev Select register	offset 0x00000120
+ */
+#define PIC_INT_DEV_SHFT(n)		PCIBR_INT_DEV_SHFT(n)
+#define PIC_INT_DEV_MASK(n)		PCIBR_INT_DEV_MASK(n)
+
+/*
+ * PIC PCI Host Intr Addr	offset 0x00000130 - 0x00000168
+ */
+#define PIC_HOST_INTR_ADDR		PIC_XTALK_ADDR_MASK
+#define PIC_HOST_INTR_FLD_SHFT		48	
+#define PIC_HOST_INTR_FLD		(0xFFull << PIC_HOST_INTR_FLD_SHFT)
+
+/*
+ * PIC DEVICE(x) register	offset 0x00000200
+ */
+#define PIC_DEV_OFF_ADDR_SHFT		PCIBR_DEV_OFF_ADDR_SHFT
+#define PIC_DEV_OFF_MASK		PCIBR_DEV_OFF_MASK
+#define PIC_DEV_DEV_IO_MEM		PCIBR_DEV_DEV_IO_MEM
+#define PIC_DEV_DEV_SWAP		PCIBR_DEV_DEV_SWAP
+#define PIC_DEV_GBR			PCIBR_DEV_GBR
+#define PIC_DEV_BARRIER			PCIBR_DEV_BARRIER
+#define PIC_DEV_COH			PCIBR_DEV_COH
+#define PIC_DEV_PRECISE			PCIBR_DEV_PRECISE
+#define PIC_DEV_PREF			PCIBR_DEV_PREF
+#define PIC_DEV_SWAP_DIR		PCIBR_DEV_SWAP_DIR
+#define PIC_DEV_RT			PCIBR_DEV_RT
+#define PIC_DEV_DEV_SIZE		PCIBR_DEV_DEV_SIZE
+#define PIC_DEV_DIR_WRGA_EN		PCIBR_DEV_DIR_WRGA_EN
+#define PIC_DEV_VIRTUAL_EN		PCIBR_DEV_VIRTUAL_EN
+#define PIC_DEV_FORCE_PCI_PAR		PCIBR_DEV_FORCE_PCI_PAR
+#define PIC_DEV_PAGE_CHK_DIS		PCIBR_DEV_PAGE_CHK_DIS
+#define PIC_DEV_ERR_LOCK_EN		PCIBR_DEV_ERR_LOCK_EN
+
+/*
+ * PIC Even & Odd RRB registers	offset 0x000000280 & 0x000000288
+ */
+/* Individual RRB masks after shifting down */
+#define PIC_RRB_EN			PCIBR_RRB_EN
+#define PIC_RRB_DEV			PCIBR_RRB_DEV
+#define PIC_RRB_VDEV			PCIBR_RRB_VDEV
+#define PIC_RRB_PDEV			PCIBR_RRB_PDEV
+
+/*
+ * PIC RRB status register 	offset 0x00000290
+ */
+#define PIC_RRB_VALID(r)		PCIBR_RRB_VALID(r)
+#define PIC_RRB_INUSE(r)		PCIBR_RRB_INUSE(r)
+
+/*
+ * PIC RRB clear register 	offset 0x00000298
+ */
+#define PIC_RRB_CLEAR(r)		PCIBR_RRB_CLEAR(r)
+
+
+/*****************************************************************************
+ ****************************** PIC DMA DEFINES ******************************
+ *****************************************************************************/
+
+/*
+ * PIC - PMU Address Translation Entry defines
+ */
+#define PIC_ATE_V			PCIBR_ATE_V
+#define PIC_ATE_CO			PCIBR_ATE_CO
+#define PIC_ATE_PREC			PCIBR_ATE_PREC
+#define PIC_ATE_PREF			PCIBR_ATE_PREF
+#define PIC_ATE_BAR			PCIBR_ATE_BAR
+#define PIC_ATE_TARGETID_SHFT		8
+#define PIC_ATE_TARGETID		(0xF << PIC_ATE_TARGETID_SHFT)
+#define PIC_ATE_ADDR_SHFT		PCIBR_ATE_ADDR_SHFT
+#define PIC_ATE_ADDR_MASK		(0xFFFFFFFFF000)
+
+/* bit 29 of the pci address is the SWAP bit */
+#define PIC_ATE_SWAPSHIFT		ATE_SWAPSHIFT
+#define PIC_SWAP_ON(x)			ATE_SWAP_ON(x)
+#define PIC_SWAP_OFF(x)			ATE_SWAP_OFF(x)
+
+/*  
+ * Bridge 32bit Bus DMA addresses  
+ */
+#define PIC_LOCAL_BASE			PCIBR_LOCAL_BASE
+#define PIC_DMA_MAPPED_BASE		PCIBR_DMA_MAPPED_BASE
+#define PIC_DMA_MAPPED_SIZE		PCIBR_DMA_MAPPED_SIZE
+#define PIC_DMA_DIRECT_BASE		PCIBR_DMA_DIRECT_BASE
+#define PIC_DMA_DIRECT_SIZE		PCIBR_DMA_DIRECT_SIZE
+
+
+/*****************************************************************************
+ ****************************** PIC PIO DEFINES ******************************
+ *****************************************************************************/
+
+/* NOTE: Bus one offset to PCI Widget Device Space. */
+#define PIC_BUS1_OFFSET				0x800000 
+
+/*
+ * Macros for Xtalk to Bridge bus (PCI) PIO.  Refer to section 5.2.1 figure
+ * 4 of the "PCI Interface Chip (PIC) Volume II Programmer's Reference" 
+ */
+/* XTALK addresses that map into PIC Bridge Bus addr space */
+#define PICBRIDGE0_PIO32_XTALK_ALIAS_BASE	0x000040000000L
+#define PICBRIDGE0_PIO32_XTALK_ALIAS_LIMIT	0x00007FFFFFFFL
+#define PICBRIDGE0_PIO64_XTALK_ALIAS_BASE	0x000080000000L
+#define PICBRIDGE0_PIO64_XTALK_ALIAS_LIMIT	0x0000BFFFFFFFL
+#define PICBRIDGE1_PIO32_XTALK_ALIAS_BASE	0x0000C0000000L
+#define PICBRIDGE1_PIO32_XTALK_ALIAS_LIMIT	0x0000FFFFFFFFL
+#define PICBRIDGE1_PIO64_XTALK_ALIAS_BASE	0x000100000000L
+#define PICBRIDGE1_PIO64_XTALK_ALIAS_LIMIT	0x00013FFFFFFFL
+
+/* XTALK addresses that map into PCI addresses */
+#define PICBRIDGE0_PCI_MEM32_BASE	PICBRIDGE0_PIO32_XTALK_ALIAS_BASE
+#define PICBRIDGE0_PCI_MEM32_LIMIT	PICBRIDGE0_PIO32_XTALK_ALIAS_LIMIT
+#define PICBRIDGE0_PCI_MEM64_BASE	PICBRIDGE0_PIO64_XTALK_ALIAS_BASE
+#define PICBRIDGE0_PCI_MEM64_LIMIT	PICBRIDGE0_PIO64_XTALK_ALIAS_LIMIT
+#define PICBRIDGE1_PCI_MEM32_BASE	PICBRIDGE1_PIO32_XTALK_ALIAS_BASE
+#define PICBRIDGE1_PCI_MEM32_LIMIT	PICBRIDGE1_PIO32_XTALK_ALIAS_LIMIT
+#define PICBRIDGE1_PCI_MEM64_BASE	PICBRIDGE1_PIO64_XTALK_ALIAS_BASE
+#define PICBRIDGE1_PCI_MEM64_LIMIT	PICBRIDGE1_PIO64_XTALK_ALIAS_LIMIT
+
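The second PIC bus exposes its registers PIC_BUS1_OFFSET bytes above the bus 0 widget space, and PIO to either bus is routed through the XTALK alias windows defined above. A minimal sketch (not part of the patch; the example_ name and the assumption that bus0_base is the mapped bus 0 pic_t are for illustration):

static pic_t *example_pic_bus_base(pic_t *bus0_base, int busnum)
{
	if (busnum == 0)
		return bus0_base;

	/* Bus 1 registers sit PIC_BUS1_OFFSET (0x800000) further up. */
	return (pic_t *)((char *) bus0_base + PIC_BUS1_OFFSET);
}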
+/*****************************************************************************
+ ****************************** PIC MISC DEFINES *****************************
+ *****************************************************************************/
 
-/*
- * Identification Register
- *
- * The Identification register is a read only register used by the host CPU
- * during configuration to determine the type of the widget. The format is
- * the same as defined in IEEE 1149.1 JTAG Device Identification Register.
- */
-	typedef union pic_id_reg_u {
-		picreg_t	pic_id_reg_regval;
-		struct {
-			picreg_t          :	32; /* 63:32 */
-			picreg_t rev_num  :	4; /* 31:28 */
-			picreg_t part_num :	16; /* 27:12 */
-			picreg_t mfg_num  :	11; /* 11:1 */
-			picreg_t          :	1; /* 0:0 */
-		} pic_id_reg_fld_s;
-	} pic_id_reg_u_t;
-/*
- * Status Register
- *
- * The status register is a read register which holds status information of the
- * Bus Subsection.
- */
-	typedef union pic_stat_reg_u {
-		picreg_t	pic_stat_reg_regval;
-		struct {
-			picreg_t                :	28; /* 63:36 */
-			picreg_t pci_x_speed    :	2; /* 35:34 */
-			picreg_t pci_x_active   :	1; /* 33:33 */
-			picreg_t                :	1; /* 32:32 */
-			picreg_t llp_rec_cnt    :	8; /* 31:24 */
-			picreg_t llp_tx_cnt     :	8; /* 23:16 */
-			picreg_t rx_credit_cnt  :	4; /* 15:12 */
-			picreg_t tx_credit_cnt  :	4; /* 11:8 */
-			picreg_t pci_misc_input :	8; /* 7:0 */
-		} pic_stat_reg_fld_s;
-	} pic_stat_reg_u_t;
-/*
- * Upper Address Holding Register Bus Side Errors
- *
- * The upper address holding register is a read only register which contains
- * the upper 16-bits of the address when certain error occurs (see error cases
- * chapter). Subsequent errors are not logged until the error is cleared. The
- * last logged value is held until the group is cleared and enabled.
- */
-	typedef union pic_upper_bus_err_u {
-		picreg_t	pic_upper_bus_err_regval;
-		struct {
-			picreg_t          :	32; /* 63:32 */
-			picreg_t          :	16; /* 31:16 */
-			picreg_t upp_addr :	16; /* 15:0 */
-		} pic_upper_bus_err_fld_s;
-	} pic_upper_bus_err_u_t;
-/*
- * Lower Address Holding Register Bus Side Errors
- *
- * The lower address holding register is a read only register which contains
- * the address which either can be accessed as a word or double word. Sub-
- * sequent errors are not logged until the error is cleared. The last logged
- * value is held until the group is cleared and enabled.
- */
-	typedef union pic_lower_bus_err_u {
-		picreg_t	pic_lower_bus_err_regval;
-		struct {
-			picreg_t          :	16; /* 63:48 */
-			picreg_t upp_addr :	16; /* 47:32 */
-			picreg_t low_addr :	32; /* 31:0 */
-		} pic_lower_bus_err_fld_s;
-	} pic_lower_bus_err_u_t;
-/*
- * Control Register
- *
- * The control register is a read/write register which holds control informa-
- * tion for the bus subsection.
- */
-	typedef union pic_control_reg_u {
-		picreg_t	pic_control_reg_regval;
-		struct {
-			picreg_t                :	32; /* 63:32 */
-			picreg_t                :	4; /* 31:28 */
-			picreg_t rst_pin_n      :	4; /* 27:24 */
-			picreg_t                :	1; /* 23:23 */
-			picreg_t mem_swap       :	1; /* 22:22 */
-			picreg_t page_size      :	1; /* 21:21 */
-			picreg_t                :	4; /* 20:17 */
-			picreg_t f_bad_pkt      :	1; /* 16:16 */
-			picreg_t llp_xbar_crd   :	4; /* 15:12 */
-			picreg_t clr_rllp_cnt   :	1; /* 11:11 */
-			picreg_t clr_tllp_cnt   :	1; /* 10:10 */
-			picreg_t sys_end        :	1; /* 9:9 */
-			picreg_t                :	3; /* 8:6 */
-			picreg_t pci_speed      :	2; /* 5:4 */
-			picreg_t widget_id      :	4; /* 3:0 */
-		} pic_control_reg_fld_s;
-	} pic_control_reg_u_t;
-/*
- * PCI/PCI-X Request Time-out Value Register
- *
- * This register contains the reload value for the response timer. The request
- * timer counts every 960 nS (32 PCI clocks)
- */
-	typedef union pic_pci_req_to_u {
-		picreg_t	pic_pci_req_to_regval;
-		struct {
-			picreg_t          :	32; /* 63:32 */
-			picreg_t          :	12; /* 31:20 */
-			picreg_t time_out :	20; /* 19:0 */
-		} pic_pci_req_to_fld_s;
-	} pic_pci_req_to_u_t;
-/*
- * Interrupt Destination Upper Address Register
- *
- * The interrupt destination upper address register is a read/write register
- * containing the upper 16-bits of address of the host to which the interrupt
- * is targeted. In addition the target ID is also contained in this register for
- * use in Crosstalk mode.
- */
-	typedef union pic_int_desc_upper_u {
-		picreg_t	pic_int_desc_upper_regval;
-		struct {
-			picreg_t           :	32; /* 63:32 */
-			picreg_t           :	12; /* 31:20 */
-			picreg_t target_id :	4; /* 19:16 */
-			picreg_t upp_addr  :	16; /* 15:0 */
-		} pic_int_desc_upper_fld_s;
-	} pic_int_desc_upper_u_t;
-/*
- * Interrupt Destination Lower Address Register
- *
- * The interrupt destination lower address register is a read/write register
- * which contains the entire address of the host to which the interrupt is tar-
- * geted. In addition the target ID is also contained in this register for use in
- * Crosstalk mode.
- */
-	typedef union pic_int_desc_lower_u {
-		picreg_t	pic_int_desc_lower_regval;
-		struct {
-			picreg_t           :	12; /* 63:52 */
-			picreg_t target_id :	4; /* 51:48 */
-			picreg_t upp_addr  :	16; /* 47:32 */
-			picreg_t low_addr  :	32; /* 31:0 */
-		} pic_int_desc_lower_fld_s;
-	} pic_int_desc_lower_u_t;
-/*
- * Command Word Holding Register Bus Side Errors
- *
- * The command word holding is a read register that holds the command
- * word of a Crosstalk packet when errors occur on the link side (see error
- * chapter). Errors are indicated with error bits in the interrupt status regis-
- * ter. Subsequent errors are not logged until the interrupt is cleared..
- */
-	typedef union pic_cmd_word_bus_err_u {
-		picreg_t	pic_cmd_word_bus_err_regval;
-		struct {
-			picreg_t          :	32; /* 63:32 */
-			picreg_t didn     :	4; /* 31:28 */
-			picreg_t sidn     :	4; /* 27:24 */
-			picreg_t pactyp   :	4; /* 23:20 */
-			picreg_t tnum     :	5; /* 19:15 */
-			picreg_t coherent :	1; /* 14:14 */
-			picreg_t ds       :	2; /* 13:12 */
-			picreg_t gbr      :	1; /* 11:11 */
-			picreg_t vbpm     :	1; /* 10:10 */
-			picreg_t error    :	1; /* 9:9 */
-			picreg_t barrier  :	1; /* 8:8 */
-			picreg_t          :	8; /* 7:0 */
-		} pic_cmd_word_bus_err_fld_s;
-	} pic_cmd_word_bus_err_u_t;
-/*
- * LLP Configuration Register
- *
- * This register contains the configuration information for the LLP modules
- * and is only valid on bus 0 side.
- */
-	typedef union pic_llp_cfg_u {
-		picreg_t	pic_llp_cfg_regval;
-		struct {
-			picreg_t                 :	32; /* 63:32 */
-			picreg_t                 :	6; /* 31:26 */
-			picreg_t llp_maxretry    :	10; /* 25:16 */
-			picreg_t llp_nulltimeout :	6; /* 15:10 */
-			picreg_t llp_maxburst    :	10; /* 9:0 */
-		} pic_llp_cfg_fld_s;
-	} pic_llp_cfg_u_t;
-/*
- * PCI/PCI-X Target Flush Register
- *
- * When read, this register will return a 0x00 after all previous transfers to
- * the PCI bus subsection have completed.
- */
+#define PIC_XTALK_ADDR_MASK			0x0000FFFFFFFFFFFF
 
-/*
- * Command Word Holding Register Link Side Errors
- *
- * The command word holding is a read-only register that holds the com-
- * mand word of a Crosstalk packet when request fifo overflow or unexpect-
- * ed response errors occur. Errors are indicated with error bits in the
- * interrupt status register. Subsequent errors are not logged until this inter-
- * rupt is cleared.
- */
-	typedef union pic_cmd_word_link_err_u {
-		picreg_t	pic_cmd_word_link_err_regval;
-		struct {
-			picreg_t          :	32; /* 63:32 */
-			picreg_t didn     :	4; /* 31:28 */
-			picreg_t sidn     :	4; /* 27:24 */
-			picreg_t pactyp   :	4; /* 23:20 */
-			picreg_t tnum     :	5; /* 19:15 */
-			picreg_t coherent :	1; /* 14:14 */
-			picreg_t ds       :	2; /* 13:12 */
-			picreg_t gbr      :	1; /* 11:11 */
-			picreg_t vbpm     :	1; /* 10:10 */
-			picreg_t error    :	1; /* 9:9 */
-			picreg_t barrier  :	1; /* 8:8 */
-			picreg_t          :	8; /* 7:0 */
-		} pic_cmd_word_link_err_fld_s;
-	} pic_cmd_word_link_err_u_t;
-/*
- * PCI Response Buffer Error Upper Address Holding Reg
- *
- * The response buffer error upper address holding register is a read only
- * register which contains the upper 16-bits of the address when error asso-
- * ciated with response buffer entries occur. Subsequent errors are not
- * logged until the interrupt is cleared.
- */
-	typedef union pic_pci_rbuf_err_upper_u {
-		picreg_t	pic_pci_rbuf_err_upper_regval;
-		struct {
-			picreg_t          :	32; /* 63:32 */
-			picreg_t          :	9; /* 31:23 */
-			picreg_t dev_num  :	3; /* 22:20 */
-			picreg_t buff_num :	4; /* 19:16 */
-			picreg_t upp_addr :	16; /* 15:0 */
-		} pic_pci_rbuf_err_upper_fld_s;
-	} pic_pci_rbuf_err_upper_u_t;
-/*
- * PCI Response Buffer Error Lower Address Holding Reg
- *
- * The response buffer error lower address holding register is a read only
- * register which contains the address of the error associated with response
- * buffer entries. Subsequent errors are not logged until the interrupt is
- * cleared.
- */
-	typedef union pic_pci_rbuf_err_lower_u {
-		picreg_t	pic_pci_rbuf_err_lower_regval;
-		struct {
-			picreg_t          :	9; /* 63:55 */
-			picreg_t dev_num  :	3; /* 54:52 */
-			picreg_t buff_num :	4; /* 51:48 */
-			picreg_t upp_addr :	16; /* 47:32 */
-			picreg_t low_addr :	32; /* 31:0 */
-		} pic_pci_rbuf_err_lower_fld_s;
-	} pic_pci_rbuf_err_lower_u_t;
-/*
- * Test Pin Control Register
- *
- * This register selects the output function and value to the four test pins on
- * the PIC .
- */
-	typedef union pic_test_pin_cntl_u {
-		picreg_t	pic_test_pin_cntl_regval;
-		struct {
-			picreg_t            :	32; /* 63:32 */
-			picreg_t            :	8; /* 31:24 */
-			picreg_t tdata_out  :	8; /* 23:16 */
-			picreg_t sel_tpin_7 :	2; /* 15:14 */
-			picreg_t sel_tpin_6 :	2; /* 13:12 */
-			picreg_t sel_tpin_5 :	2; /* 11:10 */
-			picreg_t sel_tpin_4 :	2; /* 9:8 */
-			picreg_t sel_tpin_3 :	2; /* 7:6 */
-			picreg_t sel_tpin_2 :	2; /* 5:4 */
-			picreg_t sel_tpin_1 :	2; /* 3:2 */
-			picreg_t sel_tpin_0 :	2; /* 1:0 */
-		} pic_test_pin_cntl_fld_s;
-	} pic_test_pin_cntl_u_t;
-/*
- * Address Holding Register Link Side Errors
- *
- * The address holding register is a read only register which contains the ad-
- * dress which either can be accessed as a word or double word. Subsequent
- * errors are not logged until the error is cleared. The last logged value is
- * held until the group is cleared and enabled.
- */
-	typedef union pic_p_addr_lkerr_u {
-		picreg_t	pic_p_addr_lkerr_regval;
-		struct {
-			picreg_t          :	16; /* 63:48 */
-			picreg_t upp_addr :	16; /* 47:32 */
-			picreg_t low_addr :	32; /* 31:0 */
-		} pic_p_addr_lkerr_fld_s;
-	} pic_p_addr_lkerr_u_t;
-/*
- * PCI Direct Mapping Register
- *
- * This register is used to relocate a 2 GByte region for PCI to Crosstalk
- * transfers.
- */
-	typedef union pic_p_dir_map_u {
-		picreg_t	pic_p_dir_map_regval;
-		struct {
-			picreg_t            :	32; /* 63:32 */
-			picreg_t            :	8; /* 31:24 */
-			picreg_t dir_w_id   :	4; /* 23:20 */
-			picreg_t            :	2; /* 19:18 */
-			picreg_t dir_add512 :	1; /* 17:17 */
-			picreg_t dir_off    :	17; /* 16:0 */
-		} pic_p_dir_map_fld_s;
-	} pic_p_dir_map_u_t;
-/*
- * PCI Page Map Fault Address Register
- *
- * This register contains the address and device number when a page map
- * fault occurred.
- */
-	typedef union pic_p_map_fault_u {
-		picreg_t	pic_p_map_fault_regval;
-		struct {
-			picreg_t             :	32; /* 63:32 */
-			picreg_t             :	10; /* 31:22 */
-			picreg_t pci_addr    :	18; /* 21:4 */
-			picreg_t             :	1; /* 3:3 */
-			picreg_t pci_dev_num :	3; /* 2:0 */
-		} pic_p_map_fault_fld_s;
-	} pic_p_map_fault_u_t;
-/*
- * Arbitration Register
- *
- * This register defines the priority and bus time out timing in PCI bus arbi-
- * tration.
- */
-	typedef union pic_p_arb_u {
-		picreg_t	pic_p_arb_regval;
-		struct {
-			picreg_t               :	32; /* 63:32 */
-			picreg_t               :	8; /* 31:24 */
-			picreg_t dev_broke     :	4; /* 23:20 */
-			picreg_t               :	2; /* 19:18 */
-			picreg_t req_wait_tick :	2; /* 17:16 */
-			picreg_t               :	4; /* 15:12 */
-			picreg_t req_wait_en   :	4; /* 11:8 */
-			picreg_t disarb        :	1; /* 7:7 */
-			picreg_t freeze_gnt    :	1; /* 6:6 */
-			picreg_t               :	1; /* 5:5 */
-			picreg_t en_bridge_hi  :	2; /* 4:3 */
-			picreg_t               :	1; /* 2:2 */
-			picreg_t en_bridge_lo  :	2; /* 1:0 */
-		} pic_p_arb_fld_s;
-	} pic_p_arb_u_t;
-/*
- * Internal Ram Parity Error Register
- *
- * This register logs information about parity errors on internal ram access.
- */
-	typedef union pic_p_ram_perr_u {
-		picreg_t	pic_p_ram_perr_regval;
-		struct {
-			picreg_t         	     :	6; /* 63:58 */
-			picreg_t ate_err_addr        :	10; /* 57:48 */
-			picreg_t         	     :	7; /* 47:41 */
-			picreg_t rd_resp_err_addr    :	9; /* 40:32 */
-			picreg_t wrt_resp_err_addr   :	8; /* 31:24 */
-			picreg_t         	     :	2; /* 23:22 */
-			picreg_t ate_err 	     :	1; /* 21:21 */
-			picreg_t rd_resp_err         :	1; /* 20:20 */
-			picreg_t wrt_resp_err        :	1; /* 19:19 */
-			picreg_t dbe_ate 	     :	3; /* 18:16 */
-			picreg_t dbe_rd  	     :	8; /* 15:8 */
-			picreg_t dbe_wrt 	     :	8; /* 7:0 */
-		} pic_p_ram_perr_fld_s;
-	} pic_p_ram_perr_u_t;
-/*
- * Time-out Register
- *
- * This register determines retry hold off and max retries allowed for PIO
- * accesses to PCI/PCI-X.
- */
-	typedef union pic_p_bus_timeout_u {
-		picreg_t	pic_p_bus_timeout_regval;
-		struct {
-			picreg_t               :	32; /* 63:32 */
-			picreg_t               :	11; /* 31:21 */
-			picreg_t pci_retry_hld :	5; /* 20:16 */
-			picreg_t               :	6; /* 15:10 */
-			picreg_t pci_retry_cnt :	10; /* 9:0 */
-		} pic_p_bus_timeout_fld_s;
-	} pic_p_bus_timeout_u_t;
-/*
- * PCI/PCI-X Type 1 Configuration Register
- *
- * This register is use during accesses to the PCI/PCI-X type 1 configuration
- * space. The bits in this register are used to supplement the address during
- * the configuration cycle to select the correct secondary bus and device.
- */
-	typedef union pic_type1_cfg_u {
-		picreg_t	pic_type1_cfg_regval;
-		struct {
-			picreg_t         :	32; /* 63:32 */
-			picreg_t         :	8; /* 31:24 */
-			picreg_t bus_num :	8; /* 23:16 */
-			picreg_t dev_num :	5; /* 15:11 */
-			picreg_t         :	11; /* 10:0 */
-		} pic_type1_cfg_fld_s;
-	} pic_type1_cfg_u_t;
-/*
- * PCI Bus Error Upper Address Holding Register
- *
- * This register holds the value of the upper address on the PCI Bus when an
- * error occurs.
- */
-	typedef union pic_p_pci_err_upper_u {
-		picreg_t	pic_p_pci_err_upper_regval;
-		struct {
-			picreg_t                :	32; /* 63:32 */
-			picreg_t                :	4; /* 31:28 */
-			picreg_t pci_xtalk_did  :	4; /* 27:24 */
-			picreg_t                :	2; /* 23:22 */
-			picreg_t pci_dac        :	1; /* 21:21 */
-			picreg_t pci_dev_master :	1; /* 20:20 */
-			picreg_t pci_vdev       :	1; /* 19:19 */
-			picreg_t pci_dev_num    :	3; /* 18:16 */
-			picreg_t pci_uaddr_err  :	16; /* 15:0 */
-		} pic_p_pci_err_upper_fld_s;
-	} pic_p_pci_err_upper_u_t;
-/*
- * PCI Bus Error Lower Address Holding Register
- *
- * This register holds the value of the lower address on the PCI Bus when an
- * error occurs.
- */
-	typedef union pic_p_pci_err_lower_u {
-		picreg_t	pic_p_pci_err_lower_regval;
-		struct {
-			picreg_t                :	4; /* 63:60 */
-			picreg_t pci_xtalk_did  :	4; /* 59:56 */
-			picreg_t                :	2; /* 55:54 */
-			picreg_t pci_dac        :	1; /* 53:53 */
-			picreg_t pci_dev_master :	1; /* 52:52 */
-			picreg_t pci_vdev       :	1; /* 51:51 */
-			picreg_t pci_dev_num    :	3; /* 50:48 */
-			picreg_t pci_uaddr_err  :	16; /* 47:32 */
-			picreg_t pci_laddr_err  :	32; /* 31:0 */
-		} pic_p_pci_err_lower_fld_s;
-	} pic_p_pci_err_lower_u_t;
-/*
- * PCI-X Error Address Register
- *
- * This register contains the address on the PCI-X bus when an error oc-
- * curred.
- */
-	typedef union pic_p_pcix_err_addr_u {
-		picreg_t	pic_p_pcix_err_addr_regval;
-		struct {
-			picreg_t pcix_err_addr :	64; /* 63:0 */
-		} pic_p_pcix_err_addr_fld_s;
-	} pic_p_pcix_err_addr_u_t;
-/*
- * PCI-X Error Attribute Register
- *
- * This register contains the attribute data on the PCI-X bus when an error
- * occurred.
- */
-	typedef union pic_p_pcix_err_attr_u {
-		picreg_t	pic_p_pcix_err_attr_regval;
-		struct {
-			picreg_t            :	16; /* 63:48 */
-			picreg_t bus_cmd    :	4; /* 47:44 */
-			picreg_t byte_cnt   :	12; /* 43:32 */
-			picreg_t            :	1; /* 31:31 */
-			picreg_t ns         :	1; /* 30:30 */
-			picreg_t ro         :	1; /* 29:29 */
-			picreg_t tag        :	5; /* 28:24 */
-			picreg_t bus_num    :	8; /* 23:16 */
-			picreg_t dev_num    :	5; /* 15:11 */
-			picreg_t fun_num    :	3; /* 10:8 */
-			picreg_t l_byte_cnt :	8; /* 7:0 */
-		} pic_p_pcix_err_attr_fld_s;
-	} pic_p_pcix_err_attr_u_t;
-/*
- * PCI-X Error Data Register
- *
- * This register contains the Data on the PCI-X bus when an error occurred.
- */
-	typedef union pic_p_pcix_err_data_u {
-		picreg_t	pic_p_pcix_err_data_regval;
-		struct {
-			picreg_t pcix_err_data :	64; /* 63:0 */
-		} pic_p_pcix_err_data_fld_s;
-	} pic_p_pcix_err_data_u_t;
-/*
- * PCI-X Read Request Timeout Error Register
- *
- * This register contains a pointer into the PCI-X read data structure.
- */
-	typedef union pic_p_pcix_read_req_to_u {
-		picreg_t	pic_p_pcix_read_req_to_regval;
-		struct {
-			picreg_t                :	55; /* 63:9 */
-			picreg_t rd_buff_loc    :	5; /* 8:4 */
-			picreg_t rd_buff_struct :	4; /* 3:0 */
-		} pic_p_pcix_read_req_to_fld_s;
-	} pic_p_pcix_read_req_to_u_t;
-/*
- * INT_STATUS Register
- *
- * This is the current interrupt status register which maintains the current
- * status of all the interrupting devices which generated a n interrupt. This
- * register is read only and all the bits are active high. A high bit at
- * INT_STATE means the corresponding INT_N pin has been asserted
- * (low).
- */
-	typedef union pic_p_int_status_u {
-		picreg_t	pic_p_int_status_regval;
-		struct {
-			picreg_t                  :	22; /* 63:42 */
-			picreg_t int_ram_perr     :	1; /* 41:41 */
-			picreg_t bus_arb_broke    :	1; /* 40:40 */
-			picreg_t pci_x_req_tout   :	1; /* 39:39 */
-			picreg_t pci_x_tabort     :	1; /* 38:38 */
-			picreg_t pci_x_perr       :	1; /* 37:37 */
-			picreg_t pci_x_serr       :	1; /* 36:36 */
-			picreg_t pci_x_mretry     :	1; /* 35:35 */
-			picreg_t pci_x_mtout      :	1; /* 34:34 */
-			picreg_t pci_x_da_parity  :	1; /* 33:33 */
-			picreg_t pci_x_ad_parity  :	1; /* 32:32 */
-			picreg_t                  :	1; /* 31:31 */
-			picreg_t pmu_page_fault   :	1; /* 30:30 */
-			picreg_t unexpected_resp  :	1; /* 29:29 */
-			picreg_t bad_xresp_packet :	1; /* 28:28 */
-			picreg_t bad_xreq_packet  :	1; /* 27:27 */
-			picreg_t resp_xtalk_error :	1; /* 26:26 */
-			picreg_t req_xtalk_error  :	1; /* 25:25 */
-			picreg_t invalid_access   :	1; /* 24:24 */
-			picreg_t unsupported_xop  :	1; /* 23:23 */
-			picreg_t xreq_fifo_oflow  :	1; /* 22:22 */
-			picreg_t llp_rec_snerror  :	1; /* 21:21 */
-			picreg_t llp_rec_cberror  :	1; /* 20:20 */
-			picreg_t llp_rcty         :	1; /* 19:19 */
-			picreg_t llp_tx_retry     :	1; /* 18:18 */
-			picreg_t llp_tcty         :	1; /* 17:17 */
-			picreg_t                  :	1; /* 16:16 */
-			picreg_t pci_abort        :	1; /* 15:15 */
-			picreg_t pci_parity       :	1; /* 14:14 */
-			picreg_t pci_serr         :	1; /* 13:13 */
-			picreg_t pci_perr         :	1; /* 12:12 */
-			picreg_t pci_master_tout  :	1; /* 11:11 */
-			picreg_t pci_retry_cnt    :	1; /* 10:10 */
-			picreg_t xread_req_tout   :	1; /* 9:9 */
-			picreg_t                  :	1; /* 8:8 */
-			picreg_t int_state        :	8; /* 7:0 */
-		} pic_p_int_status_fld_s;
-	} pic_p_int_status_u_t;
-/*
- * Interrupt Enable Register
- *
- * This register enables the reporting of interrupt to the host. Each bit in this
- * register corresponds to the same bit in Interrupt Status register. All bits
- * are zero after reset.
- */
-	typedef union pic_p_int_enable_u {
-		picreg_t	pic_p_int_enable_regval;
-		struct {
-			picreg_t                     :	22; /* 63:42 */
-			picreg_t en_int_ram_perr     :	1; /* 41:41 */
-			picreg_t en_bus_arb_broke    :	1; /* 40:40 */
-			picreg_t en_pci_x_req_tout   :	1; /* 39:39 */
-			picreg_t en_pci_x_tabort     :	1; /* 38:38 */
-			picreg_t en_pci_x_perr       :	1; /* 37:37 */
-			picreg_t en_pci_x_serr       :	1; /* 36:36 */
-			picreg_t en_pci_x_mretry     :	1; /* 35:35 */
-			picreg_t en_pci_x_mtout      :	1; /* 34:34 */
-			picreg_t en_pci_x_da_parity  :	1; /* 33:33 */
-			picreg_t en_pci_x_ad_parity  :	1; /* 32:32 */
-			picreg_t                     :	1; /* 31:31 */
-			picreg_t en_pmu_page_fault   :	1; /* 30:30 */
-			picreg_t en_unexpected_resp  :	1; /* 29:29 */
-			picreg_t en_bad_xresp_packet :	1; /* 28:28 */
-			picreg_t en_bad_xreq_packet  :	1; /* 27:27 */
-			picreg_t en_resp_xtalk_error :	1; /* 26:26 */
-			picreg_t en_req_xtalk_error  :	1; /* 25:25 */
-			picreg_t en_invalid_access   :	1; /* 24:24 */
-			picreg_t en_unsupported_xop  :	1; /* 23:23 */
-			picreg_t en_xreq_fifo_oflow  :	1; /* 22:22 */
-			picreg_t en_llp_rec_snerror  :	1; /* 21:21 */
-			picreg_t en_llp_rec_cberror  :	1; /* 20:20 */
-			picreg_t en_llp_rcty         :	1; /* 19:19 */
-			picreg_t en_llp_tx_retry     :	1; /* 18:18 */
-			picreg_t en_llp_tcty         :	1; /* 17:17 */
-			picreg_t                     :	1; /* 16:16 */
-			picreg_t en_pci_abort        :	1; /* 15:15 */
-			picreg_t en_pci_parity       :	1; /* 14:14 */
-			picreg_t en_pci_serr         :	1; /* 13:13 */
-			picreg_t en_pci_perr         :	1; /* 12:12 */
-			picreg_t en_pci_master_tout  :	1; /* 11:11 */
-			picreg_t en_pci_retry_cnt    :	1; /* 10:10 */
-			picreg_t en_xread_req_tout   :	1; /* 9:9 */
-			picreg_t                     :	1; /* 8:8 */
-			picreg_t en_int_state        :	8; /* 7:0 */
-		} pic_p_int_enable_fld_s;
-	} pic_p_int_enable_u_t;
-/*
- * Reset Interrupt Register
- *
- * A write of a "1" clears the bit and rearms the error registers. Writes also
- * clear the error view register.
- */
-	typedef union pic_p_int_rst_u {
-		picreg_t	pic_p_int_rst_regval;
-		struct {
-			picreg_t                       :	22; /* 63:42 */
-			picreg_t logv_int_ram_perr     :	1; /* 41:41 */
-			picreg_t logv_bus_arb_broke    :	1; /* 40:40 */
-			picreg_t logv_pci_x_req_tout   :	1; /* 39:39 */
-			picreg_t logv_pci_x_tabort     :	1; /* 38:38 */
-			picreg_t logv_pci_x_perr       :	1; /* 37:37 */
-			picreg_t logv_pci_x_serr       :	1; /* 36:36 */
-			picreg_t logv_pci_x_mretry     :	1; /* 35:35 */
-			picreg_t logv_pci_x_mtout      :	1; /* 34:34 */
-			picreg_t logv_pci_x_da_parity  :	1; /* 33:33 */
-			picreg_t logv_pci_x_ad_parity  :	1; /* 32:32 */
-			picreg_t                       :	1; /* 31:31 */
-			picreg_t logv_pmu_page_fault   :	1; /* 30:30 */
-			picreg_t logv_unexpected_resp  :	1; /* 29:29 */
-			picreg_t logv_bad_xresp_packet :	1; /* 28:28 */
-			picreg_t logv_bad_xreq_packet  :	1; /* 27:27 */
-			picreg_t logv_resp_xtalk_error :	1; /* 26:26 */
-			picreg_t logv_req_xtalk_error  :	1; /* 25:25 */
-			picreg_t logv_invalid_access   :	1; /* 24:24 */
-			picreg_t logv_unsupported_xop  :	1; /* 23:23 */
-			picreg_t logv_xreq_fifo_oflow  :	1; /* 22:22 */
-			picreg_t logv_llp_rec_snerror  :	1; /* 21:21 */
-			picreg_t logv_llp_rec_cberror  :	1; /* 20:20 */
-			picreg_t logv_llp_rcty         :	1; /* 19:19 */
-			picreg_t logv_llp_tx_retry     :	1; /* 18:18 */
-			picreg_t logv_llp_tcty         :	1; /* 17:17 */
-			picreg_t                       :	1; /* 16:16 */
-			picreg_t logv_pci_abort        :	1; /* 15:15 */
-			picreg_t logv_pci_parity       :	1; /* 14:14 */
-			picreg_t logv_pci_serr         :	1; /* 13:13 */
-			picreg_t logv_pci_perr         :	1; /* 12:12 */
-			picreg_t logv_pci_master_tout  :	1; /* 11:11 */
-			picreg_t logv_pci_retry_cnt    :	1; /* 10:10 */
-			picreg_t logv_xread_req_tout   :	1; /* 9:9 */
-                        picreg_t                       :        2; /* 8:7 */
-			picreg_t multi_clr             :	1; /* 6:6 */
-			picreg_t                       :	6; /* 5:0 */
-		} pic_p_int_rst_fld_s;
-	} pic_p_int_rst_u_t;
-
-/*
- * Interrupt Mode Register
- *
- * This register defines the interrupting mode of the INT_N pins.
- */
-	typedef union pic_p_int_mode_u {
-		picreg_t	pic_p_int_mode_regval;
-		struct {
-			picreg_t            :	32; /* 63:32 */
-			picreg_t            :	24; /* 31:8 */
-			picreg_t en_clr_pkt :	8; /* 7:0 */
-		} pic_p_int_mode_fld_s;
-	} pic_p_int_mode_u_t;
-/*
- * Interrupt Device Select Register
- *
- * This register associates interrupt pins with devices thus allowing buffer
- * management (flushing) when a device interrupt occurs.
- */
-	typedef union pic_p_int_device_u {
-		picreg_t	pic_p_int_device_regval;
-		struct {
-			picreg_t          :	32; /* 63:32 */
-			picreg_t          :	8; /* 31:24 */
-			picreg_t int7_dev :	3; /* 23:21 */
-			picreg_t int6_dev :	3; /* 20:18 */
-			picreg_t int5_dev :	3; /* 17:15 */
-			picreg_t int4_dev :	3; /* 14:12 */
-			picreg_t int3_dev :	3; /* 11:9 */
-			picreg_t int2_dev :	3; /* 8:6 */
-			picreg_t int1_dev :	3; /* 5:3 */
-			picreg_t int0_dev :	3; /* 2:0 */
-		} pic_p_int_device_fld_s;
-	} pic_p_int_device_u_t;
-/*
- * Host Error Interrupt Field Register
- *
- * This register tells which bit location in the host's Interrupt Status register
- * to set or reset when any error condition happens.
- */
-	typedef union pic_p_int_host_err_u {
-		picreg_t	pic_p_int_host_err_regval;
-		struct {
-			picreg_t                :	32; /* 63:32 */
-			picreg_t                :	24; /* 31:8 */
-			picreg_t bridge_err_fld :	8; /* 7:0 */
-		} pic_p_int_host_err_fld_s;
-	} pic_p_int_host_err_u_t;
-/*
- * Interrupt (x) Host Address Register
- *
- * This register allow different host address to be assigned to each interrupt
- * pin and the bit in the host.
- */
-	typedef union pic_p_int_addr_u {
-		picreg_t	pic_p_int_addr_regval;
-		struct {
-			picreg_t          :	8; /* 63:56 */
-			picreg_t int_fld  :	8; /* 55:48 */
-			picreg_t int_addr :	48; /* 47:0 */
-		} pic_p_int_addr_fld_s;
-	} pic_p_int_addr_u_t;
-/*
- * Error Interrupt View Register
- *
- * This register contains the view of which interrupt occur even if they are
- * not currently enabled. The group clear is used to clear these bits just like
- * the interrupt status register bits.
- */
-	typedef union pic_p_err_int_view_u {
-		picreg_t	pic_p_err_int_view_regval;
-		struct {
-			picreg_t                  :	22; /* 63:42 */
-			picreg_t int_ram_perr     :	1; /* 41:41 */
-			picreg_t bus_arb_broke    :	1; /* 40:40 */
-			picreg_t pci_x_req_tout   :	1; /* 39:39 */
-			picreg_t pci_x_tabort     :	1; /* 38:38 */
-			picreg_t pci_x_perr       :	1; /* 37:37 */
-			picreg_t pci_x_serr       :	1; /* 36:36 */
-			picreg_t pci_x_mretry     :	1; /* 35:35 */
-			picreg_t pci_x_mtout      :	1; /* 34:34 */
-			picreg_t pci_x_da_parity  :	1; /* 33:33 */
-			picreg_t pci_x_ad_parity  :	1; /* 32:32 */
-			picreg_t                  :	1; /* 31:31 */
-			picreg_t pmu_page_fault   :	1; /* 30:30 */
-			picreg_t unexpected_resp  :	1; /* 29:29 */
-			picreg_t bad_xresp_packet :	1; /* 28:28 */
-			picreg_t bad_xreq_packet  :	1; /* 27:27 */
-			picreg_t resp_xtalk_error :	1; /* 26:26 */
-			picreg_t req_xtalk_error  :	1; /* 25:25 */
-			picreg_t invalid_access   :	1; /* 24:24 */
-			picreg_t unsupported_xop  :	1; /* 23:23 */
-			picreg_t xreq_fifo_oflow  :	1; /* 22:22 */
-			picreg_t llp_rec_snerror  :	1; /* 21:21 */
-			picreg_t llp_rec_cberror  :	1; /* 20:20 */
-			picreg_t llp_rcty         :	1; /* 19:19 */
-			picreg_t llp_tx_retry     :	1; /* 18:18 */
-			picreg_t llp_tcty         :	1; /* 17:17 */
-			picreg_t                  :	1; /* 16:16 */
-			picreg_t pci_abort        :	1; /* 15:15 */
-			picreg_t pci_parity       :	1; /* 14:14 */
-			picreg_t pci_serr         :	1; /* 13:13 */
-			picreg_t pci_perr         :	1; /* 12:12 */
-			picreg_t pci_master_tout  :	1; /* 11:11 */
-			picreg_t pci_retry_cnt    :	1; /* 10:10 */
-			picreg_t xread_req_tout   :	1; /* 9:9 */
-			picreg_t                  :	9; /* 8:0 */
-		} pic_p_err_int_view_fld_s;
-	} pic_p_err_int_view_u_t;
+#define PIC_INTERNAL_ATES			1024 
+#define PIC_WR_REQ_BUFSIZE			256
 
 
-/*
- * Multiple Interrupt Register
- *
- * This register indicates if any interrupt occurs more than once without be-
- * ing cleared.
- */
-	typedef union pic_p_mult_int_u {
-		picreg_t	pic_p_mult_int_regval;
-		struct {
-			picreg_t                  :	22; /* 63:42 */
-			picreg_t int_ram_perr     :	1; /* 41:41 */
-			picreg_t bus_arb_broke    :	1; /* 40:40 */
-			picreg_t pci_x_req_tout   :	1; /* 39:39 */
-			picreg_t pci_x_tabort     :	1; /* 38:38 */
-			picreg_t pci_x_perr       :	1; /* 37:37 */
-			picreg_t pci_x_serr       :	1; /* 36:36 */
-			picreg_t pci_x_mretry     :	1; /* 35:35 */
-			picreg_t pci_x_mtout      :	1; /* 34:34 */
-			picreg_t pci_x_da_parity  :	1; /* 33:33 */
-			picreg_t pci_x_ad_parity  :	1; /* 32:32 */
-			picreg_t                  :	1; /* 31:31 */
-			picreg_t pmu_page_fault   :	1; /* 30:30 */
-			picreg_t unexpected_resp  :	1; /* 29:29 */
-			picreg_t bad_xresp_packet :	1; /* 28:28 */
-			picreg_t bad_xreq_packet  :	1; /* 27:27 */
-			picreg_t resp_xtalk_error :	1; /* 26:26 */
-			picreg_t req_xtalk_error  :	1; /* 25:25 */
-			picreg_t invalid_access   :	1; /* 24:24 */
-			picreg_t unsupported_xop  :	1; /* 23:23 */
-			picreg_t xreq_fifo_oflow  :	1; /* 22:22 */
-			picreg_t llp_rec_snerror  :	1; /* 21:21 */
-			picreg_t llp_rec_cberror  :	1; /* 20:20 */
-			picreg_t llp_rcty         :	1; /* 19:19 */
-			picreg_t llp_tx_retry     :	1; /* 18:18 */
-			picreg_t llp_tcty         :	1; /* 17:17 */
-			picreg_t                  :	1; /* 16:16 */
-			picreg_t pci_abort        :	1; /* 15:15 */
-			picreg_t pci_parity       :	1; /* 14:14 */
-			picreg_t pci_serr         :	1; /* 13:13 */
-			picreg_t pci_perr         :	1; /* 12:12 */
-			picreg_t pci_master_tout  :	1; /* 11:11 */
-			picreg_t pci_retry_cnt    :	1; /* 10:10 */
-			picreg_t xread_req_tout   :	1; /* 9:9 */
-			picreg_t                  :	1; /* 8:8 */
-			picreg_t int_state        :	8; /* 7:0 */
-		} pic_p_mult_int_fld_s;
-	} pic_p_mult_int_u_t;
-/*
- * Force Always Interrupt (x) Register
- *
- * A write to this data independent write only register will force a set inter-
- * rupt to occur as if the interrupt line had transitioned. If the interrupt line
- * is already active an addition set interrupt packet is set. All buffer flush op-
- * erations also occur on this operation.
- */
-
-
-/*
- * Force Interrupt (x) Register
- *
- * A write to this data independent write only register in conjunction with
- * the assertion of the corresponding interrupt line will force a set interrupt
- * to occur as if the interrupt line had transitioned. The interrupt line must
- * be active for this operation to generate a set packet, otherwise the write
- * PIO is ignored. All buffer flush operations also occur when the set packet
- * is sent on this operation.
- */
-
-
-/*
- * Device Registers
- *
- * The Device registers contain device specific and mapping information.
- */
-	typedef union pic_device_reg_u {
-		picreg_t	pic_device_reg_regval;
-		struct {
-			picreg_t               :	32; /* 63:32 */
-			picreg_t               :	2; /* 31:30 */
-			picreg_t en_virtual1   :	1; /* 29:29 */
-			picreg_t en_error_lock :	1; /* 28:28 */
-			picreg_t en_page_chk   :	1; /* 27:27 */
-			picreg_t force_pci_par :	1; /* 26:26 */
-			picreg_t en_virtual0   :	1; /* 25:25 */
-			picreg_t               :	1; /* 24:24 */
-			picreg_t dir_wrt_gen   :	1; /* 23:23 */
-			picreg_t dev_size      :	1; /* 22:22 */
-			picreg_t real_time     :	1; /* 21:21 */
-			picreg_t               :	1; /* 20:20 */
-			picreg_t swap_direct   :	1; /* 19:19 */
-			picreg_t prefetch      :	1; /* 18:18 */
-			picreg_t precise       :	1; /* 17:17 */
-			picreg_t coherent      :	1; /* 16:16 */
-			picreg_t barrier       :	1; /* 15:15 */
-			picreg_t gbr           :	1; /* 14:14 */
-			picreg_t dev_swap      :	1; /* 13:13 */
-			picreg_t dev_io_mem    :	1; /* 12:12 */
-			picreg_t dev_off       :	12; /* 11:0 */
-		} pic_device_reg_fld_s;
-	} pic_device_reg_u_t;
-/*
- * Device (x) Write Request Buffer Flush
- *
- * When read, this register will return a 0x00 after the write buffer associat-
- * ed with the device has been flushed. (PCI Only)
- */
-
-
-/*
- * Even Device Read Response Buffer Register (PCI Only)
- *
- * This register is use to allocate the read response buffers for the even num-
- * bered devices. (0,2)
- */
-	typedef union pic_p_even_resp_u {
-		picreg_t	pic_p_even_resp_regval;
-		struct {
-			picreg_t              :	32; /* 63:32 */
-			picreg_t buff_14_en   :	1; /* 31:31 */
-			picreg_t buff_14_vdev :	2; /* 30:29 */
-			picreg_t buff_14_pdev :	1; /* 28:28 */
-			picreg_t buff_12_en   :	1; /* 27:27 */
-			picreg_t buff_12_vdev :	2; /* 26:25 */
-			picreg_t buff_12_pdev :	1; /* 24:24 */
-			picreg_t buff_10_en   :	1; /* 23:23 */
-			picreg_t buff_10_vdev :	2; /* 22:21 */
-			picreg_t buff_10_pdev :	1; /* 20:20 */
-			picreg_t buff_8_en    :	1; /* 19:19 */
-			picreg_t buff_8_vdev  :	2; /* 18:17 */
-			picreg_t buff_8_pdev  :	1; /* 16:16 */
-			picreg_t buff_6_en    :	1; /* 15:15 */
-			picreg_t buff_6_vdev  :	2; /* 14:13 */
-			picreg_t buff_6_pdev  :	1; /* 12:12 */
-			picreg_t buff_4_en    :	1; /* 11:11 */
-			picreg_t buff_4_vdev  :	2; /* 10:9 */
-			picreg_t buff_4_pdev  :	1; /* 8:8 */
-			picreg_t buff_2_en    :	1; /* 7:7 */
-			picreg_t buff_2_vdev  :	2; /* 6:5 */
-			picreg_t buff_2_pdev  :	1; /* 4:4 */
-			picreg_t buff_0_en    :	1; /* 3:3 */
-			picreg_t buff_0_vdev  :	2; /* 2:1 */
-			picreg_t buff_0_pdev  :	1; /* 0:0 */
-		} pic_p_even_resp_fld_s;
-	} pic_p_even_resp_u_t;
-/*
- * Odd Device Read Response Buffer Register (PCI Only)
- *
- * This register is use to allocate the read response buffers for the odd num-
- * bered devices. (1,3))
- */
-	typedef union pic_p_odd_resp_u {
-		picreg_t	pic_p_odd_resp_regval;
-		struct {
-			picreg_t              :	32; /* 63:32 */
-			picreg_t buff_15_en   :	1; /* 31:31 */
-			picreg_t buff_15_vdev :	2; /* 30:29 */
-			picreg_t buff_15_pdev :	1; /* 28:28 */
-			picreg_t buff_13_en   :	1; /* 27:27 */
-			picreg_t buff_13_vdev :	2; /* 26:25 */
-			picreg_t buff_13_pdev :	1; /* 24:24 */
-			picreg_t buff_11_en   :	1; /* 23:23 */
-			picreg_t buff_11_vdev :	2; /* 22:21 */
-			picreg_t buff_11_pdev :	1; /* 20:20 */
-			picreg_t buff_9_en    :	1; /* 19:19 */
-			picreg_t buff_9_vdev  :	2; /* 18:17 */
-			picreg_t buff_9_pdev  :	1; /* 16:16 */
-			picreg_t buff_7_en    :	1; /* 15:15 */
-			picreg_t buff_7_vdev  :	2; /* 14:13 */
-			picreg_t buff_7_pdev  :	1; /* 12:12 */
-			picreg_t buff_5_en    :	1; /* 11:11 */
-			picreg_t buff_5_vdev  :	2; /* 10:9 */
-			picreg_t buff_5_pdev  :	1; /* 8:8 */
-			picreg_t buff_3_en    :	1; /* 7:7 */
-			picreg_t buff_3_vdev  :	2; /* 6:5 */
-			picreg_t buff_3_pdev  :	1; /* 4:4 */
-			picreg_t buff_1_en    :	1; /* 3:3 */
-			picreg_t buff_1_vdev  :	2; /* 2:1 */
-			picreg_t buff_1_pdev  :	1; /* 0:0 */
-		} pic_p_odd_resp_fld_s;
-	} pic_p_odd_resp_u_t;
-/*
- * Read Response Buffer Status Register (PCI Only)
- *
- * This read only register contains the current response buffer status.
- */
-	typedef union pic_p_resp_status_u {
-		picreg_t	pic_p_resp_status_regval;
-		struct {
-			picreg_t           :	32; /* 63:32 */
-			picreg_t rrb_valid :	16; /* 31:16 */
-			picreg_t rrb_inuse :	16; /* 15:0 */
-		} pic_p_resp_status_fld_s;
-	} pic_p_resp_status_u_t;
-/*
- * Read Response Buffer Clear Register (PCI Only)
- *
- * A write to this register clears the current contents of the buffer.
- */
-	typedef union pic_p_resp_clear_u {
-		picreg_t	pic_p_resp_clear_regval;
-		struct {
-			picreg_t           :	32; /* 63:32 */
-			picreg_t           :	16; /* 31:16 */
-			picreg_t rrb_clear :	16; /* 15:0 */
-		} pic_p_resp_clear_fld_s;
-	} pic_p_resp_clear_u_t;
-/*
- * PCI Read Response Buffer (x) Upper Address Match
- *
- * The PCI Bridge read response buffer upper address register is a read only
- * register which contains the upper 16-bits of the address and status used to
- * select the buffer for a PCI transaction.
- */
-	typedef union pic_p_buf_upper_addr_match_u {
-		picreg_t	pic_p_buf_upper_addr_match_regval;
-		struct {
-			picreg_t          :	32; /* 63:32 */
-			picreg_t filled   :	1; /* 31:31 */
-			picreg_t armed    :	1; /* 30:30 */
-			picreg_t flush    :	1; /* 29:29 */
-			picreg_t xerr     :	1; /* 28:28 */
-			picreg_t pkterr   :	1; /* 27:27 */
-			picreg_t timeout  :	1; /* 26:26 */
-			picreg_t prefetch :	1; /* 25:25 */
-			picreg_t precise  :	1; /* 24:24 */
-			picreg_t dw_be    :	8; /* 23:16 */
-			picreg_t upp_addr :	16; /* 15:0 */
-		} pic_p_buf_upper_addr_match_fld_s;
-	} pic_p_buf_upper_addr_match_u_t;
-/*
- * PCI Read Response Buffer (x) Lower Address Match
- *
- * The PCI Bridge read response buffer lower address Match register is a
- * read only register which contains the address and status used to select the
- * buffer for a PCI transaction.
- */
-	typedef union pic_p_buf_lower_addr_match_u {
-		picreg_t	pic_p_buf_lower_addr_match_regval;
-		struct {
-			picreg_t filled   :	1; /* 63:63 */
-			picreg_t armed    :	1; /* 62:62 */
-			picreg_t flush    :	1; /* 61:61 */
-			picreg_t xerr     :	1; /* 60:60 */
-			picreg_t pkterr   :	1; /* 59:59 */
-			picreg_t timeout  :	1; /* 58:58 */
-			picreg_t prefetch :	1; /* 57:57 */
-			picreg_t precise  :	1; /* 56:56 */
-			picreg_t dw_be    :	8; /* 55:48 */
-			picreg_t upp_addr :	16; /* 47:32 */
-			picreg_t low_addr :	32; /* 31:0 */
-		} pic_p_buf_lower_addr_match_fld_s;
-	} pic_p_buf_lower_addr_match_u_t;
-/*
- * PCI Buffer (x) Flush Count with Data Touch Register
- *
- * This counter is incremented each time the corresponding response buffer
- * is flushed after at least a single data element in the buffer is used. A word
- * write to this address clears the count.
- */
-	typedef union pic_flush_w_touch_u {
-		picreg_t	pic_flush_w_touch_regval;
-		struct {
-			picreg_t           :	32; /* 63:32 */
-			picreg_t           :	16; /* 31:16 */
-			picreg_t touch_cnt :	16; /* 15:0 */
-		} pic_flush_w_touch_fld_s;
-	} pic_flush_w_touch_u_t;
-/*
- * PCI Buffer (x) Flush Count w/o Data Touch Register
- *
- * This counter is incremented each time the corresponding response buffer
- * is flushed without any data element in the buffer being used. A word
- * write to this address clears the count.
- */
-	typedef union pic_flush_wo_touch_u {
-		picreg_t	pic_flush_wo_touch_regval;
-		struct {
-			picreg_t             :	32; /* 63:32 */
-			picreg_t             :	16; /* 31:16 */
-			picreg_t notouch_cnt :	16; /* 15:0 */
-		} pic_flush_wo_touch_fld_s;
-	} pic_flush_wo_touch_u_t;
-/*
- * PCI Buffer (x) Request in Flight Count Register
- *
- * This counter is incremented on each bus clock while the request is in-
- * flight. A word write to this address clears the count. ]
- */
-	typedef union pic_inflight_u {
-		picreg_t	pic_inflight_regval;
-		struct {
-			picreg_t              :	32; /* 63:32 */
-			picreg_t              :	16; /* 31:16 */
-			picreg_t inflight_cnt :	16; /* 15:0 */
-		} pic_inflight_fld_s;
-	} pic_inflight_u_t;
-/*
- * PCI Buffer (x) Prefetch Request Count Register
- *
- * This counter is incremented each time the request using this buffer was
- * generated from the prefetcher. A word write to this address clears the
- * count.
- */
-	typedef union pic_prefetch_u {
-		picreg_t	pic_prefetch_regval;
-		struct {
-			picreg_t              :	32; /* 63:32 */
-			picreg_t              :	16; /* 31:16 */
-			picreg_t prefetch_cnt :	16; /* 15:0 */
-		} pic_prefetch_fld_s;
-	} pic_prefetch_u_t;
-/*
- * PCI Buffer (x) Total PCI Retry Count Register
- *
- * This counter is incremented each time a PCI bus retry occurs and the ad-
- * dress matches the tag for the selected buffer. The buffer must also has this
- * request in-flight. A word write to this address clears the count.
- */
-	typedef union pic_total_pci_retry_u {
-		picreg_t	pic_total_pci_retry_regval;
-		struct {
-			picreg_t           :	32; /* 63:32 */
-			picreg_t           :	16; /* 31:16 */
-			picreg_t retry_cnt :	16; /* 15:0 */
-		} pic_total_pci_retry_fld_s;
-	} pic_total_pci_retry_u_t;
-/*
- * PCI Buffer (x) Max PCI Retry Count Register
- *
- * This counter is contains the maximum retry count for a single request
- * which was in-flight for this buffer. A word write to this address clears the
- * count.
- */
-	typedef union pic_max_pci_retry_u {
-		picreg_t	pic_max_pci_retry_regval;
-		struct {
-			picreg_t               :	32; /* 63:32 */
-			picreg_t               :	16; /* 31:16 */
-			picreg_t max_retry_cnt :	16; /* 15:0 */
-		} pic_max_pci_retry_fld_s;
-	} pic_max_pci_retry_u_t;
-/*
- * PCI Buffer (x) Max Latency Count Register
- *
- * This counter is contains the maximum count (in bus clocks) for a single
- * request which was in-flight for this buffer. A word write to this address
- * clears the count.
- */
-	typedef union pic_max_latency_u {
-		picreg_t	pic_max_latency_regval;
-		struct {
-			picreg_t                 :	32; /* 63:32 */
-			picreg_t                 :	16; /* 31:16 */
-			picreg_t max_latency_cnt :	16; /* 15:0 */
-		} pic_max_latency_fld_s;
-	} pic_max_latency_u_t;
-/*
- * PCI Buffer (x) Clear All Register
- *
- * Any access to this register clears all the count values for the (x) registers.
- */
-
-
-/*
- * PCI-X Registers
- *
- * This register contains the address in the read buffer structure. There are
- * 16 read buffer structures.
- */
-	typedef union pic_rd_buf_addr_u {
-		picreg_t	pic_rd_buf_addr_regval;
-		struct {
-			picreg_t pcix_err_addr :	64; /* 63:0 */
-		} pic_rd_buf_addr_fld_s;
-	} pic_rd_buf_addr_u_t;
-/*
- * PCI-X Read Buffer (x) Attribute Register
- *
- * This register contains the attribute data in the read buffer structure. There
- * are  16 read buffer structures.
- */
-	typedef union pic_px_read_buf_attr_u {
-		picreg_t	pic_px_read_buf_attr_regval;
-		struct {
-			picreg_t                :	16; /* 63:48 */
-			picreg_t bus_cmd        :	4; /* 47:44 */
-			picreg_t byte_cnt       :	12; /* 43:32 */
-			picreg_t entry_valid    :	1; /* 31:31 */
-			picreg_t ns             :	1; /* 30:30 */
-			picreg_t ro             :	1; /* 29:29 */
-			picreg_t tag            :	5; /* 28:24 */
-			picreg_t bus_num        :	8; /* 23:16 */
-			picreg_t dev_num        :	5; /* 15:11 */
-			picreg_t fun_num        :	3; /* 10:8 */
-			picreg_t                :	2; /* 7:6 */
-			picreg_t f_buffer_index :	6; /* 5:0 */
-		} pic_px_read_buf_attr_fld_s;
-	} pic_px_read_buf_attr_u_t;
-/*
- * PCI-X Write Buffer (x) Address Register
- *
- * This register contains the address in the write buffer structure. There are
- * 8 write buffer structures.
- */
-	typedef union pic_px_write_buf_addr_u {
-		picreg_t	pic_px_write_buf_addr_regval;
-		struct {
-			picreg_t pcix_err_addr :	64; /* 63:0 */
-		} pic_px_write_buf_addr_fld_s;
-	} pic_px_write_buf_addr_u_t;
-/*
- * PCI-X Write Buffer (x) Attribute Register
- *
- * This register contains the attribute data in the write buffer structure.
- * There are 8 write buffer structures.
- */
-	typedef union pic_px_write_buf_attr_u {
-		picreg_t	pic_px_write_buf_attr_regval;
-		struct {
-			picreg_t                :	16; /* 63:48 */
-			picreg_t bus_cmd        :	4; /* 47:44 */
-			picreg_t byte_cnt       :	12; /* 43:32 */
-			picreg_t entry_valid    :	1; /* 31:31 */
-			picreg_t ns             :	1; /* 30:30 */
-			picreg_t ro             :	1; /* 29:29 */
-			picreg_t tag            :	5; /* 28:24 */
-			picreg_t bus_num        :	8; /* 23:16 */
-			picreg_t dev_num        :	5; /* 15:11 */
-			picreg_t fun_num        :	3; /* 10:8 */
-			picreg_t                :	2; /* 7:6 */
-			picreg_t f_buffer_index :	6; /* 5:0 */
-		} pic_px_write_buf_attr_fld_s;
-	} pic_px_write_buf_attr_u_t;
-/*
- * PCI-X Write Buffer (x) Valid Register
- *
- * This register contains the valid or inuse cache lines for this buffer struc-
- * ture.
- */
-	typedef union pic_px_write_buf_valid_u {
-		picreg_t	pic_px_write_buf_valid_regval;
-		struct {
-			picreg_t                :	32; /* 63:32 */
-			picreg_t wrt_valid_buff :	32; /* 31:0 */
-		} pic_px_write_buf_valid_fld_s;
-	} pic_px_write_buf_valid_u_t;
-
 #endif				/* __ASSEMBLY__ */
-#endif                          /* _ASM_SN_PCI_PIC_H */
+#endif                          /* _ASM_IA64_SN_PCI_PIC_H */
diff -Nru a/include/asm-ia64/sn/pci/tiocp.h b/include/asm-ia64/sn/pci/tiocp.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/include/asm-ia64/sn/pci/tiocp.h	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,588 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_TIOCP_H
+#define _ASM_IA64_SN_PCI_TIOCP_H
+
+#ifdef __KERNEL__
+#include <linux/config.h>
+#include <asm/sn/xtalk/xwidget.h>	/* generic widget header */
+#include <asm/sn/xtalk/corelet.h>	/* generic corelet header */
+#else
+#include <linux/config.h>
+#include <xtalk/xwidget.h>
+#include <xtalk/corelet.h>
+#endif
+
+
+/*****************************************************************************
+ ************************** TIOCP PART & REV DEFINES *************************
+ *****************************************************************************/
+
+#define TIOCP_PART_NUM_CP0       0xE000
+#define TIOCP_PART_NUM_CP1       0xE010
+#define TIOCP_MFGR_NUM           0x24
+#define TIOCP_REV_A  0x1
+#define TIOCP_REV_B  0x2
+
+#define IS_TIOCP_CP0(wid) (XWIDGET_PART_NUM(wid)==TIOCP_PART_NUM_CP0 && \
+			XWIDGET_MFG_NUM(wid) == TIOCP_MFGR_NUM)
+#define IS_TIOCP_CP1(wid) (XWIDGET_PART_NUM(wid)==TIOCP_PART_NUM_CP1 && \
+			XWIDGET_MFG_NUM(wid) == TIOCP_MFGR_NUM)
+#define IS_TIOCP_BRIDGE(wid) (IS_TIOCP_CP0(wid) || IS_TIOCP_CP1(wid))
+
+
+#define IS_TIOCP_PART_REV_A(rev) \
+	((rev == (TIOCP_PART_NUM_CP0 << 4 | TIOCP_REV_A)) || \
+	 (rev == (TIOCP_PART_NUM_CP1 << 4 | TIOCP_REV_A)))
+#define IS_TIOCP_PART_REV_B(rev) \
+	((rev == (TIOCP_PART_NUM_CP0 << 4 | TIOCP_REV_B)) || \
+	 (rev == (TIOCP_PART_NUM_CP1 << 4 | TIOCP_REV_B)))
+
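
The IS_TIOCP_* predicates compare the part and manufacturer fields of a widget id against the two corelet bus instances. A hedged sketch of how a probe path might use them; tiocp_recognize() and the uint32_t widget-id type are illustrative assumptions, not part of this patch:

/*
 * Return -1 if the widget is not a TIOCP bridge, otherwise the corelet
 * bus number (0 for CP0, 1 for CP1).
 */
static int
tiocp_recognize(uint32_t widget_id)
{
	if (!IS_TIOCP_BRIDGE(widget_id))
		return -1;
	return IS_TIOCP_CP0(widget_id) ? 0 : 1;
}
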
+
+/*****************************************************************************
+ *********************** TIOCP MMR structure mapping ***************************
+ *****************************************************************************/
+
+#ifndef __ASSEMBLY__
+typedef uint64_t tiocp_reg_t;
+typedef uint64_t tiocp_ate_t;
+
+typedef volatile struct tiocp_s {
+
+    /* 0x000000-0x00FFFF -- Local Registers */
+
+    /* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */
+    tiocp_reg_t		cp_id;				/* 0x000000 */
+    tiocp_reg_t		cp_stat;			/* 0x000008 */
+    tiocp_reg_t		cp_err_upper;			/* 0x000010 */
+    tiocp_reg_t		cp_err_lower;			/* 0x000018 */
+    #define cp_err cp_err_lower
+    tiocp_reg_t		cp_control;			/* 0x000020 */
+    tiocp_reg_t		cp_req_timeout;			/* 0x000028 */
+    tiocp_reg_t		cp_intr_upper;			/* 0x000030 */
+    tiocp_reg_t		cp_intr_lower;			/* 0x000038 */
+    #define cp_intr cp_intr_lower
+    tiocp_reg_t		cp_err_cmdword;			/* 0x000040 */
+    tiocp_reg_t		_pad_000048;			/* 0x000048 */
+    tiocp_reg_t		cp_tflush;			/* 0x000050 */
+
+    /* 0x000058-0x00007F -- Bridge-specific Configuration */
+    tiocp_reg_t		cp_aux_err;			/* 0x000058 */
+    tiocp_reg_t		cp_resp_upper;			/* 0x000060 */
+    tiocp_reg_t		cp_resp_lower;			/* 0x000068 */
+    #define cp_resp cp_resp_lower
+    tiocp_reg_t		cp_tst_pin_ctrl;		/* 0x000070 */
+    tiocp_reg_t		cp_addr_lkerr;			/* 0x000078 */
+
+    /* 0x000080-0x00008F -- PMU & MAP */
+    tiocp_reg_t		cp_dir_map;			/* 0x000080 */
+    tiocp_reg_t		_pad_000088;			/* 0x000088 */
+
+    /* 0x000090-0x00009F -- SSRAM */
+    tiocp_reg_t		cp_map_fault;			/* 0x000090 */
+    tiocp_reg_t		_pad_000098;			/* 0x000098 */
+
+    /* 0x0000A0-0x0000AF -- Arbitration */
+    tiocp_reg_t		cp_arb;				/* 0x0000A0 */
+    tiocp_reg_t		_pad_0000A8;			/* 0x0000A8 */
+
+    /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
+    tiocp_reg_t		cp_ate_parity_err;		/* 0x0000B0 */
+    tiocp_reg_t		_pad_0000B8;			/* 0x0000B8 */
+
+    /* 0x0000C0-0x0000FF -- PCI/GIO */
+    tiocp_reg_t		cp_bus_timeout;			/* 0x0000C0 */
+    tiocp_reg_t		cp_pci_cfg;			/* 0x0000C8 */
+    tiocp_reg_t		cp_pci_err_upper;		/* 0x0000D0 */
+    tiocp_reg_t		cp_pci_err_lower;		/* 0x0000D8 */
+    #define cp_pci_err cp_pci_err_lower
+    tiocp_reg_t		_pad_0000E0[4];			/* 0x0000{E0..F8} */
+
+    /* 0x000100-0x0001FF -- Interrupt */
+    tiocp_reg_t		cp_int_status;			/* 0x000100 */
+    tiocp_reg_t		cp_int_enable;			/* 0x000108 */
+    tiocp_reg_t		cp_int_rst_stat;		/* 0x000110 */
+    tiocp_reg_t		cp_int_mode;			/* 0x000118 */
+    tiocp_reg_t		cp_int_device;			/* 0x000120 */
+    tiocp_reg_t		cp_int_host_err;		/* 0x000128 */
+    tiocp_reg_t		cp_int_addr[8];			/* 0x0001{30,,,68} */
+    tiocp_reg_t		cp_err_int_view;		/* 0x000170 */
+    tiocp_reg_t		cp_mult_int;			/* 0x000178 */
+    tiocp_reg_t		cp_force_always[8];		/* 0x0001{80,,,B8} */
+    tiocp_reg_t		cp_force_pin[8];		/* 0x0001{C0,,,F8} */
+
+    /* 0x000200-0x000298 -- Device */
+    tiocp_reg_t		cp_device[4];			/* 0x0002{00,,,18} */
+    tiocp_reg_t		_pad_000220[4];			/* 0x0002{20,,,38} */
+    tiocp_reg_t		cp_wr_req_buf[4];		/* 0x0002{40,,,58} */
+    tiocp_reg_t		_pad_000260[4];			/* 0x0002{60,,,78} */
+    tiocp_reg_t		cp_rrb_map[2];			/* 0x0002{80,,,88} */
+    #define cp_even_resp cp_rrb_map[0]			/* 0x000280 */
+    #define cp_odd_resp  cp_rrb_map[1]			/* 0x000288 */
+    tiocp_reg_t		cp_resp_status;			/* 0x000290 */
+    tiocp_reg_t		cp_resp_clear;			/* 0x000298 */
+
+    tiocp_reg_t		_pad_0002A0[12];		/* 0x0002{A0..F8} */
+
+    /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
+    struct {
+	tiocp_reg_t	upper;				/* 0x0003{00,,,F0} */
+	tiocp_reg_t	lower;				/* 0x0003{08,,,F8} */
+    } cp_buf_addr_match[16];
+
+    /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
+    struct {
+	tiocp_reg_t	flush_w_touch;			/* 0x000{400,,,5C0} */
+	tiocp_reg_t	flush_wo_touch;			/* 0x000{408,,,5C8} */
+	tiocp_reg_t	inflight;			/* 0x000{410,,,5D0} */
+	tiocp_reg_t	prefetch;			/* 0x000{418,,,5D8} */
+	tiocp_reg_t	total_pci_retry;		/* 0x000{420,,,5E0} */
+	tiocp_reg_t	max_pci_retry;			/* 0x000{428,,,5E8} */
+	tiocp_reg_t	max_latency;			/* 0x000{430,,,5F0} */
+	tiocp_reg_t	clear_all;			/* 0x000{438,,,5F8} */
+    } cp_buf_count[8];
+
+    
+    /* 0x000600-0x0009FF -- PCI/X registers */
+    tiocp_reg_t		cp_pcix_bus_err_addr;		/* 0x000600 */
+    tiocp_reg_t		cp_pcix_bus_err_attr;		/* 0x000608 */
+    tiocp_reg_t		cp_pcix_bus_err_data;		/* 0x000610 */
+    tiocp_reg_t		cp_pcix_pio_split_addr;		/* 0x000618 */
+    tiocp_reg_t		cp_pcix_pio_split_attr;		/* 0x000620 */
+    tiocp_reg_t		cp_pcix_dma_req_err_attr;	/* 0x000628 */
+    tiocp_reg_t		cp_pcix_dma_req_err_addr;	/* 0x000630 */
+    tiocp_reg_t		cp_pcix_timeout;		/* 0x000638 */
+
+    tiocp_reg_t		_pad_000640[24];		/* 0x000{640,,,6F8} */
+
+    /* 0x000700-0x000737 -- Debug Registers */
+    tiocp_reg_t		cp_ct_debug_ctl;		/* 0x000700 */
+    tiocp_reg_t		cp_br_debug_ctl;		/* 0x000708 */
+    tiocp_reg_t		cp_mux3_debug_ctl;		/* 0x000710 */
+    tiocp_reg_t		cp_mux4_debug_ctl;		/* 0x000718 */
+    tiocp_reg_t		cp_mux5_debug_ctl;		/* 0x000720 */
+    tiocp_reg_t		cp_mux6_debug_ctl;		/* 0x000728 */
+    tiocp_reg_t		cp_mux7_debug_ctl;		/* 0x000730 */
+
+    tiocp_reg_t		_pad_000738[89];		/* 0x000{738,,,9F8} */
+
+    /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
+    struct {
+	tiocp_reg_t	cp_buf_addr;			/* 0x000{A00,,,AF0} */
+	tiocp_reg_t	cp_buf_attr;			/* 0x000{A08,,,AF8} */
+    } cp_pcix_read_buf_64[16];
+
+    struct {
+	tiocp_reg_t	cp_buf_addr;			/* 0x000{B00,,,BE0} */
+	tiocp_reg_t	cp_buf_attr;			/* 0x000{B08,,,BE8} */
+	tiocp_reg_t	cp_buf_valid;			/* 0x000{B10,,,BF0} */
+	tiocp_reg_t	__pad1;				/* 0x000{B18,,,BF8} */
+    } cp_pcix_write_buf_64[8];
+
+    /* End of Local Registers -- Start of Address Map space */
+
+    char		_pad_000c00[0x010000 - 0x000c00];
+
+    /* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */
+    tiocp_ate_t		cp_int_ate_ram[1024];		/* 0x010000-0x011FF8 */
+
+    char		_pad_012000[0x14000 - 0x012000];
+
+    /* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */
+    tiocp_ate_t		cp_int_ate_ram_mp[1024];	/* 0x014000-0x015FF8 */
+
+    char		_pad_016000[0x18000 - 0x016000];
+
+    /* 0x18000-0x197F8 -- TIOCP Write Request Ram */
+    tiocp_reg_t		cp_wr_req_lower[256];		/* 0x18000 - 0x187F8 */
+    tiocp_reg_t		cp_wr_req_upper[256];		/* 0x18800 - 0x18FF8 */
+    tiocp_reg_t		cp_wr_req_parity[256];		/* 0x19000 - 0x197F8 */
+
+    char		_pad_019800[0x1C000 - 0x019800];
+
+    /* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */
+    tiocp_reg_t		cp_rd_resp_lower[512];		/* 0x1C000 - 0x1CFF8 */
+    tiocp_reg_t		cp_rd_resp_upper[512];		/* 0x1D000 - 0x1DFF8 */
+    tiocp_reg_t		cp_rd_resp_parity[512];		/* 0x1E000 - 0x1EFF8 */
+
+    char		_pad_01F000[0x20000 - 0x01F000];
+
+    /* 0x020000-0x020FFF -- Host Device (CP) Configuration Space (not used)  */
+    char		_pad_020000[0x021000 - 0x20000];
+
+    /* 0x021000-0x027FFF -- PCI Device Configuration Spaces */
+    union {
+	uchar_t		c[0x1000 / 1];			/* 0x02{0000,,,7FFF} */
+	uint16_t	s[0x1000 / 2];			/* 0x02{0000,,,7FFF} */
+	uint32_t	l[0x1000 / 4];			/* 0x02{0000,,,7FFF} */
+	uint64_t	d[0x1000 / 8];			/* 0x02{0000,,,7FFF} */
+	union {
+	    uchar_t	c[0x100 / 1];
+	    uint16_t	s[0x100 / 2];
+	    uint32_t	l[0x100 / 4];
+	    uint64_t	d[0x100 / 8];
+	} f[8];
+    } cp_type0_cfg_dev[7];				/* 0x02{1000,,,7FFF} */
+
+    /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
+    union {
+	uchar_t		c[0x1000 / 1];			/* 0x028000-0x029000 */
+	uint16_t	s[0x1000 / 2];			/* 0x028000-0x029000 */
+	uint32_t	l[0x1000 / 4];			/* 0x028000-0x029000 */
+	uint64_t	d[0x1000 / 8];			/* 0x028000-0x029000 */
+	union {
+	    uchar_t	c[0x100 / 1];
+	    uint16_t	s[0x100 / 2];
+	    uint32_t	l[0x100 / 4];
+	    uint64_t	d[0x100 / 8];
+	} f[8];
+    } cp_type1_cfg;					/* 0x028000-0x029000 */
+
+    char		_pad_029000[0x030000-0x029000];
+
+    /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
+    union {
+	uchar_t		c[8 / 1];
+	uint16_t	s[8 / 2];
+	uint32_t	l[8 / 4];
+	uint64_t	d[8 / 8];
+    } cp_pci_iack;					/* 0x030000-0x030007 */
+
+    char		_pad_030007[0x040000-0x030008];
+
+    /* 0x040000-0x040007 -- PCIX Special Cycle */
+    union {
+	uchar_t		c[8 / 1];
+	uint16_t	s[8 / 2];
+	uint32_t	l[8 / 4];
+	uint64_t	d[8 / 8];
+    } cp_pcix_cycle;					/* 0x040000-0x040007 */
+} tiocp_t;
+
+#endif	/* __ASSEMBLY__ */
+
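
Because struct tiocp_s is declared volatile and lays out the MMR space byte-for-byte (the _pad_* members keep the offsets honest), register access is ordinary struct member access once the bridge base has been mapped. A minimal illustrative accessor, not part of this patch; the register chosen is arbitrary:

/* Read the interrupt status MMR at offset 0x000100 of the TIOCP MMR space. */
static inline uint64_t
tiocp_read_int_status(tiocp_t *bridge)
{
	return bridge->cp_int_status;
}
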
+/*****************************************************************************
+ ************************** TIOCP BRIDGE MMR DEFINES *************************
+ *****************************************************************************/
+
+/*
+ * TIOCP STATUS register	offset 0x00000008
+ */
+#define TIOCP_STAT_RX_REQ_CNT_SHFT	PCIBR_STAT_RX_CREDIT_SHFT
+#define TIOCP_STAT_RX_REQ_CNT		PCIBR_STAT_RX_CREDIT
+#define TIOCP_STAT_PCIX_ACTIVE_SHFT	PCIBR_STAT_PCIX_ACTIVE_SHFT
+#define TIOCP_STAT_PCIX_ACTIVE		PCIBR_STAT_PCIX_ACTIVE
+#define TIOCP_STAT_PCIX_SPEED_SHFT	PCIBR_STAT_PCIX_SPEED_SHFT
+#define TIOCP_STAT_PCIX_SPEED		PCIBR_STAT_PCIX_SPEED
+#define TIOCP_STAT_TX_RSP_CNT_SHFT	36
+#define TIOCP_STAT_TX_RSP_CNT		(0xFull << TIOCP_STAT_TX_RSP_CNT_SHFT)
+#define TIOCP_STAT_TX_REQ_CNT_SHFT	40
+#define TIOCP_STAT_TX_REQ_CNT		(0xFull << TIOCP_STAT_TX_REQ_CNT_SHFT)
+
+/*
+ * TIOCP CONTROL register	offset 0x00000020
+ */
+#define TIOCP_CTRL_CORELET_ID_SHFT	0
+#define TIOCP_CTRL_CORELET_ID		(0x3 << TIOCP_CTRL_CORELET_ID_SHFT)
+#define TIOCP_CTRL_PCI_SPEED_SHFT	PCIBR_CTRL_PCI_SPEED_SHFT
+#define TIOCP_CTRL_PCI_SPEED		PCIBR_CTRL_PCI_SPEED
+#define TIOCP_CTRL_SYS_END_SHFT		PCIBR_CTRL_SYS_END_SHFT
+#define TIOCP_CTRL_SYS_END		PCIBR_CTRL_SYS_END
+#define TIOCP_CTRL_CRED_LIM_SHFT	12
+#define TIOCP_CTRL_CRED_LIM		(0x3 << TIOCP_CTRL_CRED_LIM_SHFT)
+#define TIOCP_CTRL_PAGE_SIZE_SHFT	PCIBR_CTRL_PAGE_SIZE_SHFT
+#define TIOCP_CTRL_PAGE_SIZE		PCIBR_CTRL_PAGE_SIZE
+#define TIOCP_CTRL_MEM_SWAP_SHFT	PCIBR_CTRL_MEM_SWAP_SHFT
+#define TIOCP_CTRL_MEM_SWAP		PCIBR_CTRL_MEM_SWAP
+#define TIOCP_CTRL_RST_SHFT		PCIBR_CTRL_RST_SHFT
+#define TIOCP_CTRL_RST_PIN(x)		PCIBR_CTRL_RST_PIN(x)
+#define TIOCP_CTRL_RST(n)		PCIBR_CTRL_RST(n)
+#define TIOCP_CTRL_RST_MASK		PCIBR_CTRL_RST_MASK
+#define TIOCP_CTRL_PAR_EN_REQ_SHFT	PCIBR_CTRL_PAR_EN_REQ_SHFT
+#define TIOCP_CTRL_PAR_EN_REQ		PCIBR_CTRL_PAR_EN_REQ
+#define TIOCP_CTRL_PAR_EN_RESP_SHFT	PCIBR_CTRL_PAR_EN_RESP_SHFT
+#define TIOCP_CTRL_PAR_EN_RESP		PCIBR_CTRL_PAR_EN_RESP
+#define TIOCP_CTRL_PAR_EN_ATE_SHFT	PCIBR_CTRL_PAR_EN_ATE_SHFT
+#define TIOCP_CTRL_PAR_EN_ATE		PCIBR_CTRL_PAR_EN_ATE
+#define TIOCP_CTRL_FUN_NUM_MASK		PCIBR_CTRL_FUN_NUM_MASK
+#define TIOCP_CTRL_FUN_NUM(x)		PCIBR_CTRL_FUN_NUM(x)
+#define TIOCP_CTRL_DEV_NUM_MASK		PCIBR_CTRL_DEV_NUM_MASK
+#define TIOCP_CTRL_DEV_NUM(x)		PCIBR_CTRL_DEV_NUM(x)
+#define TIOCP_CTRL_BUS_NUM_MASK		PCIBR_CTRL_BUS_NUM_MASK
+#define TIOCP_CTRL_BUS_NUM(x)		PCIBR_CTRL_BUS_NUM(x)
+#define TIOCP_CTRL_RELAX_ORDER_SHFT	PCIBR_CTRL_RELAX_ORDER_SHFT
+#define TIOCP_CTRL_RELAX_ORDER		PCIBR_CTRL_RELAX_ORDER
+#define TIOCP_CTRL_NO_SNOOP_SHFT	PCIBR_CTRL_NO_SNOOP_SHFT
+#define TIOCP_CTRL_NO_SNOOP		PCIBR_CTRL_NO_SNOOP
+
+/*
+ * TIOCP PCI Response Buffer	offset 0x00000068
+ */
+#define TIOCP_RSP_BUF_ADDR		TIOCP_CTALK_ADDR_MASK
+#define TIOCP_RSP_BUF_NUM_SHFT		56
+#define TIOCP_RSP_BUF_NUM		(0xFull << TIOCP_RSP_BUF_NUM_SHFT)
+#define TIOCP_RSP_BUF_DEV_NUM_SHFT	60
+#define TIOCP_RSP_BUF_DEV_NUM		(0x3ull << TIOCP_RSP_BUF_DEV_NUM_SHFT)
+
+/*
+ * TIOCP PCI DIRECT Mapping	offset 0x00000080
+ */
+#define TIOCP_DIRMAP_DIROFF_SHFT	PCIBR_DIRMAP_DIROFF_SHFT
+#define TIOCP_DIRMAP_DIROFF		PCIBR_DIRMAP_DIROFF
+#define TIOCP_DIRMAP_ADD512_SHFT	PCIBR_DIRMAP_ADD512_SHFT
+#define TIOCP_DIRMAP_ADD512		PCIBR_DIRMAP_ADD512
+#define TIOCP_DIRMAP_DIROFF_UP_SHFT	20
+#define TIOCP_DIRMAP_DIROFF_UP		(0x3F << TIOCP_DIRMAP_DIROFF_UP_SHFT)
+
+#define TIOCP_DIRMAP_OFF_ADDRSHFT	PCIBR_DIRMAP_OFF_ADDRSHFT
+/*
+ * TIOCP Intr Status register	offset 0x00000100
+ */
+#define TIOCP_ISR_CWRT_REQ_TOUT		(0x1ull << 47)
+#define TIOCP_ISR_CTALK_PROT_ERR	(0x1ull << 46)
+#define TIOCP_ISR_PCIX_SPLIT_MSG_PE	PCIBR_ISR_PCIX_SPLIT_MSG_PE
+#define TIOCP_ISR_PCIX_SPLIT_EMSG	PCIBR_ISR_PCIX_SPLIT_EMSG
+#define TIOCP_ISR_PCIX_SPLIT_TO		PCIBR_ISR_PCIX_SPLIT_TO
+#define TIOCP_ISR_PCIX_UNEX_COMP	PCIBR_ISR_PCIX_UNEX_COMP
+#define TIOCP_ISR_INT_RAM_PERR		PCIBR_ISR_INT_RAM_PERR
+#define TIOCP_ISR_PCIX_ARB_ERR		PCIBR_ISR_PCIX_ARB_ERR
+#define TIOCP_ISR_PCIX_REQ_TOUT		PCIBR_ISR_PCIX_REQ_TOUT
+#define TIOCP_ISR_PCIX_TABORT		PCIBR_ISR_PCIX_TABORT
+#define TIOCP_ISR_PCIX_PERR		PCIBR_ISR_PCIX_PERR
+#define TIOCP_ISR_PCIX_SERR		PCIBR_ISR_PCIX_SERR
+#define TIOCP_ISR_PCIX_MRETRY		PCIBR_ISR_PCIX_MRETRY
+#define TIOCP_ISR_PCIX_MTOUT		PCIBR_ISR_PCIX_MTOUT
+#define TIOCP_ISR_PCIX_DA_PARITY	PCIBR_ISR_PCIX_DA_PARITY
+#define TIOCP_ISR_PCIX_AD_PARITY	PCIBR_ISR_PCIX_AD_PARITY
+#define TIOCP_ISR_PMU_PAGE_FAULT	PCIBR_ISR_PMU_PAGE_FAULT
+#define TIOCP_ISR_UNEXP_RESP		PCIBR_ISR_UNEXP_RESP
+#define TIOCP_ISR_BAD_XRESP_PKT		PCIBR_ISR_BAD_XRESP_PKT
+#define TIOCP_ISR_BAD_XREQ_PKT		PCIBR_ISR_BAD_XREQ_PKT
+#define TIOCP_ISR_RESP_XTLK_ERR		PCIBR_ISR_RESP_XTLK_ERR
+#define TIOCP_ISR_REQ_XTLK_ERR		PCIBR_ISR_REQ_XTLK_ERR
+#define TIOCP_ISR_INVLD_ADDR		PCIBR_ISR_INVLD_ADDR
+#define TIOCP_ISR_UNSUPPORTED_XOP	PCIBR_ISR_UNSUPPORTED_XOP
+#define TIOCP_ISR_XREQ_FIFO_OFLOW	PCIBR_ISR_XREQ_FIFO_OFLOW
+#define TIOCP_ISR_PCI_ABORT		PCIBR_ISR_PCI_ABORT
+#define TIOCP_ISR_PCI_PARITY		PCIBR_ISR_PCI_PARITY
+#define TIOCP_ISR_PCI_SERR		PCIBR_ISR_PCI_SERR
+#define TIOCP_ISR_PCI_PERR		PCIBR_ISR_PCI_PERR
+#define TIOCP_ISR_PCI_MST_TIMEOUT	PCIBR_ISR_PCI_MST_TIMEOUT
+#define TIOCP_ISR_PCI_RETRY_CNT		PCIBR_ISR_PCI_RETRY_CNT
+#define TIOCP_ISR_XREAD_REQ_TIMEOUT	PCIBR_ISR_XREAD_REQ_TIMEOUT
+#define TIOCP_ISR_INT_MSK		PCIBR_ISR_INT_MSK
+#define TIOCP_ISR_INT(x)		PCIBR_ISR_INT(x)
+
+/*
+ * TIOCP Enable Intr register	offset 0x00000108
+ */
+#define TIOCP_IER_CWRT_REQ_TOUT		(0x1ull << 47)
+#define TIOCP_IER_CTALK_PROT_ERR	(0x1ull << 46)
+#define TIOCP_IER_PCIX_SPLIT_MSG_PE	PCIBR_IER_PCIX_SPLIT_MSG_PE
+#define TIOCP_IER_PCIX_SPLIT_EMSG	PCIBR_IER_PCIX_SPLIT_EMSG
+#define TIOCP_IER_PCIX_SPLIT_TO		PCIBR_IER_PCIX_SPLIT_TO
+#define TIOCP_IER_PCIX_UNEX_COMP	PCIBR_IER_PCIX_UNEX_COMP
+#define TIOCP_IER_INT_RAM_PERR		PCIBR_IER_INT_RAM_PERR
+#define TIOCP_IER_PCIX_ARB_ERR		PCIBR_IER_PCIX_ARB_ERR
+#define TIOCP_IER_PCIX_REQ_TOUT		PCIBR_IER_PCIX_REQ_TOUT
+#define TIOCP_IER_PCIX_TABORT		PCIBR_IER_PCIX_TABORT
+#define TIOCP_IER_PCIX_PERR		PCIBR_IER_PCIX_PERR
+#define TIOCP_IER_PCIX_SERR		PCIBR_IER_PCIX_SERR
+#define TIOCP_IER_PCIX_MRETRY		PCIBR_IER_PCIX_MRETRY
+#define TIOCP_IER_PCIX_MTOUT		PCIBR_IER_PCIX_MTOUT
+#define TIOCP_IER_PCIX_DA_PARITY	PCIBR_IER_PCIX_DA_PARITY
+#define TIOCP_IER_PCIX_AD_PARITY	PCIBR_IER_PCIX_AD_PARITY
+#define TIOCP_IER_PMU_PAGE_FAULT	PCIBR_IER_PMU_PAGE_FAULT
+#define TIOCP_IER_UNEXP_RESP		PCIBR_IER_UNEXP_RESP
+#define TIOCP_IER_BAD_XRESP_PKT		PCIBR_IER_BAD_XRESP_PKT
+#define TIOCP_IER_BAD_XREQ_PKT		PCIBR_IER_BAD_XREQ_PKT
+#define TIOCP_IER_RESP_XTLK_ERR		PCIBR_IER_RESP_XTLK_ERR
+#define TIOCP_IER_REQ_XTLK_ERR		PCIBR_IER_REQ_XTLK_ERR
+#define TIOCP_IER_INVLD_ADDR		PCIBR_IER_INVLD_ADDR
+#define TIOCP_IER_UNSUPPORTED_XOP	PCIBR_IER_UNSUPPORTED_XOP
+#define TIOCP_IER_XREQ_FIFO_OFLOW	PCIBR_IER_XREQ_FIFO_OFLOW
+#define TIOCP_IER_PCI_ABORT		PCIBR_IER_PCI_ABORT
+#define TIOCP_IER_PCI_PARITY		PCIBR_IER_PCI_PARITY
+#define TIOCP_IER_PCI_SERR		PCIBR_IER_PCI_SERR
+#define TIOCP_IER_PCI_PERR		PCIBR_IER_PCI_PERR
+#define TIOCP_IER_PCI_MST_TIMEOUT	PCIBR_IER_PCI_MST_TIMEOUT
+#define TIOCP_IER_PCI_RETRY_CNT		PCIBR_IER_PCI_RETRY_CNT
+#define TIOCP_IER_XREAD_REQ_TIMEOUT	PCIBR_IER_XREAD_REQ_TIMEOUT
+#define TIOCP_IER_INT_MSK		PCIBR_IER_INT_MSK
+#define TIOCP_IER_INT(x)		PCIBR_IER_INT(x)
+
+/*
+ * TIOCP Reset Intr register	offset 0x00000110
+ */
+#define TIOCP_IRR_CWRT_REQ_TOUT		(0x1ull << 47)
+#define TIOCP_IRR_CTALK_PROT_ERR	(0x1ull << 46)
+#define TIOCP_IRR_PCIX_SPLIT_MSG_PE	PCIBR_IRR_PCIX_SPLIT_MSG_PE
+#define TIOCP_IRR_PCIX_SPLIT_EMSG	PCIBR_IRR_PCIX_SPLIT_EMSG
+#define TIOCP_IRR_PCIX_SPLIT_TO		PCIBR_IRR_PCIX_SPLIT_TO
+#define TIOCP_IRR_PCIX_UNEX_COMP	PCIBR_IRR_PCIX_UNEX_COMP
+#define TIOCP_IRR_INT_RAM_PERR		PCIBR_IRR_INT_RAM_PERR
+#define TIOCP_IRR_PCIX_ARB_ERR		PCIBR_IRR_PCIX_ARB_ERR
+#define TIOCP_IRR_PCIX_REQ_TOUT		PCIBR_IRR_PCIX_REQ_TOUT
+#define TIOCP_IRR_PCIX_TABORT		PCIBR_IRR_PCIX_TABORT
+#define TIOCP_IRR_PCIX_PERR		PCIBR_IRR_PCIX_PERR
+#define TIOCP_IRR_PCIX_SERR		PCIBR_IRR_PCIX_SERR
+#define TIOCP_IRR_PCIX_MRETRY		PCIBR_IRR_PCIX_MRETRY
+#define TIOCP_IRR_PCIX_MTOUT		PCIBR_IRR_PCIX_MTOUT
+#define TIOCP_IRR_PCIX_DA_PARITY	PCIBR_IRR_PCIX_DA_PARITY
+#define TIOCP_IRR_PCIX_AD_PARITY	PCIBR_IRR_PCIX_AD_PARITY
+#define TIOCP_IRR_PMU_PAGE_FAULT	PCIBR_IRR_PMU_PAGE_FAULT
+#define TIOCP_IRR_UNEXP_RESP		PCIBR_IRR_UNEXP_RESP
+#define TIOCP_IRR_BAD_XRESP_PKT		PCIBR_IRR_BAD_XRESP_PKT
+#define TIOCP_IRR_BAD_XREQ_PKT		PCIBR_IRR_BAD_XREQ_PKT
+#define TIOCP_IRR_RESP_XTLK_ERR		PCIBR_IRR_RESP_XTLK_ERR
+#define TIOCP_IRR_REQ_XTLK_ERR		PCIBR_IRR_REQ_XTLK_ERR
+#define TIOCP_IRR_INVLD_ADDR		PCIBR_IRR_INVLD_ADDR
+#define TIOCP_IRR_UNSUPPORTED_XOP	PCIBR_IRR_UNSUPPORTED_XOP
+#define TIOCP_IRR_XREQ_FIFO_OFLOW	PCIBR_IRR_XREQ_FIFO_OFLOW
+#define TIOCP_IRR_LLP_REC_SNERR		PCIBR_IRR_LLP_REC_SNERR
+#define TIOCP_IRR_LLP_REC_CBERR		PCIBR_IRR_LLP_REC_CBERR
+#define TIOCP_IRR_LLP_RCTY		PCIBR_IRR_LLP_RCTY
+#define TIOCP_IRR_LLP_TX_RETRY		PCIBR_IRR_LLP_TX_RETRY
+#define TIOCP_IRR_LLP_TCTY		PCIBR_IRR_LLP_TCTY
+#define TIOCP_IRR_PCI_ABORT		PCIBR_IRR_PCI_ABORT
+#define TIOCP_IRR_PCI_PARITY		PCIBR_IRR_PCI_PARITY
+#define TIOCP_IRR_PCI_SERR		PCIBR_IRR_PCI_SERR
+#define TIOCP_IRR_PCI_PERR		PCIBR_IRR_PCI_PERR
+#define TIOCP_IRR_PCI_MST_TIMEOUT	PCIBR_IRR_PCI_MST_TIMEOUT
+#define TIOCP_IRR_PCI_RETRY_CNT		PCIBR_IRR_PCI_RETRY_CNT
+#define TIOCP_IRR_XREAD_REQ_TIMEOUT	PCIBR_IRR_XREAD_REQ_TIMEOUT
+#define TIOCP_IRR_MULTI_CLR		PCIBR_IRR_MULTI_CLR
+#define TIOCP_IRR_CRP_GRP_CLR		PCIBR_IRR_CRP_GRP_CLR
+#define TIOCP_IRR_RESP_BUF_GRP_CLR	PCIBR_IRR_RESP_BUF_GRP_CLR
+#define TIOCP_IRR_REQ_DSP_GRP_CLR	PCIBR_IRR_REQ_DSP_GRP_CLR
+#define TIOCP_IRR_LLP_GRP_CLR		PCIBR_IRR_LLP_GRP_CLR
+#define TIOCP_IRR_SSRAM_GRP_CLR		PCIBR_IRR_SSRAM_GRP_CLR
+#define TIOCP_IRR_PCI_GRP_CLR		PCIBR_IRR_PCI_GRP_CLR
+#define TIOCP_IRR_GIO_GRP_CLR		PCIBR_IRR_GIO_GRP_CLR
+#define TIOCP_IRR_ALL_CLR		PCIBR_IRR_ALL_CLR
+
+/*
+ * TIOCP Intr Dev Select reg	offset 0x00000120
+ */
+#define TIOCP_INT_DEV_SHFT(n)		PCIBR_INT_DEV_SHFT(n)
+#define TIOCP_INT_DEV_MASK(n)		PCIBR_INT_DEV_MASK(n)
+
+/*
+ * TIOCP PCI Host Intr Addr	offset 0x00000130 - 0x00000168
+ */
+#define TIOCP_HOST_INTR_ADDR		TIOCP_CTALK_ADDR_MASK
+#define TIOCP_HOST_INTR_FLD_SHFT	56
+#define TIOCP_HOST_INTR_FLD		(0xFFull << TIOCP_HOST_INTR_FLD_SHFT)
+
+/*
+ * TIOCP DEVICE(x) register	offset 0x00000200
+ */
+#define TIOCP_DEV_OFF_ADDR_SHFT		PCIBR_DEV_OFF_ADDR_SHFT
+#define TIOCP_DEV_OFF_MASK		PCIBR_DEV_OFF_MASK
+#define TIOCP_DEV_DEV_IO_MEM		PCIBR_DEV_DEV_IO_MEM
+#define TIOCP_DEV_DEV_SWAP		PCIBR_DEV_DEV_SWAP
+#define TIOCP_DEV_CTALK_ARB		(1ull << 14)
+#define TIOCP_DEV_BARRIER		PCIBR_DEV_BARRIER
+#define TIOCP_DEV_PRECISE		PCIBR_DEV_PRECISE
+#define TIOCP_DEV_PREF			PCIBR_DEV_PREF
+#define TIOCP_DEV_SWAP_DIR		PCIBR_DEV_SWAP_DIR
+#define TIOCP_DEV_RT			PCIBR_DEV_RT
+#define TIOCP_DEV_DEV_SIZE		PCIBR_DEV_DEV_SIZE
+#define TIOCP_DEV_DIR_WRGA_EN		PCIBR_DEV_DIR_WRGA_EN
+#define TIOCP_DEV_VIRTUAL_EN		PCIBR_DEV_VIRTUAL_EN
+#define TIOCP_DEV_FORCE_PCI_PAR		PCIBR_DEV_FORCE_PCI_PAR
+#define TIOCP_DEV_PAGE_CHK_DIS		PCIBR_DEV_PAGE_CHK_DIS
+#define TIOCP_DEV_ERR_LOCK_EN		PCIBR_DEV_ERR_LOCK_EN
+#define TIOCP_DEV_PIO_OP		(1ull << 29)
+
+
+/*
+ * Even & Odd RRB registers	offset 0x00000280 & 0x00000288
+ */
+/* Individual RRB masks after shifting down */
+#define TIOCP_RRB_EN			PCIBR_RRB_EN
+#define TIOCP_RRB_DEV			PCIBR_RRB_DEV
+#define TIOCP_RRB_VDEV			PCIBR_RRB_VDEV
+#define TIOCP_RRB_PDEV			PCIBR_RRB_PDEV
+
+
+/*
+ * TIOCP RRB status register	offset 0x00000290
+ */
+#define TIOCP_RRB_VALID(r)		PCIBR_RRB_VALID(r)
+#define TIOCP_RRB_INUSE(r)		PCIBR_RRB_INUSE(r)
+
+
+/*
+ * TIOCP RRB clear register	offset 0x00000298
+ */
+#define TIOCP_RRB_CLEAR(r)		PCIBR_RRB_CLEAR(r)
+
+
+/*****************************************************************************
+ ***************************** TIOCP DMA DEFINES *****************************
+ *****************************************************************************/
+
+/*
+ * TIOCP - PMU Address Translation Entry defines
+ */
+#define TIOCP_ATE_V			PCIBR_ATE_V
+#define TIOCP_ATE_PIO			(0x1 << 1)
+#define TIOCP_ATE_PREC			PCIBR_ATE_PREC
+#define TIOCP_ATE_PREF			PCIBR_ATE_PREF
+#define TIOCP_ATE_BAR			PCIBR_ATE_BAR
+#define TIOCP_ATE_PARITY		(0x1 << 5)
+#define TIOCP_ATE_ADDR_SHFT		PCIBR_ATE_ADDR_SHFT
+#define TIOCP_ATE_ADDR_MASK		(0x3FFFFFFFFFF000)
+
+/* bit 29 of the pci address is the SWAP bit */
+#define TIOCP_ATE_SWAPSHIFT		ATE_SWAPSHIFT
+#define TIOCP_SWAP_ON(x)		ATE_SWAP_ON(x)
+#define TIOCP_SWAP_OFF(x)		ATE_SWAP_OFF(x)
+
+/* 
+ * Bridge 32bit Bus DMA addresses 
+ */
+#define TIOCP_LOCAL_BASE		PCIBR_LOCAL_BASE
+#define TIOCP_DMA_MAPPED_BASE		PCIBR_DMA_MAPPED_BASE
+#define TIOCP_DMA_MAPPED_SIZE		PCIBR_DMA_MAPPED_SIZE
+#define TIOCP_DMA_DIRECT_BASE		PCIBR_DMA_DIRECT_BASE
+#define TIOCP_DMA_DIRECT_SIZE		PCIBR_DMA_DIRECT_SIZE
+
+
+/*****************************************************************************
+ ***************************** TIOCP PIO DEFINES *****************************
+ *****************************************************************************/
+
+/* 
+ * Macros for Xtalk to Bridge bus (PCI) PIO
+ */
+
+/* XTALK addresses that map into TIOCP Bus addr space */
+#define TIOCP_BRIDGE_PIO32_XTALK_ALIAS_BASE	0x000040000000L
+#define TIOCP_BRIDGE_PIO32_XTALK_ALIAS_LIMIT	0x00007FFFFFFFL
+#define TIOCP_BRIDGE_PIO64_XTALK_ALIAS_BASE	0x000080000000L
+#define TIOCP_BRIDGE_PIO64_XTALK_ALIAS_LIMIT	0x0000BFFFFFFFL
+
+/* XTALK addresses that map into PCI addresses */
+#define TIOCP_BRIDGE_PCI_MEM32_BASE	TIOCP_BRIDGE_PIO32_XTALK_ALIAS_BASE
+#define TIOCP_BRIDGE_PCI_MEM32_LIMIT	TIOCP_BRIDGE_PIO32_XTALK_ALIAS_LIMIT
+#define TIOCP_BRIDGE_PCI_MEM64_BASE	TIOCP_BRIDGE_PIO64_XTALK_ALIAS_BASE
+#define TIOCP_BRIDGE_PCI_MEM64_LIMIT	TIOCP_BRIDGE_PIO64_XTALK_ALIAS_LIMIT
+
+
+/*****************************************************************************
+ ***************************** TIOCP MISC DEFINES ****************************
+ *****************************************************************************/
+
+#define TIOCP_CTALK_ADDR_MASK		0x003FFFFFFFFFFFFF
+
+#define	TIOCP_INTERNAL_ATES		1024 
+#define TIOCP_WR_REQ_BUFSIZE		256
+
+#endif 	/* _ASM_IA64_SN_PCI_TIOCP_H */
diff -Nru a/include/asm-ia64/sn/pda.h b/include/asm-ia64/sn/pda.h
--- a/include/asm-ia64/sn/pda.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/pda.h	Thu Nov  6 13:42:35 2003
@@ -10,7 +10,6 @@
 
 #include <linux/config.h>
 #include <linux/cache.h>
-#include <linux/numa.h>
 #include <asm/percpu.h>
 #include <asm/system.h>
 #include <asm/processor.h>
@@ -57,10 +56,11 @@
 
 	unsigned long	sn_soft_irr[4];
 	unsigned long	sn_in_service_ivecs[4];
-	short		cnodeid_to_nasid_table[MAX_NUMNODES];
+	short		cnodeid_to_nasid_table[MAX_NUMNODES];	
 	int		sn_lb_int_war_ticks;
 	int		sn_last_irq;
 	int		sn_first_irq;
+	int		sn_num_irqs;			/* number of irqs targeted for this cpu */
 } pda_t;
 
 
diff -Nru a/include/asm-ia64/sn/serialio.h b/include/asm-ia64/sn/serialio.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/include/asm-ia64/sn/serialio.h	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,462 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SERIALIO_H
+#define _ASM_IA64_SN_SERIALIO_H
+
+/*
+ * Definitions for the modular serial i/o driver.
+ *
+ * The modular serial i/o driver is a driver which has the hardware
+ * dependent and hardware independent parts separated into separate
+ * modules. The upper half is responsible for all hardware independent
+ * operations, specifically the interface to the kernel. An upper half
+ * may implement a streams interface, character interface, or whatever
+ * interface it wishes to the kernel. The same upper half may access any
+ * physical hardware through a set of standardized entry points into the
+ * lower level, which deals directly with the hardware. Whereas a
+ * separate upper layer exists for each kernel interface type (streams,
+ * character, polling etc), a separate lower level exists for each
+ * hardware type supported. Any upper and lower layer pair may be
+ * connected to form a complete driver. This file defines the interface
+ * between the two
+ */
+
+/* Definitions needed per port by both layers. Each lower layer
+ * declares a set of per-port private data areas describing each
+ * physical port, and by definition the first member of that private
+ * data is the following structure. Thus a pointer to the lower
+ * layer's private data is interchangeable with a pointer to the
+ * common private data, and the upper layer does not allocate anything
+ * so it does not need to know anything about the physical configuration
+ * of the machine. This structure may also contain any hardware
+ * independent info that must be persistent across device closes.
+ */
+typedef struct sioport {
+    /* calling vectors */
+    struct serial_calldown	*sio_calldown;
+    struct serial_callup	*sio_callup;
+
+    void	*sio_upper;	/* upper layer's private data area */
+
+    vertex_hdl_t sio_vhdl;	/* vertex handle of the hardware independent
+				 * portion of this port (e.g. tty/1 without
+				 * the d,m,f, etc)
+				 */
+    spinlock_t  sio_lock;
+} sioport_t;
+
+/* bits for sio_flags */
+#define SIO_HWGRAPH_INITED	0x1
+#define SIO_SPINLOCK_HELD	0x2
+#define SIO_MUTEX_HELD		0x4
+#define SIO_LOCKS_MASK (SIO_SPINLOCK_HELD | SIO_MUTEX_HELD)
+
+#if DEBUG
+/* bits for sio_lockcalls, one per downcall except du_write which is
+ * not called by an upper layer.
+ */
+#define L_OPEN		0x0001
+#define L_CONFIG	0x0002
+#define L_ENABLE_HFC	0x0004
+#define L_SET_EXTCLK	0x0008
+#define L_WRITE		0x0010
+#define L_BREAK		0x0020
+#define L_READ		0x0040
+#define L_NOTIFICATION	0x0080
+#define L_RX_TIMEOUT	0x0100
+#define L_SET_DTR	0x0200
+#define L_SET_RTS	0x0400
+#define L_QUERY_DCD	0x0800
+#define L_QUERY_CTS	0x1000
+#define L_SET_PROTOCOL	0x2000
+#define L_ENABLE_TX	0x4000
+
+#define L_LOCK_ALL	(~0)
+
+/* debug lock assertion: each lower layer entry point does an
+ * assertion with the following macro, passing in the port passed to
+ * the entry point and the bit corresponding to which entry point it
+ * is. If the upper layer has turned on the bit for that entry point,
+ * sio_port_islocked is called, thus an upper layer may specify that
+ * it is ok for a particular downcall to be made without the port lock
+ * held.
+ */
+#define L_LOCKED(port, flag) (((port)->sio_lockcalls & (flag)) == 0 || \
+			      sio_port_islocked(port))
+#endif
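+
+/* Illustrative sketch, assuming a made-up lower layer: each entry point
+ * asserts its own bit on entry, e.g. a hypothetical down_read
+ * implementation:
+ *
+ *	static int my_down_read(sioport_t *port, char *buf, int len)
+ *	{
+ *		ASSERT(L_LOCKED(port, L_READ));
+ *		...
+ *	}
+ */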
+
+/* flags for next_char_state */
+#define NCS_BREAK 1
+#define NCS_PARITY 2
+#define NCS_FRAMING 4
+#define NCS_OVERRUN 8
+
+/* protocol types for DOWN_SET_PROTOCOL */
+enum sio_proto {
+    PROTO_RS232,
+    PROTO_RS422
+};
+
+/* calldown vector. This is a set of entry points into a lower layer
+ * module, providing black-box access to the hardware by the upper
+ * layer
+ */
+struct serial_calldown {
+
+    /* hardware configuration */
+    int (*down_open)		(sioport_t *port);
+    int (*down_config)		(sioport_t *port, int baud, int byte_size,
+				 int stop_bits, int parenb, int parodd);
+    int (*down_enable_hfc)	(sioport_t *port, int enable);
+    int (*down_set_extclk)	(sioport_t *port, int clock_factor);
+
+    /* data transmission */
+    int (*down_write)		(sioport_t *port, char *buf, int len);
+    int (*down_du_write)	(sioport_t *port, char *buf, int len);
+    void (*down_du_flush)	(sioport_t *port);
+    int (*down_break)		(sioport_t *port, int brk);
+    int (*down_enable_tx)	(sioport_t *port, int enb);
+
+    /* data reception */
+    int (*down_read)		(sioport_t *port, char *buf, int len);
+    
+    /* event notification */
+    int (*down_notification)	(sioport_t *port, int mask, int on);
+    int (*down_rx_timeout)	(sioport_t *port, int timeout);
+
+    /* modem control */
+    int (*down_set_DTR)		(sioport_t *port, int dtr);
+    int (*down_set_RTS)		(sioport_t *port, int rts);
+    int (*down_query_DCD)	(sioport_t *port);
+    int (*down_query_CTS)	(sioport_t *port);
+
+    /* transmission protocol */
+    int (*down_set_protocol)	(sioport_t *port, enum sio_proto protocol);
+
+    /* memory mapped user driver support */
+    int (*down_mapid)		(sioport_t *port, void *arg);
+    int (*down_map)		(sioport_t *port, uint64_t *vt, off_t off);
+    void (*down_unmap)		(sioport_t *port);
+    int (*down_set_sscr)	(sioport_t *port, int arg, int flag);
+};
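+
+/*
+ * Illustrative sketch, not part of this interface: a hypothetical lower
+ * layer normally declares one static calldown vector and points each of
+ * its ports at it (the my_* names below are invented):
+ *
+ *	static struct serial_calldown my_calldown = {
+ *		.down_open   = my_open,
+ *		.down_config = my_config,
+ *		.down_write  = my_write,
+ *		.down_read   = my_read,
+ *	};
+ *
+ *	port->sio_calldown = &my_calldown;
+ */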
+
+/*
+ * Macros used by the upper layer to access the lower layer. Unless
+ * otherwise noted, all integer functions should return 0 on success
+ * or 1 if the hardware does not support the requested operation. In
+ * the case of non-support, the upper layer may work around the problem
+ * where appropriate or just notify the user.
+ * For hardware which supports detaching, these functions should
+ * return -1 if the hardware is no longer present.
+ */
+
+/* open a device. Do whatever initialization/resetting necessary */
+#define DOWN_OPEN(p) \
+    ((p)->sio_calldown->down_open(p))
+
+/* configure the hardware with the given baud rate, number of stop
+ * bits, byte size and parity
+ */
+#define DOWN_CONFIG(p, a, b, c, d, e) \
+    ((p)->sio_calldown->down_config(p, a, b, c, d, e))
+
+/* Enable hardware flow control. If the hardware does not support
+ * this, the upper layer will emulate HFC by manipulating RTS and CTS
+ */
+#define DOWN_ENABLE_HFC(p, enb) \
+    ((p)->sio_calldown->down_enable_hfc(p, enb))
+
+/* Set external clock to the given clock factor. If cf is zero,
+ * internal clock is used. If cf is non-zero external clock is used
+ * and the clock is cf times the baud.
+ */
+#define DOWN_SET_EXTCLK(p, cf) \
+    ((p)->sio_calldown->down_set_extclk(p, cf))
+
+/* Return the number of active ports. Since this is not associated with
+ * any one 'real' port, it is provided as a plain extern function rather
+ * than a calldown entry.
+ */
+extern unsigned int ioc4_down_num_ports(void);
+#define DOWN_GET_NUMBER_OF_PORTS() ioc4_down_num_ports()
+
+/* Write bytes to the device. The number of bytes actually written is
+ * returned. The upper layer will continue to call this function until
+ * it has no more data to send or until 0 is returned, indicating that
+ * no more bytes may be sent until some have drained.
+ */
+#define DOWN_WRITE(p, buf, len) \
+    ((p)->sio_calldown->down_write(p, buf, len))
+
+/* Same as DOWN_WRITE, but called only from synchronous du output
+ * routines. Allows lower layer the option of implementing kernel
+ * printfs differently than ordinary console output.
+ */
+#define DOWN_DU_WRITE(p, buf, len) \
+    ((p)->sio_calldown->down_du_write(p, buf, len))
+
+/* Flushes previous down_du_write() calls.  Needed on serial controllers
+ * that can heavily buffer output like IOC3 for conbuf_flush().
+ */
+#define DOWN_DU_FLUSH(p) \
+     ((p)->sio_calldown->down_du_flush(p))
+
+/* Set the output break condition high or low */
+#define DOWN_BREAK(p, brk) \
+    ((p)->sio_calldown->down_break(p, brk))
+
+/* Enable/disable TX for soft flow control */
+#define DOWN_ENABLE_TX(p) \
+    ((p)->sio_calldown->down_enable_tx(p, 1))
+#define DOWN_DISABLE_TX(p) \
+    ((p)->sio_calldown->down_enable_tx(p, 0))
+
+/* Read bytes from the device. The number of bytes actually read is
+ * returned. All bytes returned by a single call have the same error
+ * status. Thus if the device has 10 bytes queued for input and byte 5
+ * has a parity error, the first call to DOWN_READ will return bytes 0-4
+ * only. A subsequent call to DOWN_READ will first cause a call to
+ * UP_PARITY_ERROR to notify the upper layer that the next byte has an
+ * error, and then the call to DOWN_READ returns byte 5 alone. A
+ * subsequent call to DOWN_READ returns bytes 6-9. The upper layer
+ * continues to call DOWN_READ until 0 is returned, or until it runs out
+ * of buffer space to receive the chars.
+ */
+#define DOWN_READ(p, buf, len) \
+    ((p)->sio_calldown->down_read(p, buf, len))
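+
+/* Illustrative sketch, assuming a hypothetical queue_rx_bytes() buffering
+ * helper: the upper layer drains input by looping until DOWN_READ returns 0:
+ *
+ *	char rxbuf[64];
+ *	int n;
+ *
+ *	while ((n = DOWN_READ(port, rxbuf, sizeof(rxbuf))) > 0)
+ *		queue_rx_bytes(port, rxbuf, n);
+ */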
+
+/* Turn on/off event notification for the specified events. Notification
+ * status is unchanged for those events not specified.
+ */
+#define DOWN_NOTIFICATION(p, mask, on) \
+    ((p)->sio_calldown->down_notification(p, mask, on))
+
+/* Notification types. 1 per upcall. The upper layer can specify
+ * exactly which upcalls it wishes to receive. UP_DETACH is mandatory
+ * when applicable and cannot be enabled/disabled.
+ */
+#define N_DATA_READY	0x01
+#define N_OUTPUT_LOWAT	0x02
+#define N_BREAK		0x04
+#define N_PARITY_ERROR	0x08
+#define N_FRAMING_ERROR	0x10
+#define N_OVERRUN_ERROR	0x20
+#define N_DDCD		0x40
+#define N_DCTS		0x80
+
+#define N_ALL_INPUT	(N_DATA_READY | N_BREAK |			\
+			 N_PARITY_ERROR | N_FRAMING_ERROR |		\
+			 N_OVERRUN_ERROR | N_DDCD | N_DCTS)
+
+#define N_ALL_OUTPUT	N_OUTPUT_LOWAT
+
+#define N_ALL_ERRORS	(N_PARITY_ERROR | N_FRAMING_ERROR | N_OVERRUN_ERROR)
+
+#define N_ALL		(N_DATA_READY | N_OUTPUT_LOWAT | N_BREAK |	\
+			 N_PARITY_ERROR | N_FRAMING_ERROR |		\
+			 N_OVERRUN_ERROR | N_DDCD | N_DCTS)
+
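+/* Illustrative sketch, not part of this interface: an upper layer open
+ * path might enable every input event once the port opens successfully:
+ *
+ *	if (DOWN_OPEN(port) == 0)
+ *		DOWN_NOTIFICATION(port, N_ALL_INPUT, 1);
+ */
+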
+/* Instruct the lower layer that the upper layer would like to be
+ * notified every t ticks when data is being received. If data is
+ * streaming in, the lower layer should buffer enough data that
+ * notification is not required more often than requested, and set a
+ * timeout so that notification does not occur less often than
+ * requested. If the lower layer does not support such operations, it
+ * should return 1, indicating that the upper layer should emulate these
+ * functions in software.
+ */
+#define DOWN_RX_TIMEOUT(p, t) \
+    ((p)->sio_calldown->down_rx_timeout(p, t))
+
+/* Set the output value of DTR */
+#define DOWN_SET_DTR(p, dtr) \
+    ((p)->sio_calldown->down_set_DTR(p, dtr))
+
+/* Set the output value of RTS */
+#define DOWN_SET_RTS(p, rts) \
+    ((p)->sio_calldown->down_set_RTS(p, rts))
+
+/* Query current input value of DCD */
+#define DOWN_QUERY_DCD(p) \
+    ((p)->sio_calldown->down_query_DCD(p))
+
+/* Query current input value of CTS */
+#define DOWN_QUERY_CTS(p) \
+    ((p)->sio_calldown->down_query_CTS(p))
+
+/* Set transmission protocol */
+#define DOWN_SET_PROTOCOL(p, proto) \
+    ((p)->sio_calldown->down_set_protocol(p, proto))
+
+/* Query mapped interface type */
+#define DOWN_GET_MAPID(p, arg) \
+    ((p)->sio_calldown->down_mapid(p, arg))
+
+/* Perform mapping to user address space */
+#define DOWN_MAP(p, vt, off) \
+    ((p)->sio_calldown->down_map(p, vt, off))
+
+/* Cleanup after mapped port is closed */
+#define DOWN_UNMAP(p) \
+    ((p)->sio_calldown->down_unmap(p))
+
+/* Set/Reset ioc3 sscr register */
+#define DOWN_SET_SSCR(p, arg, flag) \
+    ((p)->sio_calldown->down_set_sscr(p, arg, flag))
+
+
+/* The callup struct. This is a set of entry points providing
+ * black-box access to the upper level kernel interface by the
+ * hardware handling code. These entry points are used for event
+ * notification 
+ */
+struct serial_callup {
+    void (*up_data_ready)	(sioport_t *port);
+    void (*up_output_lowat)	(sioport_t *port);
+    void (*up_ncs)		(sioport_t *port, int ncs);
+    void (*up_dDCD)		(sioport_t *port, int dcd);
+    void (*up_dCTS)		(sioport_t *port, int cts);
+    void (*up_detach)		(sioport_t *port);
+};
+
+/*
+ * Macros used by the lower layer to access the upper layer for event
+ * notification. These functions are generally called in response to
+ * an interrupt. Since the port lock may be released across UP calls,
+ * we must check the callup vector each time. However since the port
+ * lock is held during DOWN calls (from which these UP calls are made)
+ * there is no danger of the sio_callup vector being cleared between
+ * where it is checked and where it is used in the macro
+ */
+
+/* Notify the upper layer that there are input bytes available and
+ * DOWN_READ may now be called
+ */
+#define UP_DATA_READY(p) \
+    ((p)->sio_callup ? (p)->sio_callup->up_data_ready(p):(void)0)
+
+/* Notify the upper layer that the lower layer has freed up some
+ * output buffer space and DOWN_WRITE may now be called
+ */
+#define UP_OUTPUT_LOWAT(p) \
+    ((p)->sio_callup ? (p)->sio_callup->up_output_lowat(p):(void)0)
+
+/* Notify the upper layer that the next char returned by DOWN_READ
+ * has the indicated special status. (see NCS_* above)
+ */
+#define UP_NCS(p, ncs) \
+    ((p)->sio_callup ? (p)->sio_callup->up_ncs(p, ncs):(void)0)
+
+/* Notify the upper layer of the new DCD input value */
+#define UP_DDCD(p, dcd) \
+    ((p)->sio_callup ? (p)->sio_callup->up_dDCD(p, dcd):(void)0)
+
+/* Notify the upper layer of the new CTS input value */
+#define UP_DCTS(p, cts) \
+    ((p)->sio_callup ? (p)->sio_callup->up_dCTS(p, cts):(void)0)
+
+/* notify the upper layer that the lower layer hardware has been detached 
+ * Since the port lock is NOT held when this macro is executed, we must
+ * guard against the sio_callup vector being cleared between when we check
+ * it and when we make the upcall, so we use a local copy.
+ */
+#define UP_DETACH(p) \
+{ \
+    struct serial_callup *up; \
+    if ((up = (p)->sio_callup)) \
+	up->up_detach(p); \
+}
+
+/* Port locking protocol:
+ * Any time a DOWN call is made into one of the lower layer entry points,
+ * the corresponding port is already locked and remains locked throughout
+ * that downcall. When a lower layer routine makes an UP call, the port
+ * is assumed to be locked on entry to the upper layer routine, but the
+ * upper layer routine may release and reacquire the lock if it wishes.
+ * Thus the lower layer routine should not rely on the port lock being
+ * held across upcalls. Further, since the port may be disconnected
+ * any time the port lock is not held, an UP call may cause subsequent
+ * UP calls to become noops since the upcall vector will be zeroed when
+ * the port is closed. Thus, any lower layer routine making UP calls must
+ * be prepared to deal with the possibility that any UP calls it makes
+ * are noops.
+ *
+ * The only time a lower layer routine should manipulate the port lock
+ * is the lower layer interrupt handler, which should acquire the lock
+ * during its critical execution.
+ * 
+ * Any function which assumes that the port is or isn't locked should
+ * use the function sio_port_islocked in an ASSERT statement to verify
+ * this assumption
+ */
+
+#if DEBUG
+extern int sio_port_islocked(sioport_t *);
+#endif
+
+#define SIO_LOCK_PORT(port, flags)	spin_lock_irqsave(&port->sio_lock, flags)
+#define SIO_UNLOCK_PORT(port, flags)	spin_unlock_irqrestore(&port->sio_lock, flags)
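+
+/* Illustrative sketch, assuming a made-up rx_data_pending condition: the
+ * lower layer interrupt handler is the one place that takes the port lock
+ * itself before making UP calls:
+ *
+ *	unsigned long flags;
+ *
+ *	SIO_LOCK_PORT(port, flags);
+ *	if (rx_data_pending)
+ *		UP_DATA_READY(port);
+ *	SIO_UNLOCK_PORT(port, flags);
+ */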
+
+/* kernel debugger support */
+#ifdef _LANGUAGE_C
+extern int console_is_tport;
+#define CNTRL_A		'\001'
+#if DEBUG
+#ifndef DEBUG_CHAR
+#define DEBUG_CHAR	CNTRL_A
+#endif
+#else
+#define DEBUG_CHAR	CNTRL_A
+#endif
+#endif
+
+
+extern void ioc4_serial_initport(sioport_t *, int);
+
+
+/* flags to notify sio_initport() which type of nodes are
+ * desired for a particular hardware type
+ */
+#define NODE_TYPE_D		0x01 /* standard plain streams interface */
+#define NODE_TYPE_MODEM		0x02 /* modem streams interface */
+#define NODE_TYPE_FLOW_MODEM	0x04 /* modem/flow control streams */
+#define NODE_TYPE_CHAR		0x08 /* character interface */
+#define NODE_TYPE_MIDI		0x10 /* midi interface */
+#define NODE_TYPE_D_RS422	0x20 /* RS422 without flow control */
+#define NODE_TYPE_FLOW_RS422	0x40 /* RS422 with flow control */
+
+#define NODE_TYPE_USER		0x80 /* user mapped interface */
+#define NODE_TYPE_TIMESTAMPED	0x100 /* user mapped interface */
+
+#define NODE_TYPE_ALL_RS232	(NODE_TYPE_D | NODE_TYPE_MODEM | \
+				 NODE_TYPE_FLOW_MODEM | NODE_TYPE_CHAR | \
+				 NODE_TYPE_MIDI | NODE_TYPE_TIMESTAMPED)
+#define NODE_TYPE_ALL_RS422	(NODE_TYPE_D_RS422 | NODE_TYPE_FLOW_RS422 | \
+				 NODE_TYPE_TIMESTAMPED)
+
+/* Flags for devflags field of miditype structure */
+#define MIDIDEV_EXTERNAL 0             /* lower half initializes devflags to this for an external device */
+#define MIDIDEV_INTERNAL 0x2
+
+#define MIDIDEV_UNREGISTERED -1    /* Initialization for portidx field of miditype structure */
+
+typedef struct miditype_s{
+  int devflags;                    /* DEV_EXTERNAL, DEV_INTERNAL */
+  int portidx;  
+  void *midi_upper;                      
+  sioport_t *port;
+} miditype_t;
+
+typedef struct tsiotype_s{
+  void *tsio_upper;                      
+  sioport_t *port;
+  int portidx;
+  int urbidx;
+} tsiotype_t;
+
+#endif /* _ASM_IA64_SN_SERIALIO_H */
diff -Nru a/include/asm-ia64/sn/sgi.h b/include/asm-ia64/sn/sgi.h
--- a/include/asm-ia64/sn/sgi.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/sgi.h	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -7,28 +6,35 @@
  * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
  */
 
+/*
+ * This file is a bit of a dumping ground. A lot of things that don't really
+ * have a home, but which make life easier, end up here.
+ */
 
 #ifndef _ASM_IA64_SN_SGI_H
 #define _ASM_IA64_SN_SGI_H
 
 #include <linux/config.h>
-
 #include <asm/sn/types.h>
-#include <asm/uaccess.h>		/* for copy_??_user */
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <asm/sn/hwgfs.h>
-
 typedef hwgfs_handle_t vertex_hdl_t;
 
-typedef int64_t  __psint_t;	/* needed by klgraph.c */
 
-typedef enum { B_FALSE, B_TRUE } boolean_t;
+/* 2.6isms */
+#define VM_NONCACHED 0
+/* end 2.6isms */
+
+/* Nice general name length that lots of people like to use */
+#ifndef MAXDEVNAME
+#define MAXDEVNAME 256
+#endif
 
 
 /*
-** Possible return values from graph routines.
-*/
+ * Possible return values from graph routines.
+ */
 typedef enum graph_error_e {
 	GRAPH_SUCCESS,		/* 0 */
 	GRAPH_DUP,		/* 1 */
@@ -40,87 +46,10 @@
 	GRAPH_IN_USE		/* 7 */
 } graph_error_t;
 
-#define KM_SLEEP   0x0000
-#define KM_NOSLEEP 0x0001		/* needed by kmem_alloc_node(), kmem_zalloc()
-					 * calls */
-#define VM_NOSLEEP 0x0001		/* needed kmem_alloc_node(), kmem_zalloc_node
-					 * calls */
-#define XG_WIDGET_PART_NUM      0xC102          /* KONA/xt_regs.h     XG_XT_PART_NUM_VALUE */
-
-typedef uint64_t vhandl_t;
-
-
-#define NBPP PAGE_SIZE
-#define _PAGESZ PAGE_SIZE
-
-#ifndef MAXDEVNAME
-#define MAXDEVNAME 256
-#endif
-
-#define HUB_PIO_CONVEYOR 0x1
 #define CNODEID_NONE ((cnodeid_t)-1)
-#define XTALK_PCI_PART_NUM "030-1275-"
-#define kdebug 0
-
-
-#define COPYIN(a, b, c)		copy_from_user(b,a,c)
-#define COPYOUT(a, b, c)	copy_to_user(b,a,c)
-
-#define BZERO(a,b)		memset(a, 0, b)
-
-#define kern_malloc(x)		kmalloc(x, GFP_KERNEL)
-#define kern_free(x)		kfree(x)
-
-typedef cpuid_t cpu_cookie_t;
 #define CPU_NONE		(-1)
+#define GRAPH_VERTEX_NONE ((vertex_hdl_t)-1)
 
-/*
- * mutext support mapping
- */
-
-#define mutex_spinlock_init(s)	spin_lock_init(s)
-inline static unsigned long
-mutex_spinlock(spinlock_t *sem) {
-	unsigned long flags = 0;
-//	spin_lock_irqsave(sem, flags);
-	spin_lock(sem);
-	return(flags);
-}
-// #define mutex_spinunlock(s,t)	spin_unlock_irqrestore(s,t)
-#define mutex_spinunlock(s,t)	spin_unlock(s)
-
-
-#define mutex_t			struct semaphore
-#define mutex_init(s)		init_MUTEX(s)
-#define mutex_init_locked(s)	init_MUTEX_LOCKED(s)
-#define mutex_lock(s)		down(s)
-#define mutex_unlock(s)		up(s)
-
-#define io_splock(s)		mutex_spinlock(s)
-#define io_spunlock(s,t)	spin_unlock(s)
-
-#define spin_lock_destroy(s)
-
-#if defined(DISABLE_ASSERT)
-#define ASSERT(expr)
-#define ASSERT_ALWAYS(expr)
-#else
-#define ASSERT(expr)  do {	\
-        if(!(expr)) { \
-		printk( "Assertion [%s] failed! %s:%s(line=%d)\n",\
-			#expr,__FILE__,__FUNCTION__,__LINE__); \
-		panic("Assertion panic\n"); 	\
-        } } while(0)
-
-#define ASSERT_ALWAYS(expr)	do {\
-        if(!(expr)) { \
-		printk( "Assertion [%s] failed! %s:%s(line=%d)\n",\
-			#expr,__FILE__,__FUNCTION__,__LINE__); \
-		panic("Assertion always panic\n"); 	\
-        } } while(0)
-#endif	/* DISABLE_ASSERT */
-
-#define PRINT_PANIC		panic
 
 /* print_register() defs */
 
@@ -147,31 +76,28 @@
 
 extern void print_register(unsigned long long, struct reg_desc *);
 
-/******************************************
- * Definitions that do not exist in linux *
- ******************************************/
-
-#define DELAY(a)
-
-/************************************************
- * Routines redefined to use linux equivalents. *
- ************************************************/
-
-/* #define FIXME(s) printk("FIXME: [ %s ] in %s at %s:%d\n", s, __FUNCTION__, __FILE__, __LINE__) */
-
-#define FIXME(s)
-
-/* move to stubs.c yet */
-#define dev_to_vhdl(dev) 0
-#define get_timestamp() 0
-#define us_delay(a)
-#define v_mapphys(a,b,c) 0    // printk("Fixme: v_mapphys - soft->base 0x%p\n", b);
-#define splhi()  0
-#define splx(s)
-
-extern void * snia_kmem_alloc_node(register size_t, register int, cnodeid_t);
-extern void * snia_kmem_zalloc(size_t, int);
-extern void * snia_kmem_zalloc_node(register size_t, register int, cnodeid_t );
-extern int is_specified(char *);
+
+/*
+ * No code is complete without an Assertion macro
+ */
+
+#if defined(DISABLE_ASSERT)
+#define ASSERT(expr)
+#define ASSERT_ALWAYS(expr)
+#else
+#define ASSERT(expr)  do {	\
+        if(!(expr)) { \
+		printk( "Assertion [%s] failed! %s:%s(line=%d)\n",\
+			#expr,__FILE__,__FUNCTION__,__LINE__); \
+		panic("Assertion panic\n"); 	\
+        } } while(0)
+
+#define ASSERT_ALWAYS(expr)	do {\
+        if(!(expr)) { \
+		printk( "Assertion [%s] failed! %s:%s(line=%d)\n",\
+			#expr,__FILE__,__FUNCTION__,__LINE__); \
+		panic("Assertion always panic\n"); 	\
+        } } while(0)
+#endif	/* DISABLE_ASSERT */
 
 #endif /* _ASM_IA64_SN_SGI_H */
diff -Nru a/include/asm-ia64/sn/simulator.h b/include/asm-ia64/sn/simulator.h
--- a/include/asm-ia64/sn/simulator.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/simulator.h	Thu Nov  6 13:42:35 2003
@@ -16,11 +16,13 @@
 #define IS_RUNNING_ON_SIMULATOR() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;})
 
 #define SIMULATOR_SLEEP()	asm("nop.i 0x8beef")
+#define SIMULATOR_PAUSE()	asm("nop.i 0x9beef")
 
 #else
 
 #define IS_RUNNING_ON_SIMULATOR()	(0)
 #define SIMULATOR_SLEEP()
+#define SIMULATOR_PAUSE()
 
 #endif
 
diff -Nru a/include/asm-ia64/sn/sn2/addrs.h b/include/asm-ia64/sn/sn2/addrs.h
--- a/include/asm-ia64/sn/sn2/addrs.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/sn2/addrs.h	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
 /*
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -35,6 +34,27 @@
  *   NodeOffset: byte offset
  */
 
+/* TIO address format:
+ *  4 4        3 3 3 3 3             0
+ *  9 8        8 7 6 5 4
+ * +-+----------+-+---+--------------+
+ * |0| Node ID  |0|CID| Node offset  |
+ * +-+----------+-+---+--------------+
+ *
+ * Node ID: if bit 38 == 1, the node is an ICE (TIO) node.
+ * Bit 37: Must be zero.
+ * CID: Chiplet ID:
+ *     b'01: TIO LB (Indicates TIO MMR access.)
+ *     b'11: TIO ICE (indicates coretalk space access.)
+ * Node offset: byte offset.
+ */
+
+/*
+ * Note that in both of the above address formats, bit
+ * 35 set indicates that the reference is to the 
+ * shub or tio MMRs.
+ */
+
 #ifndef __ASSEMBLY__
 typedef union ia64_sn2_pa {
 	struct {
@@ -55,7 +75,12 @@
 #define LOCAL_MMR_SPACE		0xc000008000000000	/* Local MMR space */
 #define LOCAL_PHYS_MMR_SPACE	0x8000008000000000	/* Local PhysicalMMR space */
 #define LOCAL_MEM_SPACE		0xc000010000000000	/* Local Memory space */
+/* It so happens that setting bit 35 indicates a reference to the SHUB or TIO
+ * MMR space.  
+ */
 #define GLOBAL_MMR_SPACE	0xc000000800000000	/* Global MMR space */
+#define TIO_MMR_SPACE		0xc000000800000000	/* TIO MMR space */
+#define ICE_MMR_SPACE		0xc000000000000000	/* ICE MMR space */
 #define GLOBAL_PHYS_MMR_SPACE	0x0000000800000000	/* Global Physical MMR space */
 #define GET_SPACE		0xe000001000000000	/* GET space */
 #define AMO_SPACE		0xc000002000000000	/* AMO space */
@@ -66,6 +91,7 @@
 #define PHYS_MEM_SPACE		0x0000003000000000	/* physical memory space */
 
 /* SN2 address macros */
+/* NID_SHFT has the right value for both SHUB and TIO addresses.*/
 #define NID_SHFT		38
 #define LOCAL_MMR_ADDR(a)	(UNCACHED | LOCAL_MMR_SPACE | (a))
 #define LOCAL_MMR_PHYS_ADDR(a)	(UNCACHED_PHYS | LOCAL_PHYS_MMR_SPACE | (a))
@@ -85,6 +111,7 @@
  */
 #define CAC_BASE		CACHEABLE_MEM_SPACE			/* cacheable memory space */
 #define IO_BASE			(UNCACHED | GLOBAL_MMR_SPACE)		/* lower 4G maps II's XIO space */
+#define TIO_BASE		(UNCACHED | ICE_MMR_SPACE)		/* lower 4G maps TIO space */
 #define AMO_BASE		(UNCACHED | AMO_SPACE)			/* fetch & op space */
 #define MSPEC_BASE		AMO_BASE				/* fetch & op space */
 #define UNCAC_BASE		(UNCACHED | CACHEABLE_MEM_SPACE)	/* uncached global memory */
@@ -101,15 +128,16 @@
 #define TO_UALIAS(x)            (UALIAS_BASE | TO_NODE_ADDRSPACE(x))
 #define NODE_SIZE_BITS		36	/* node offset : bits <35:0> */
 #define BWIN_SIZE_BITS		29	/* big window size: 512M */
+#define TIO_BWIN_SIZE_BITS	30	/* big window size: 1G */
 #define NASID_BITS		11	/* bits <48:38> */
 #define NASID_BITMASK		(0x7ffULL)
 #define NASID_SHFT		NID_SHFT
 #define NASID_META_BITS		0	/* ???? */
 #define NASID_LOCAL_BITS	7	/* same router as SN1 */
 
-#define NODE_ADDRSPACE_SIZE     (UINT64_CAST 1 << NODE_SIZE_BITS)
-#define NASID_MASK              (UINT64_CAST NASID_BITMASK << NASID_SHFT)
-#define NASID_GET(_pa)          (int) ((UINT64_CAST (_pa) >>            \
+#define NODE_ADDRSPACE_SIZE     ((uint64_t) 1 << NODE_SIZE_BITS)
+#define NASID_MASK              ((uint64_t) NASID_BITMASK << NASID_SHFT)
+#define NASID_GET(_pa)          (int) (((uint64_t) (_pa) >>            \
                                         NASID_SHFT) & NASID_BITMASK)
 #define PHYS_TO_DMA(x)          ( ((x & NASID_MASK) >> 2) |             \
                                   (x & (NODE_ADDRSPACE_SIZE - 1)) )
@@ -130,11 +158,14 @@
         : RAW_NODE_SWIN_BASE(nasid, widget))
 #else
 #define NODE_SWIN_BASE(nasid, widget) \
-     (NODE_IO_BASE(nasid) + (UINT64_CAST (widget) << SWIN_SIZE_BITS))
+     (NODE_IO_BASE(nasid) + ((uint64_t) (widget) << SWIN_SIZE_BITS))
 #define LOCAL_SWIN_BASE(widget) \
-	(UNCACHED | LOCAL_MMR_SPACE | ((UINT64_CAST (widget) << SWIN_SIZE_BITS)))
+	(UNCACHED | LOCAL_MMR_SPACE | (((uint64_t) (widget) << SWIN_SIZE_BITS)))
 #endif /* __ASSEMBLY__ */
 
+#define TIO_SWIN_BASE(nasid, widget)					\
+	(TIO_IO_BASE(nasid) + ((uint64_t) (widget) << TIO_SWIN_SIZE_BITS))
+
 /*
  * The following definitions pertain to the IO special address
  * space.  They define the location of the big and little windows
@@ -142,15 +173,26 @@
  */
 
 #define BWIN_INDEX_BITS         3
-#define BWIN_SIZE               (UINT64_CAST 1 << BWIN_SIZE_BITS)
+#define BWIN_SIZE               ((uint64_t) 1 << BWIN_SIZE_BITS)
 #define BWIN_SIZEMASK           (BWIN_SIZE - 1)
 #define BWIN_WIDGET_MASK        0x7
 #define NODE_BWIN_BASE0(nasid)  (NODE_IO_BASE(nasid) + BWIN_SIZE)
 #define NODE_BWIN_BASE(nasid, bigwin)   (NODE_BWIN_BASE0(nasid) +       \
-                        (UINT64_CAST (bigwin) << BWIN_SIZE_BITS))
+                        ((uint64_t) (bigwin) << BWIN_SIZE_BITS))
 
 #define BWIN_WIDGETADDR(addr)   ((addr) & BWIN_SIZEMASK)
 #define BWIN_WINDOWNUM(addr)    (((addr) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+
+#define TIO_BWIN_INDEX_BITS         3
+#define TIO_BWIN_SIZE               ((uint64_t) 1 << TIO_BWIN_SIZE_BITS)
+#define TIO_BWIN_SIZEMASK           (TIO_BWIN_SIZE - 1)
+#define TIO_BWIN_WIDGET_MASK        0x3
+#define TIO_BWIN_BASE0(nasid)  (TIO_IO_BASE(nasid) + TIO_BWIN_SIZE)
+#define TIO_BWIN_BASE(nasid, bigwin)   (TIO_BWIN_BASE0(nasid) +       \
+                        ((uint64_t) (bigwin) << TIO_BWIN_SIZE_BITS))
+
+#define TIO_BWIN_WIDGETADDR(addr)   ((addr) & TIO_BWIN_SIZEMASK)
+#define TIO_BWIN_WINDOWNUM(addr)    (((addr) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WIDGET_MASK)
 
 /*
  * Verify if addr belongs to large window address of node with "nasid"
diff -Nru a/include/asm-ia64/sn/sn2/arch.h b/include/asm-ia64/sn/sn2/arch.h
--- a/include/asm-ia64/sn/sn2/arch.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/sn2/arch.h	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id: arch.h,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -28,7 +27,7 @@
  * Effectively, it's the maximum number of compact node ids (cnodeid_t).
  * This is not necessarily the same as MAX_NASIDS.
  */
-#define MAX_COMPACT_NODES       128
+#define MAX_COMPACT_NODES       2048
 
 /*
  * MAX_REGIONS refers to the maximum number of hardware partitioned regions.
diff -Nru a/include/asm-ia64/sn/sn2/iceio.h b/include/asm-ia64/sn/sn2/iceio.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/include/asm-ia64/sn/sn2/iceio.h	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,162 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SN2_TIOIO_H
+#define _ASM_IA64_SN_SN2_TIOIO_H
+
+#include <asm/sn/arch.h>
+
+#define TIO_WIDGET_ID_MAX 0x3
+#define TIO_NUM_IITES		0x7
+#define TIO_NUM_BIG_WINDOW	TIO_NUM_IITES
+
+
+/* PIO MANAGEMENT */
+
+extern hub_piomap_t
+tio_piomap_alloc(vertex_hdl_t dev,      /* set up mapping for this device */
+                device_desc_t dev_desc, /* device descriptor */
+                iopaddr_t xtalk_addr,   /* map for this xtalk_addr range */
+                size_t byte_count,
+                size_t byte_count_max,  /* maximum size of a mapping */
+                unsigned flags);                /* defined in sys/pio.h */
+
+extern void tio_piomap_free(hub_piomap_t hub_piomap);
+
+extern caddr_t
+tio_piomap_addr(hub_piomap_t hub_piomap,        /* mapping resources */
+                iopaddr_t xtalk_addr,           /* map for this xtalk addr */
+                size_t byte_count);             /* map this many bytes */
+
+extern void
+tio_piomap_done(hub_piomap_t hub_piomap);
+
+extern caddr_t
+tio_piotrans_addr(      vertex_hdl_t dev,       /* translate to this device */
+                        device_desc_t dev_desc, /* device descriptor */
+                        iopaddr_t xtalk_addr,   /* Crosstalk address */
+                        size_t byte_count,      /* map this many bytes */
+                        unsigned flags);        /* (currently unused) */
+
+/* DMA MANAGEMENT */
+
+extern hub_dmamap_t
+tio_dmamap_alloc(       vertex_hdl_t dev,       /* set up mappings for dev */
+                        device_desc_t dev_desc, /* device descriptor */
+                        size_t byte_count_max,  /* max size of a mapping */
+                        unsigned flags);        /* defined in dma.h */
+
+extern void
+tio_dmamap_free(hub_dmamap_t dmamap);
+
+extern iopaddr_t
+tio_dmamap_addr(        hub_dmamap_t dmamap,    /* use mapping resources */
+                        paddr_t paddr,          /* map for this address */
+                        size_t byte_count);     /* map this many bytes */
+
+extern alenlist_t
+tio_dmamap_list(        hub_dmamap_t dmamap,    /* use mapping resources */
+                        alenlist_t alenlist,    /* map this Addr/Length List */
+                        unsigned flags);
+
+extern void
+tio_dmamap_done(        hub_dmamap_t dmamap);   /* done w/ mapping resources */
+
+extern iopaddr_t
+tio_dmatrans_addr(      vertex_hdl_t dev,       /* translate for this device */
+                        device_desc_t dev_desc, /* device descriptor */
+                        paddr_t paddr,          /* system physical address */
+                        size_t byte_count,      /* length */
+                        unsigned flags);                /* defined in dma.h */
+
+extern alenlist_t
+tio_dmatrans_list(      vertex_hdl_t dev,       /* translate for this device */
+                        device_desc_t dev_desc, /* device descriptor */
+                        alenlist_t palenlist,   /* system addr/length list */
+                        unsigned flags);                /* defined in dma.h */
+
+extern void
+tio_dmamap_drain(       hub_dmamap_t map);
+
+extern void
+tio_dmaaddr_drain(      vertex_hdl_t vhdl,
+                        paddr_t addr,
+                        size_t bytes);
+
+extern void
+tio_dmalist_drain(      vertex_hdl_t vhdl,
+                        alenlist_t list);
+
+
+/* INTERRUPT MANAGEMENT */
+
+extern hub_intr_t
+tio_intr_alloc( vertex_hdl_t dev,               /* which device */
+                device_desc_t dev_desc,         /* device descriptor */
+                vertex_hdl_t owner_dev);        /* owner of this interrupt */
+
+extern hub_intr_t
+tio_intr_alloc_nothd(vertex_hdl_t dev,          /* which device */
+                device_desc_t dev_desc,         /* device descriptor */
+                vertex_hdl_t owner_dev);        /* owner of this interrupt */
+
+extern void
+tio_intr_free(hub_intr_t intr_hdl);
+
+extern int
+tio_intr_connect(       hub_intr_t intr_hdl,    /* xtalk intr resource hndl */
+			intr_func_t intr_func,          /* xtalk intr handler */
+			void *intr_arg,                 /* arg to intr handler */
+                        xtalk_intr_setfunc_t setfunc, /* func to set intr hw */
+                        void *setfunc_arg);     /* arg to setfunc */
+
+extern void
+tio_intr_disconnect(hub_intr_t intr_hdl);
+
+
+/* CONFIGURATION MANAGEMENT */
+
+extern void
+tio_provider_startup(vertex_hdl_t hub);
+
+extern void
+tio_provider_shutdown(vertex_hdl_t hub);
+
+#define HUB_PIO_CONVEYOR        0x1     /* PIO in conveyor belt mode */
+#define HUB_PIO_FIRE_N_FORGET   0x2     /* PIO in fire-and-forget mode */
+
+/* Flags that make sense to hub_widget_flags_set */
+#define HUB_WIDGET_FLAGS        (                               \
+				 HUB_PIO_CONVEYOR       |       \
+				 HUB_PIO_FIRE_N_FORGET          \
+				)
+
+
+
+/* Set the PIO mode for a widget. */
+extern int      tio_widget_flags_set(nasid_t            nasid,
+                                     xwidgetnum_t       widget_num,
+                                     hub_widget_flags_t flags);
+
+/* Error Handling. */
+extern int tio_ioerror_handler(vertex_hdl_t, int, int, struct io_error_s *);
+extern int kl_ioerror_handler(cnodeid_t, cnodeid_t, cpuid_t,
+                              int, paddr_t, caddr_t, ioerror_mode_t);
+extern int tio_error_devenable(vertex_hdl_t, int, int);
+
+
+/* hubdev */
+extern void hubdev_init(void);
+extern void hubdev_register(int (*attach_method)(vertex_hdl_t));
+extern int hubdev_unregister(int (*attach_method)(vertex_hdl_t));
+extern int hubdev_docallouts(vertex_hdl_t hub);
+
+extern caddr_t hubdev_prombase_get(vertex_hdl_t hub);
+extern cnodeid_t hubdev_cnodeid_get(vertex_hdl_t hub);
+
+#endif /* _ASM_IA64_SN_SN2_TIOIO_H */
diff -Nru a/include/asm-ia64/sn/sn2/intr.h b/include/asm-ia64/sn/sn2/intr.h
--- a/include/asm-ia64/sn/sn2/intr.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/sn2/intr.h	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id: intr.h,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -18,10 +17,11 @@
 #define SGI_XBOW_ERROR			(0x32)
 #define SGI_PCIBR_ERROR			(0x33)
 #define SGI_ACPI_SCI_INT		(0x34)
+#define SGI_TIOCA_ERROR			(0x35)
 #define SGI_XPC_NOTIFY			(0xe7)
 
-#define IA64_SN2_FIRST_DEVICE_VECTOR	(0x34)
-#define IA64_SN2_LAST_DEVICE_VECTOR	(0xe7)
+#define IA64_SN2_FIRST_DEVICE_VECTOR	(0x36)
+#define IA64_SN2_LAST_DEVICE_VECTOR	(0xe6)
 
 #define SN2_IRQ_RESERVED        (0x1)
 #define SN2_IRQ_CONNECTED       (0x2)
diff -Nru a/include/asm-ia64/sn/sn2/shubio.h b/include/asm-ia64/sn/sn2/shubio.h
--- a/include/asm-ia64/sn/sn2/shubio.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/sn2/shubio.h	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id: shubio.h,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -3626,8 +3625,9 @@
 extern int hub_ioerror_handler(vertex_hdl_t, int, int, struct io_error_s *);
 extern int kl_ioerror_handler(cnodeid_t, cnodeid_t, cpuid_t,
                               int, paddr_t, caddr_t, ioerror_mode_t);
-extern int hub_error_devenable(vertex_hdl_t, int, int);
-extern int  hub_dma_enabled(vertex_hdl_t);
+extern caddr_t hubdev_prombase_get(vertex_hdl_t hub);
+
+extern cnodeid_t hubdev_cnodeid_get(vertex_hdl_t hub);
 
 #endif /* __ASSEMBLY__ */
 #endif /* _KERNEL */
diff -Nru a/include/asm-ia64/sn/sn2/sn_private.h b/include/asm-ia64/sn/sn2/sn_private.h
--- a/include/asm-ia64/sn/sn2/sn_private.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/sn2/sn_private.h	Thu Nov  6 13:42:35 2003
@@ -1,5 +1,4 @@
-/* $Id: sn_private.h,v 1.1 2002/02/28 17:31:26 marcelo Exp $
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -24,7 +23,6 @@
 extern __psunsigned_t get_master_bridge_base(void);
 extern void set_master_bridge_base(void);
 extern int check_nasid_equiv(nasid_t, nasid_t);
-extern nasid_t get_console_nasid(void);
 extern char get_console_pcislot(void);
 
 extern int is_master_baseio_nasid_widget(nasid_t test_nasid, xwidgetnum_t test_wid);
@@ -53,6 +51,7 @@
 extern int intr_connect_level(cpuid_t cpu, int bit);
 extern int intr_disconnect_level(cpuid_t cpu, int bit);
 extern cpuid_t intr_heuristic(vertex_hdl_t dev, int req_bit, int *resp_bit);
+extern cpuid_t tio_intr_heuristic(vertex_hdl_t dev, int req_bit, int *resp_bit);
 extern void intr_block_bit(cpuid_t cpu, int bit);
 extern void intr_unblock_bit(cpuid_t cpu, int bit);
 extern void setrtvector(intr_func_t);
@@ -93,6 +92,7 @@
 /* init.c */
 extern cnodeid_t get_compact_nodeid(void);	/* get compact node id */
 extern void init_platform_nodepda(nodepda_t *npda, cnodeid_t node);
+extern void *per_cpu_init(void);
 extern int is_fine_dirmode(void);
 extern void update_node_information(cnodeid_t);
  
@@ -155,7 +155,6 @@
  */
 struct hub_intr_s {
 	struct xtalk_intr_s	i_xtalk_info;	/* standard crosstalk intr info */
-	ilvl_t			i_swlevel;	/* software level for blocking intr */
 	cpuid_t			i_cpuid;	/* which cpu */
 	int			i_bit;		/* which bit */
 	int			i_flags;
@@ -211,6 +210,8 @@
 extern void xswitch_vertex_init(vertex_hdl_t xswitch);
 
 extern xtalk_provider_t hub_provider;
+extern xtalk_provider_t tio_provider;
+extern int numionodes;
 
 /* du.c */
 int ducons_write(char *buf, int len);
diff -Nru a/include/asm-ia64/sn/sn2/tio.h b/include/asm-ia64/sn/sn2/tio.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/include/asm-ia64/sn/sn2/tio.h	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,45 @@
+/* 
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SN2_TIO_H
+#define _ASM_IA64_SN_SN2_TIO_H
+
+#define	TIO_MMR_ADDR_MOD
+
+#define TIO_ITTE_BASE   0xb0008800        /* base of translation table entries */
+#define TIO_ITTE(bigwin)        (TIO_ITTE_BASE + 8*(bigwin))
+
+#define TIO_ITTE_OFFSET_BITS    8       /* size of offset field */
+#define TIO_ITTE_OFFSET_MASK    ((1<<TIO_ITTE_OFFSET_BITS)-1)
+#define TIO_ITTE_OFFSET_SHIFT   0
+
+#define TIO_ITTE_WIDGET_BITS    2       /* size of widget field */
+#define TIO_ITTE_WIDGET_MASK    ((1<<TIO_ITTE_WIDGET_BITS)-1)
+#define TIO_ITTE_WIDGET_SHIFT   12
+#define TIO_ITTE_VALID_MASK	0x1
+#define TIO_ITTE_VALID_SHIFT	16
+
+
+#define TIO_INT0        TIO_MMR_ADDR_MOD(0x0000000090001000)
+#define TIO_INT1        TIO_MMR_ADDR_MOD(0x0000000090001100)
+
+
+#define TIO_ITTE_PUT(nasid, bigwin, widget, addr, valid) \
+        REMOTE_HUB_S((nasid), TIO_ITTE(bigwin), \
+                (((((addr) >> TIO_BWIN_SIZE_BITS) & \
+                   TIO_ITTE_OFFSET_MASK) << TIO_ITTE_OFFSET_SHIFT) | \
+                (((widget) & TIO_ITTE_WIDGET_MASK) << TIO_ITTE_WIDGET_SHIFT)) | \
+		(( (valid) & TIO_ITTE_VALID_MASK) << TIO_ITTE_VALID_SHIFT))
+
+#define TIO_ITTE_DISABLE(nasid, bigwin) \
+        TIO_ITTE_PUT((nasid), HUB_PIO_MAP_TO_MEM, \
+                     (bigwin), 0, 0)
+
+#define TIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_ADDR((nasid), TIO_ITTE(bigwin))
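+
+/* Illustrative sketch, with purely made-up values: routing TIO big window
+ * "bw" on node "nasid" to widget "wid" at coretalk offset "addr" would look
+ * something like:
+ *
+ *	TIO_ITTE_PUT(nasid, bw, wid, addr, 1);
+ */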
+
+#endif /*  _ASM_IA64_SN_SN2_TIO_H */
diff -Nru a/include/asm-ia64/sn/tio/tioca.h b/include/asm-ia64/sn/tio/tioca.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/include/asm-ia64/sn/tio/tioca.h	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,482 @@
+#ifndef _ASM_IA64_SN_TIO_TIOCA_H
+#define _ASM_IA64_SN_TIO_TIOCA_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+#define TIOCA_PART_NUM	0xE020
+#define TIOCA_MFGR_NUM	0x24
+#define TIOCA_REV_A	0x1
+
+typedef uint64_t	tioca_reg_t;
+
+/*
+ * Register layout for TIO:CA.  See below for bitmasks for each register.
+ */
+
+typedef volatile struct tioca_s {
+	tioca_reg_t	ca_id;				/* 0x000000 */
+	tioca_reg_t	ca_control1;			/* 0x000008 */
+	tioca_reg_t	ca_control2;			/* 0x000010 */
+	tioca_reg_t	ca_status1;			/* 0x000018 */
+	tioca_reg_t	ca_status2;			/* 0x000020 */
+	tioca_reg_t	ca_gart_aperature;		/* 0x000028 */
+	tioca_reg_t	ca_gfx_detach;			/* 0x000030 */
+	tioca_reg_t	ca_inta_dest_addr;		/* 0x000038 */
+	tioca_reg_t	ca_intb_dest_addr;		/* 0x000040 */
+	tioca_reg_t	ca_err_int_dest_addr;		/* 0x000048 */
+	tioca_reg_t	ca_int_status;			/* 0x000050 */
+	tioca_reg_t	ca_int_status_alias;		/* 0x000058 */
+	tioca_reg_t	ca_mult_error;			/* 0x000060 */
+	tioca_reg_t	ca_mult_error_alias;		/* 0x000068 */
+	tioca_reg_t	ca_first_error;			/* 0x000070 */
+	tioca_reg_t	ca_int_mask;			/* 0x000078 */
+	tioca_reg_t	ca_crm_pkterr_type;		/* 0x000080 */
+	tioca_reg_t	ca_crm_pkterr_type_alias;	/* 0x000088 */
+	tioca_reg_t	ca_crm_ct_error_detail_1;	/* 0x000090 */
+	tioca_reg_t	ca_crm_ct_error_detail_2;	/* 0x000098 */
+	tioca_reg_t	ca_crm_tnumto;			/* 0x0000A0 */
+	tioca_reg_t	ca_gart_err;			/* 0x0000A8 */
+	tioca_reg_t	ca_pcierr_type;			/* 0x0000B0 */
+	tioca_reg_t	ca_pcierr_addr;			/* 0x0000B8 */
+
+	tioca_reg_t	ca_pad_0000C0[3];		/* 0x0000{C0..D0} */
+
+	tioca_reg_t	ca_pci_rd_buf_flush;		/* 0x0000D8 */
+	tioca_reg_t	ca_pci_dma_addr_extn;		/* 0x0000E0 */
+	tioca_reg_t	ca_agp_dma_addr_extn;		/* 0x0000E8 */
+	tioca_reg_t	ca_force_inta;			/* 0x0000F0 */
+	tioca_reg_t	ca_force_intb;			/* 0x0000F8 */
+	tioca_reg_t	ca_debug_vector_sel;		/* 0x000100 */
+	tioca_reg_t	ca_debug_mux_core_sel;		/* 0x000108 */
+	tioca_reg_t	ca_debug_mux_pci_sel;		/* 0x000110 */
+	tioca_reg_t	ca_debug_domain_sel;		/* 0x000118 */
+
+	tioca_reg_t	ca_pad_000120[28];		/* 0x0001{20..F8} */
+
+	tioca_reg_t	ca_gart_ptr_table;		/* 0x200 */
+	tioca_reg_t	ca_gart_tlb_addr[8];		/* 0x2{08..40} */
+} tioca_t, *tioca_p;
+
+/*
+ * Mask/shift definitions for TIO:CA registers.  The convention here is
+ * to mainly use the names as they appear in the "TIO AEGIS Programmers'
+ * Reference" with a CA_ prefix added.  Some exceptions were made to fix
+ * duplicate field names or to generalize fields that are common to
+ * different registers (ca_debug_mux_core_sel and ca_debug_mux_pci_sel for
+ * example).
+ *
+ * Fields consisting of a single bit have a single macro declaration to
+ * mask the bit.  Fields consisting of multiple bits have two declarations:
+ * one to mask the proper bits in a register, and a second with the suffix
+ * "_SHFT" to identify how far the mask needs to be shifted right to get
+ * its base value.
+ */
+
+/* ==== ca_control1 */
+#define CA_SYS_BIG_END			(1ull << 0)
+#define CA_DMA_AGP_SWAP			(1ull << 1)
+#define CA_DMA_PCI_SWAP			(1ull << 2)
+#define CA_PIO_IO_SWAP			(1ull << 3)
+#define CA_PIO_MEM_SWAP			(1ull << 4)
+#define CA_GFX_WR_SWAP			(1ull << 5)
+#define CA_AGP_FW_ENABLE		(1ull << 6)
+#define CA_AGP_CAL_CYCLE		(0x7ull << 7)
+#define CA_AGP_CAL_CYCLE_SHFT		7
+#define CA_AGP_CAL_PRSCL_BYP		(1ull << 10)
+#define CA_AGP_INIT_CAL_ENB		(1ull << 11)
+#define CA_INJ_ADDR_PERR		(1ull << 12)
+#define CA_INJ_DATA_PERR		(1ull << 13)
+	/* bits 15:14 unused */
+#define CA_PCIM_IO_NBE_AD		(0x7ull << 16)
+#define CA_PCIM_IO_NBE_AD_SHFT		16
+#define CA_PCIM_FAST_BTB_ENB		(1ull << 19)
+	/* bits 23:20 unused */
+#define CA_PIO_ADDR_OFFSET		(0xffull << 24)
+#define CA_PIO_ADDR_OFFSET_SHFT		24
+	/* bits 35:32 unused */
+#define CA_AGPDMA_OP_COMBDELAY		(0x1full << 36)
+#define CA_AGPDMA_OP_COMBDELAY_SHFT	36
+	/* bit 41 unused */
+#define CA_AGPDMA_OP_ENB_COMBDELAY	(1ull << 42)
+#define	CA_PCI_INT_LPCNT		(0xffull << 44)
+#define CA_PCI_INT_LPCNT_SHFT		44
+	/* bits 63:52 unused */
+
+/* ==== ca_control2 */
+#define CA_AGP_LATENCY_TO		(0xffull << 0)
+#define CA_AGP_LATENCY_TO_SHFT		0
+#define CA_PCI_LATENCY_TO		(0xffull << 8)
+#define CA_PCI_LATENCY_TO_SHFT		8
+#define CA_PCI_MAX_RETRY		(0x3ffull << 16)
+#define CA_PCI_MAX_RETRY_SHFT		16
+	/* bits 27:26 unused */
+#define CA_RT_INT_EN			(0x3ull << 28)
+#define CA_RT_INT_EN_SHFT		28
+#define CA_MSI_INT_ENB			(1ull << 30)
+#define CA_PCI_ARB_ERR_ENB		(1ull << 31)
+#define CA_GART_MEM_PARAM		(0x3ull << 32)
+#define CA_GART_MEM_PARAM_SHFT		32
+#define CA_GART_RD_PREFETCH_ENB		(1ull << 34)
+#define CA_GART_WR_PREFETCH_ENB		(1ull << 35)
+#define CA_GART_FLUSH_TLB		(1ull << 36)
+	/* bits 39:37 unused */
+#define CA_CRM_TNUMTO_PERIOD		(0x1fffull << 40)
+#define CA_CRM_TNUMTO_PERIOD_SHFT	40
+	/* bits 55:53 unused */
+#define CA_CRM_TNUMTO_ENB		(1ull << 56)
+#define CA_CRM_PRESCALER_BYP		(1ull << 57)
+	/* bits 59:58 unused */
+#define CA_CRM_MAX_CREDIT		(0x7ull << 60)
+#define CA_CRM_MAX_CREDIT_SHFT		60
+	/* bit 63 unused */
+
+/* ==== ca_status1 */
+#define CA_CORELET_ID			(0x3ull << 0)
+#define CA_CORELET_ID_SHFT		0
+#define CA_INTA_N			(1ull << 2)
+#define CA_INTB_N			(1ull << 3)
+#define CA_CRM_CREDIT_AVAIL		(0x7ull << 4)
+#define CA_CRM_CREDIT_AVAIL_SHFT	4
+	/* bit 7 unused */
+#define CA_CRM_SPACE_AVAIL		(0x7full << 8)
+#define CA_CRM_SPACE_AVAIL_SHFT		8
+	/* bit 15 unused */
+#define CA_GART_TLB_VAL			(0xffull << 16)
+#define CA_GART_TLB_VAL_SHFT		16
+	/* bits 63:24 unused */
+
+/* ==== ca_status2 */
+#define CA_GFX_CREDIT_AVAIL		(0xffull << 0)
+#define CA_GFX_CREDIT_AVAIL_SHFT	0
+#define CA_GFX_OPQ_AVAIL		(0xffull << 8)
+#define CA_GFX_OPQ_AVAIL_SHFT		8
+#define CA_GFX_WRBUFF_AVAIL		(0xffull << 16)
+#define CA_GFX_WRBUFF_AVAIL_SHFT	16
+#define CA_ADMA_OPQ_AVAIL		(0xffull << 24)
+#define CA_ADMA_OPQ_AVAIL_SHFT		24
+#define CA_ADMA_WRBUFF_AVAIL		(0xffull << 32)
+#define CA_ADMA_WRBUFF_AVAIL_SHFT	32
+#define CA_ADMA_RDBUFF_AVAIL		(0x7full << 40)
+#define CA_ADMA_RDBUFF_AVAIL_SHFT	40
+#define CA_PCI_PIO_OP_STAT		(1ull << 47)
+#define CA_PDMA_OPQ_AVAIL		(0xfull << 48)
+#define CA_PDMA_OPQ_AVAIL_SHFT		48
+#define CA_PDMA_WRBUFF_AVAIL		(0xfull << 52)
+#define CA_PDMA_WRBUFF_AVAIL_SHFT	52
+#define CA_PDMA_RDBUFF_AVAIL		(0x3ull << 56)
+#define CA_PDMA_RDBUFF_AVAIL_SHFT	56
+	/* bits 63:58 unused */
+
+/* ==== ca_gart_aperature */
+#define CA_GART_AP_ENB_AGP		(1ull << 0)
+#define CA_GART_PAGE_SIZE		(1ull << 1)
+#define CA_GART_AP_ENB_PCI		(1ull << 2)
+	/* bits 11:3 unused */
+#define CA_GART_AP_SIZE			(0x3ffull << 12)
+#define CA_GART_AP_SIZE_SHFT		12
+#define CA_GART_AP_BASE			(0x3ffffffffffull << 22)
+#define CA_GART_AP_BASE_SHFT		22
+
+/* ==== ca_inta_dest_addr
+   ==== ca_intb_dest_addr 
+   ==== ca_err_int_dest_addr */
+	/* bits 2:0 unused */
+#define CA_INT_DEST_ADDR		(0x7ffffffffffffull << 3)
+#define CA_INT_DEST_ADDR_SHFT		3
+	/* bits 55:54 unused */
+#define CA_INT_DEST_VECT		(0xffull << 56)
+#define CA_INT_DEST_VECT_SHFT		56
+
+/* ==== ca_int_status */
+/* ==== ca_int_status_alias */
+/* ==== ca_mult_error */
+/* ==== ca_mult_error_alias */
+/* ==== ca_first_error */
+/* ==== ca_int_mask */
+#define CA_PCI_ERR			(1ull << 0)
+	/* bits 3:1 unused */
+#define CA_GART_FETCH_ERR		(1ull << 4)
+#define CA_GFX_WR_OVFLW			(1ull << 5)
+#define CA_PIO_REQ_OVFLW		(1ull << 6)
+#define CA_CRM_PKTERR			(1ull << 7)
+#define CA_CRM_DVERR			(1ull << 8)
+#define CA_TNUMTO			(1ull << 9)
+#define CA_CXM_RSP_CRED_OVFLW		(1ull << 10)
+#define CA_CXM_REQ_CRED_OVFLW		(1ull << 11)
+#define CA_PIO_INVALID_ADDR		(1ull << 12)
+#define CA_PCI_ARB_TO			(1ull << 13)
+#define CA_AGP_REQ_OFLOW		(1ull << 14)
+#define CA_SBA_TYPE1_ERR		(1ull << 15)
+	/* bit 16 unused */
+#define CA_INTA				(1ull << 17)
+#define CA_INTB				(1ull << 18)
+#define CA_MULT_INTA			(1ull << 19)
+#define CA_MULT_INTB			(1ull << 20)
+#define CA_GFX_CREDIT_OVFLW		(1ull << 21)
+	/* bits 63:22 unused */
+
+/* ==== ca_crm_pkterr_type */
+/* ==== ca_crm_pkterr_type_alias */
+#define CA_CRM_PKTERR_SBERR_HDR		(1ull << 0)
+#define CA_CRM_PKTERR_DIDN		(1ull << 1)
+#define CA_CRM_PKTERR_PACTYPE		(1ull << 2)
+#define CA_CRM_PKTERR_INV_TNUM		(1ull << 3)
+#define CA_CRM_PKTERR_ADDR_RNG		(1ull << 4)
+#define CA_CRM_PKTERR_ADDR_ALGN		(1ull << 5)
+#define CA_CRM_PKTERR_HDR_PARAM		(1ull << 6)
+#define CA_CRM_PKTERR_CW_ERR		(1ull << 7)
+#define CA_CRM_PKTERR_SBERR_NH		(1ull << 8)
+#define CA_CRM_PKTERR_EARLY_TERM	(1ull << 9)
+#define CA_CRM_PKTERR_EARLY_TAIL	(1ull << 10)
+#define CA_CRM_PKTERR_MSSNG_TAIL	(1ull << 11)
+#define CA_CRM_PKTERR_MSSNG_HDR		(1ull << 12)
+	/* bits 15:13 unused */
+#define CA_FIRST_CRM_PKTERR_SBERR_HDR	(1ull << 16)
+#define CA_FIRST_CRM_PKTERR_DIDN	(1ull << 17)
+#define CA_FIRST_CRM_PKTERR_PACTYPE	(1ull << 18)
+#define CA_FIRST_CRM_PKTERR_INV_TNUM	(1ull << 19)
+#define CA_FIRST_CRM_PKTERR_ADDR_RNG	(1ull << 20)
+#define CA_FIRST_CRM_PKTERR_ADDR_ALGN	(1ull << 21)
+#define CA_FIRST_CRM_PKTERR_HDR_PARAM	(1ull << 22)
+#define CA_FIRST_CRM_PKTERR_CW_ERR	(1ull << 23)
+#define CA_FIRST_CRM_PKTERR_SBERR_NH	(1ull << 24)
+#define CA_FIRST_CRM_PKTERR_EARLY_TERM	(1ull << 25)
+#define CA_FIRST_CRM_PKTERR_EARLY_TAIL	(1ull << 26)
+#define CA_FIRST_CRM_PKTERR_MSSNG_TAIL	(1ull << 27)
+#define CA_FIRST_CRM_PKTERR_MSSNG_HDR	(1ull << 28)
+	/* bits 63:29 unused */
+
+/* ==== ca_crm_ct_error_detail_1 */
+#define CA_PKT_TYPE			(0xfull << 0)
+#define CA_PKT_TYPE_SHFT		0
+#define CA_SRC_ID			(0x3ull << 4)
+#define CA_SRC_ID_SHFT			4
+#define CA_DATA_SZ			(0x3ull << 6)
+#define CA_DATA_SZ_SHFT			6
+#define CA_TNUM				(0xffull << 8)
+#define CA_TNUM_SHFT			8
+#define CA_DW_DATA_EN			(0xffull << 16)
+#define CA_DW_DATA_EN_SHFT		16
+#define CA_GFX_CRED			(0xffull << 24)
+#define CA_GFX_CRED_SHFT		24
+#define CA_MEM_RD_PARAM			(0x3ull << 32)
+#define CA_MEM_RD_PARAM_SHFT		32
+#define CA_PIO_OP			(1ull << 34)
+#define CA_CW_ERR			(1ull << 35)
+	/* bits 62:36 unused */
+#define CA_VALID			(1ull << 63)
+
+/* ==== ca_crm_ct_error_detail_2 */
+	/* bits 2:0 unused */
+#define CA_PKT_ADDR			(0x1fffffffffffffull << 3)
+#define CA_PKT_ADDR_SHFT		3
+	/* bits 63:56 unused */
+
+/* ==== ca_crm_tnumto */
+#define CA_CRM_TNUMTO_VAL		(0xffull << 0)
+#define CA_CRM_TNUMTO_VAL_SHFT		0
+#define CA_CRM_TNUMTO_WR		(1ull << 8)
+	/* bits 63:9 unused */
+
+/* ==== ca_gart_err */
+#define CA_GART_ERR_SOURCE		(0x3ull << 0)
+#define CA_GART_ERR_SOURCE_SHFT		0
+	/* bits 3:2 unused */
+#define CA_GART_ERR_ADDR		(0xfffffffffull << 4)
+#define CA_GART_ERR_ADDR_SHFT		4
+	/* bits 63:40 unused */
+
+/* ==== ca_pcierr_type */
+#define CA_PCIERR_DATA			(0xffffffffull << 0)
+#define CA_PCIERR_DATA_SHFT		0
+#define CA_PCIERR_ENB			(0xfull << 32)
+#define CA_PCIERR_ENB_SHFT		32
+#define CA_PCIERR_CMD			(0xfull << 36)
+#define CA_PCIERR_CMD_SHFT		36
+#define CA_PCIERR_A64			(1ull << 40)
+#define CA_PCIERR_SLV_SERR		(1ull << 41)
+#define CA_PCIERR_SLV_WR_PERR		(1ull << 42)
+#define CA_PCIERR_SLV_RD_PERR		(1ull << 43)
+#define CA_PCIERR_MST_SERR		(1ull << 44)
+#define CA_PCIERR_MST_WR_PERR		(1ull << 45)
+#define CA_PCIERR_MST_RD_PERR		(1ull << 46)
+#define CA_PCIERR_MST_MABT		(1ull << 47)
+#define CA_PCIERR_MST_TABT		(1ull << 48)
+#define CA_PCIERR_MST_RETRY_TOUT	(1ull << 49)
+	/* bits 63:50 unused */
+
+/* ==== ca_pci_dma_addr_extn */
+#define CA_UPPER_NODE_OFFSET		(0x3full << 0)
+#define CA_UPPER_NODE_OFFSET_SHFT	0
+	/* bits 7:6 unused */
+#define CA_CHIPLET_ID			(0x3ull << 8)
+#define CA_CHIPLET_ID_SHFT		8
+	/* bits 11:10 unused */
+#define CA_PCI_DMA_NODE_ID		(0xffffull << 12)
+#define CA_PCI_DMA_NODE_ID_SHFT		12
+	/* bits 27:26 unused */
+#define CA_PCI_DMA_PIO_MEM_TYPE		(1ull << 28)
+	/* bits 63:29 unused */
+
+
+/* ==== ca_agp_dma_addr_extn */
+	/* bits 19:0 unused */
+#define CA_AGP_DMA_NODE_ID		(0xffffull << 20)
+#define CA_AGP_DMA_NODE_ID_SHFT		20
+	/* bits 27:26 unused */
+#define CA_AGP_DMA_PIO_MEM_TYPE		(1ull << 28)
+	/* bits 63:29 unused */
+
+/* ==== ca_debug_vector_sel */
+#define CA_DEBUG_MN_VSEL		(0xfull << 0)
+#define CA_DEBUG_MN_VSEL_SHFT		0
+#define CA_DEBUG_PP_VSEL		(0xfull << 4)
+#define CA_DEBUG_PP_VSEL_SHFT		4
+#define CA_DEBUG_GW_VSEL		(0xfull << 8)
+#define CA_DEBUG_GW_VSEL_SHFT		8
+#define CA_DEBUG_GT_VSEL		(0xfull << 12)
+#define CA_DEBUG_GT_VSEL_SHFT		12
+#define CA_DEBUG_PD_VSEL		(0xfull << 16)
+#define CA_DEBUG_PD_VSEL_SHFT		16
+#define CA_DEBUG_AD_VSEL		(0xfull << 20)
+#define CA_DEBUG_AD_VSEL_SHFT		20
+#define CA_DEBUG_CX_VSEL		(0xfull << 24)
+#define CA_DEBUG_CX_VSEL_SHFT		24
+#define CA_DEBUG_CR_VSEL		(0xfull << 28)
+#define CA_DEBUG_CR_VSEL_SHFT		28
+#define CA_DEBUG_BA_VSEL		(0xfull << 32)
+#define CA_DEBUG_BA_VSEL_SHFT		32
+#define CA_DEBUG_PE_VSEL		(0xfull << 36)
+#define CA_DEBUG_PE_VSEL_SHFT		36
+#define CA_DEBUG_BO_VSEL		(0xfull << 40)
+#define CA_DEBUG_BO_VSEL_SHFT		40
+#define CA_DEBUG_BI_VSEL		(0xfull << 44)
+#define CA_DEBUG_BI_VSEL_SHFT		44
+#define CA_DEBUG_AS_VSEL		(0xfull << 48)
+#define CA_DEBUG_AS_VSEL_SHFT		48
+#define CA_DEBUG_PS_VSEL		(0xfull << 52)
+#define CA_DEBUG_PS_VSEL_SHFT		52
+#define CA_DEBUG_PM_VSEL		(0xfull << 56)
+#define CA_DEBUG_PM_VSEL_SHFT		56
+	/* bits 63:60 unused */
+
+/* ==== ca_debug_mux_core_sel */
+/* ==== ca_debug_mux_pci_sel */
+#define CA_DEBUG_MSEL0			(0x7ull << 0)
+#define CA_DEBUG_MSEL0_SHFT		0
+	/* bit 3 unused */
+#define CA_DEBUG_NSEL0			(0x7ull << 4)
+#define CA_DEBUG_NSEL0_SHFT		4
+	/* bit 7 unused */
+#define CA_DEBUG_MSEL1			(0x7ull << 8)
+#define CA_DEBUG_MSEL1_SHFT		8
+	/* bit 11 unused */
+#define CA_DEBUG_NSEL1			(0x7ull << 12)
+#define CA_DEBUG_NSEL1_SHFT		12
+	/* bit 15 unused */
+#define CA_DEBUG_MSEL2			(0x7ull << 16)
+#define CA_DEBUG_MSEL2_SHFT		16
+	/* bit 19 unused */
+#define CA_DEBUG_NSEL2			(0x7ull << 20)
+#define CA_DEBUG_NSEL2_SHFT		20
+	/* bit 23 unused */
+#define CA_DEBUG_MSEL3			(0x7ull << 24)
+#define CA_DEBUG_MSEL3_SHFT		24
+	/* bit 27 unused */
+#define CA_DEBUG_NSEL3			(0x7ull << 28)
+#define CA_DEBUG_NSEL3_SHFT		28
+	/* bit 31 unused */
+#define CA_DEBUG_MSEL4			(0x7ull << 32)
+#define CA_DEBUG_MSEL4_SHFT		32
+	/* bit 35 unused */
+#define CA_DEBUG_NSEL4			(0x7ull << 36)
+#define CA_DEBUG_NSEL4_SHFT		36
+	/* bit 39 unused */
+#define CA_DEBUG_MSEL5			(0x7ull << 40)
+#define CA_DEBUG_MSEL5_SHFT		40
+	/* bit 43 unused */
+#define CA_DEBUG_NSEL5			(0x7ull << 44)
+#define CA_DEBUG_NSEL5_SHFT		44
+	/* bit 47 unused */
+#define CA_DEBUG_MSEL6			(0x7ull << 48)
+#define CA_DEBUG_MSEL6_SHFT		48
+	/* bit 51 unused */
+#define CA_DEBUG_NSEL6			(0x7ull << 52)
+#define CA_DEBUG_NSEL6_SHFT		52
+	/* bit 55 unused */
+#define CA_DEBUG_MSEL7			(0x7ull << 56)
+#define CA_DEBUG_MSEL7_SHFT		56
+	/* bit 59 unused */
+#define CA_DEBUG_NSEL7			(0x7ull << 60)
+#define CA_DEBUG_NSEL7_SHFT		60
+	/* bit 63 unused */
+
+
+/* ==== ca_debug_domain_sel */
+#define CA_DEBUG_DOMAIN_L		(1ull << 0)
+#define CA_DEBUG_DOMAIN_H		(1ull << 1)
+	/* bits 63:2 unused */
+
+/* ==== ca_gart_ptr_table */
+#define CA_GART_PTR_VAL			(1ull << 0)
+	/* bits 11:1 unused */
+#define CA_GART_PTR_ADDR		(0xfffffffffffull << 12)
+#define CA_GART_PTR_ADDR_SHFT		12
+	/* bits 63:56 unused */
+
+/* ==== ca_gart_tlb_addr[0-7] */
+#define CA_GART_TLB_ADDR		(0xffffffffffffffull << 0)
+#define CA_GART_TLB_ADDR_SHFT		0
+	/* bits 62:56 unused */
+#define CA_GART_TLB_ENTRY_VAL		(1ull << 63)
+
+/*
+ * PIO address space ranges for TIO:CA
+ */
+
+/* CA internal registers */
+#define CA_PIO_ADMIN			0x00000000
+#define CA_PIO_ADMIN_LEN		0x00010000
+
+/* GFX Write Buffer - Diagnostics */
+#define CA_PIO_GFX			0x00010000
+#define CA_PIO_GFX_LEN			0x00010000
+
+/* AGP DMA Write Buffer - Diagnostics */
+#define CA_PIO_AGP_DMAWRITE		0x00020000
+#define CA_PIO_AGP_DMAWRITE_LEN		0x00010000
+
+/* AGP DMA READ Buffer - Diagnostics */
+#define CA_PIO_AGP_DMAREAD		0x00030000
+#define CA_PIO_AGP_DMAREAD_LEN		0x00010000
+
+/* PCI Config Type 0 */
+#define CA_PIO_PCI_TYPE0_CONFIG		0x01000000
+#define CA_PIO_PCI_TYPE0_CONFIG_LEN	0x01000000
+
+/* PCI Config Type 1 */
+#define CA_PIO_PCI_TYPE1_CONFIG		0x02000000
+#define CA_PIO_PCI_TYPE1_CONFIG_LEN	0x01000000
+
+/* PCI I/O Cycles - mapped to PCI Address 0x00000000-0x04ffffff */
+#define CA_PIO_PCI_IO			0x03000000
+#define CA_PIO_PCI_IO_LEN		0x05000000
+
+/* PCI MEM Cycles - mapped to PCI with CA_PIO_ADDR_OFFSET of ca_control1 */
+/*	use Fast Write if enabled and coretalk packet type is a GFX request */
+#define CA_PIO_PCI_MEM_OFFSET		0x08000000
+#define CA_PIO_PCI_MEM_OFFSET_LEN	0x08000000
+
+/* PCI MEM Cycles - mapped to PCI Address 0x00000000-0xbfffffff */
+/*	use Fast Write if enabled and coretalk packet type is a GFX request */
+#define CA_PIO_PCI_MEM			0x40000000
+#define CA_PIO_PCI_MEM_LEN		0xc0000000
+
+#endif  /* _ASM_IA64_SN_TIO_TIOCA_H */
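
As an illustration only (not part of the patch), the MASK/_SHFT pairs above
are used by masking first and then shifting right to read a field, and by
shifting left into the mask to write one.  A minimal sketch with hypothetical
helper names:

	/* read a multi-bit field, e.g. ca_get(r, CA_GART_AP_SIZE, CA_GART_AP_SIZE_SHFT) */
	static inline uint64_t
	ca_get(tioca_reg_t reg, uint64_t mask, int shft)
	{
		return (reg & mask) >> shft;
	}

	/* write a multi-bit field, e.g. ca_set(r, CA_PIO_ADDR_OFFSET, CA_PIO_ADDR_OFFSET_SHFT, off) */
	static inline tioca_reg_t
	ca_set(tioca_reg_t reg, uint64_t mask, int shft, uint64_t val)
	{
		return (reg & ~mask) | ((val << shft) & mask);
	}
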
diff -Nru a/include/asm-ia64/sn/tio/tioca_private.h b/include/asm-ia64/sn/tio/tioca_private.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/include/asm-ia64/sn/tio/tioca_private.h	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,61 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_TIO_TIOCA_PRIVATE_H
+#define _ASM_IA64_SN_TIO_TIOCA_PRIVATE_H
+
+/*
+ * tioca_private.h -- private definitions for tioca
+ */
+
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <asm/sn/pci/pciio_private.h>
+
+struct tioca_intr_s {
+    struct pciio_intr_s     bi_pi;
+#define	bi_flags	bi_pi.pi_flags	/* TIOCA_INTR flags */
+#define	bi_dev		bi_pi.pi_dev	/* associated agp card */
+#define	bi_lines	bi_pi.pi_lines	/* which PCI interrupt line(s) */
+#define	bi_func		bi_pi.pi_func	/* handler function (when connected) */
+#define	bi_arg		bi_pi.pi_arg	/* handler parameter (when connected) */
+#define bi_mustruncpu	bi_pi.pi_mustruncpu /* Where we must run. */
+#define bi_irq		bi_pi.pi_irq	/* IRQ assigned. */
+#define bi_cpu		bi_pi.pi_cpu	/* cpu assigned. */
+    unsigned                bi_ibits;	/* TIOCA intr bits for lines A & B */
+    tioca_soft_p            bi_soft;	/* shortcut to soft info */
+    xtalk_intr_t            bi_intr;
+};
+
+typedef struct tioca_info_s {
+	struct pciio_info_s	f_c;	/* MUST BE FIRST */
+#define f_vertex        f_c.c_vertex    /* back pointer to vertex */
+#define f_bus           f_c.c_bus       /* which bus the card is in */
+#define f_slot          f_c.c_slot      /* which slot the card is in */
+#define f_func          f_c.c_func      /* which func (on multi-func cards) */
+#define f_vendor        f_c.c_vendor    /* PCI card "vendor" code */
+#define f_device        f_c.c_device    /* PCI card "device" code */
+#define f_master        f_c.c_master    /* PCI bus provider */
+#define f_mfast         f_c.c_mfast     /* cached fastinfo from c_master */
+#define f_pops          f_c.c_pops      /* cached provider from c_master */
+#define f_efunc         f_c.c_efunc     /* error handling function */
+#define f_einfo         f_c.c_einfo     /* first parameter for efunc */
+#define f_window        f_c.c_window    /* state of BASE regs */
+#define f_rwindow       f_c.c_rwindow   /* expansion ROM BASE regs */
+#define f_rbase         f_c.c_rbase     /* expansion ROM base */
+#define f_rsize         f_c.c_rsize     /* expansion ROM size */
+#define f_piospace      f_c.c_piospace  /* additional I/O spaces allocated */
+
+	/*
+	 * AGP extended capabilities
+	 */
+
+	char		*f_cfgptr;	/* cached ptr to config space */
+	uint8_t		f_agpcap;	/* AGP cap offset in cfg space */
+} *tioca_info_t;
+
+#endif				/* _ASM_IA64_SN_TIO_TIOCA_PRIVATE_H */
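
As an illustration only (not part of the patch), the f_* defines above are
aliases into the embedded pciio_info_s, so tioca code and generic pciio code
share the same storage; the field types come from pciio_private.h.  A
hypothetical use:

	/* info->f_slot and info->f_vendor expand to info->f_c.c_slot / c_vendor */
	static void
	tioca_info_print(tioca_info_t info)	/* hypothetical helper */
	{
		printk("tioca: slot %d vendor 0x%x\n",
		       (int)info->f_slot, (unsigned int)info->f_vendor);
	}
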
diff -Nru a/include/asm-ia64/sn/tio/tioca_soft.h b/include/asm-ia64/sn/tio/tioca_soft.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/include/asm-ia64/sn/tio/tioca_soft.h	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,80 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_TIO_TIOCA_SOFT_H
+#define _ASM_IA64_SN_TIO_TIOCA_SOFT_H
+
+/*
+ * Software structures relating to TIO:CA
+ */
+
+/*
+ * Soft structure representing a TIO:CA hw element
+ */
+
+typedef struct tioca_soft_s {
+	vertex_hdl_t		ca_systemhdl;
+	vertex_hdl_t		ca_vhdl;
+	tioca_t			*ca_base;	/* PIO addr for registers */
+	tioca_reg_t		ca_control1;	/* shadow */
+	tioca_reg_t		ca_control2;	/* shadow */
+	tioca_reg_t		ca_int_mask;
+	char 			*ca_name;
+	struct {
+        	xtalk_intr_t	ca_xtalk_intr;
+        	uint32_t	ca_int_bit;
+	} ca_intr[2];
+	xtalk_intr_t		ca_err_intr;
+	spinlock_t		ca_lock;
+} tioca_soft_t, *tioca_soft_p;
+
+/*
+ * Internal routines
+ */
+
+/* ca_linux.c */
+void		*tioca_mem_alloc(size_t, uint);
+void		tioca_mem_free(void *, size_t);
+tioca_soft_p	tioca_hdl_to_soft(vertex_hdl_t hdl);
+vertex_hdl_t	tioca_soft_to_hdl(tioca_soft_p soft);
+void		tioca_dump(tioca_p);
+
+/* ca_driver.c */
+tioca_soft_p	tioca_hwinit(vertex_hdl_t, tioca_p);
+void		tioca_agp_enable(vertex_hdl_t);
+
+/* ca_pci.c */
+extern int	tioca_pci_init(tioca_soft_p soft);
+extern int	tioca_agp_init(tioca_soft_p soft);
+
+/*
+ * Debugging support
+ */
+
+extern uint32_t	tioca_debug_mask;
+#define TIOCA_DEBUG(level_mask, fmt)	do {				\
+	if ((tioca_debug_mask & (level_mask)) == (level_mask))		\
+		printk fmt ;						\
+	} while (0)
+
+/*
+ * General debug levels 
+ */
+
+#define CA_DBG_ALWAYS	0x00000001
+#define CA_DBG_VERBOSE	0x00000002
+#define CA_DBG_ERROR	0x00000004
+
+/*
+ * General debug categories
+ */
+
+#define CA_DBG_CONFIG	0x00000100
+
+#define CA_DBG_DEFAULT	(CA_DBG_ALWAYS|CA_DBG_CONFIG|CA_DBG_ERROR)	/* default mask */
+#endif /* _ASM_IA64_SN_TIO_TIOCA_SOFT_H */
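
As an illustration only (not part of the patch), TIOCA_DEBUG() takes its
printk arguments as one parenthesized group because the macro expands
"printk fmt"; the call below is a hypothetical usage sketch:

	/* prints only when all bits of the level mask are set in tioca_debug_mask */
	TIOCA_DEBUG(CA_DBG_CONFIG,
		    ("tioca: configuring bus, mask 0x%x\n", tioca_debug_mask));
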
diff -Nru a/include/asm-ia64/sn/xtalk/corelet.h b/include/asm-ia64/sn/xtalk/corelet.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/include/asm-ia64/sn/xtalk/corelet.h	Thu Nov  6 13:42:35 2003
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_XTALK_CORELET_H
+#define _ASM_IA64_SN_XTALK_CORELET_H
+
+/* ERR_CMD_WORD for Coretalk packet */
+#define CTALK_VALID                     0x8000000000000000
+#define CTALK_CW_ERR                    0x0000000800000000
+#define CTALK_PIO_OP                    0x0000000400000000
+#define CTALK_RD_PARM                   0x0000000300000000
+#define CTALK_DW_DATA_EN                0x0000000000FF0000
+#define CTALK_TNUM                      0x000000000000FF00
+#define CTALK_DATA_SIZE                 0x00000000000000C0
+#define CTALK_PACTYP                    0x000000000000000F
+
+#endif  /* _ASM_IA64_SN_XTALK_CORELET_H */
+
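
As an illustration only (not part of the patch), the CTALK_* masks above
carve up the 64-bit ERR_CMD_WORD; the shift amounts below follow directly
from the mask positions, and the helper name is hypothetical:

	static inline void
	ctalk_decode(uint64_t cw, int *pactyp, int *tnum, int *valid)
	{
		*pactyp = cw & CTALK_PACTYP;		/* packet type, bits 3:0 */
		*tnum   = (cw & CTALK_TNUM) >> 8;	/* transaction number, bits 15:8 */
		*valid  = (cw & CTALK_VALID) != 0;	/* error word valid, bit 63 */
	}
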
diff -Nru a/include/asm-ia64/sn/xtalk/xbow_info.h b/include/asm-ia64/sn/xtalk/xbow_info.h
--- a/include/asm-ia64/sn/xtalk/xbow_info.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/xtalk/xbow_info.h	Thu Nov  6 13:42:35 2003
@@ -1,35 +1,16 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  */
-#ifndef _ASM_SN_XTALK_XBOW_INFO_H
-#define _ASM_SN_XTALK_XBOW_INFO_H
+#ifndef _ASM_IA64_SN_XTALK_XBOW_INFO_H
+#define _ASM_IA64_SN_XTALK_XBOW_INFO_H
 
 #include <linux/types.h>
 
 #define XBOW_PERF_MODES	       0x03
-#define XBOW_PERF_COUNTERS     0x02
-
-#define XBOW_MONITOR_NONE      0x00
-#define XBOW_MONITOR_SRC_LINK  0x01
-#define XBOW_MONITOR_DEST_LINK 0x02
-#define XBOW_MONITOR_INP_PKT   0x03
-#define XBOW_MONITOR_MULTIPLEX 0x04
-
-#define XBOW_LINK_MULTIPLEX    0x20
-
-#define XBOW_PERF_TIMEOUT	4
-#define XBOW_STATS_TIMEOUT	HZ
-
-typedef struct xbow_perf_link {
-    uint64_t              xlp_cumulative[XBOW_PERF_MODES];
-    unsigned char           xlp_link_alive;
-} xbow_perf_link_t;
-
 
 typedef struct xbow_link_status {
     uint64_t              rx_err_count;
@@ -37,32 +18,4 @@
 } xbow_link_status_t;
 
 
-
-typedef struct xbow_perf {
-    uint32_t              xp_current;
-    unsigned char           xp_link;
-    unsigned char           xp_mode;
-    unsigned char           xp_curlink;
-    unsigned char           xp_curmode;
-    volatile uint32_t    *xp_perf_reg;
-} xbow_perf_t;
-
-extern void             xbow_update_perf_counters(vertex_hdl_t);
-extern xbow_perf_link_t *xbow_get_perf_counters(vertex_hdl_t);
-extern int              xbow_enable_perf_counter(vertex_hdl_t, int, int, int);
-
-#define XBOWIOC_PERF_ENABLE	  	1
-#define XBOWIOC_PERF_DISABLE	 	2
-#define XBOWIOC_PERF_GET	 	3
-#define XBOWIOC_LLP_ERROR_ENABLE 	4
-#define XBOWIOC_LLP_ERROR_DISABLE	5
-#define XBOWIOC_LLP_ERROR_GET	 	6
-
-
-struct xbow_perfarg_t {
-    int                     link;
-    int                     mode;
-    int                     counter;
-};
-
-#endif				/* _ASM_SN_XTALK_XBOW_INFO_H */
+#endif				/* _ASM_IA64_SN_XTALK_XBOW_INFO_H */
diff -Nru a/include/asm-ia64/sn/xtalk/xtalk.h b/include/asm-ia64/sn/xtalk/xtalk.h
--- a/include/asm-ia64/sn/xtalk/xtalk.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/xtalk/xtalk.h	Thu Nov  6 13:42:35 2003
@@ -1,16 +1,17 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  */
-#ifndef _ASM_SN_XTALK_XTALK_H
-#define _ASM_SN_XTALK_XTALK_H
+#ifndef _ASM_IA64_SN_XTALK_XTALK_H
+#define _ASM_IA64_SN_XTALK_XTALK_H
 #include <linux/config.h>
 
+#ifdef __KERNEL__
 #include "asm/sn/sgi.h"
+#endif
 
 
 /*
@@ -19,7 +20,7 @@
 /*
  * User-level device driver visible types
  */
-typedef int            xwidgetnum_t;	/* xtalk widget number  (0..15) */
+typedef char            xwidgetnum_t;	/* xtalk widget number  (0..15) */
 
 #define XWIDGET_NONE		(-1)
 
@@ -233,16 +234,6 @@
 typedef void
 xtalk_widgetdev_shutdown_f (vertex_hdl_t, int);
 
-typedef int
-xtalk_dma_enabled_f (vertex_hdl_t);
-
-/* Error Management */
-
-typedef int
-xtalk_error_devenable_f (vertex_hdl_t xconn_vhdl,
-			 int devnum,
-			 int error_code);
-
 /* Early Action Support */
 typedef caddr_t
 xtalk_early_piotrans_addr_f (xwidget_part_num_t part_num,
@@ -285,9 +276,6 @@
     /* CONFIGURATION MANAGEMENT */
     xtalk_provider_startup_f *provider_startup;
     xtalk_provider_shutdown_f *provider_shutdown;
-
-    /* Error Management     */
-    xtalk_error_devenable_f *error_devenable;
 } xtalk_provider_t;
 
 /* Crosstalk devices use these standard Crosstalk provider interfaces */
@@ -316,8 +304,6 @@
 extern xtalk_provider_shutdown_f xtalk_provider_shutdown;
 extern xtalk_widgetdev_enable_f xtalk_widgetdev_enable;
 extern xtalk_widgetdev_shutdown_f xtalk_widgetdev_shutdown;
-extern xtalk_dma_enabled_f xtalk_dma_enabled;
-extern xtalk_error_devenable_f xtalk_error_devenable;
 extern xtalk_early_piotrans_addr_f xtalk_early_piotrans_addr;
 
 /* error management */
@@ -397,4 +383,4 @@
 extern void		xtalk_iterate(char *prefix, xtalk_iter_f *func);
 
 #endif				/* __KERNEL__ */
-#endif				/* _ASM_SN_XTALK_XTALK_H */
+#endif				/* _ASM_IA64_SN_XTALK_XTALK_H */
diff -Nru a/include/asm-ia64/sn/xtalk/xtalk_private.h b/include/asm-ia64/sn/xtalk/xtalk_private.h
--- a/include/asm-ia64/sn/xtalk/xtalk_private.h	Thu Nov  6 13:42:35 2003
+++ b/include/asm-ia64/sn/xtalk/xtalk_private.h	Thu Nov  6 13:42:35 2003
@@ -1,13 +1,12 @@
-/* $Id$
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  */
-#ifndef _ASM_SN_XTALK_XTALK_PRIVATE_H
-#define _ASM_SN_XTALK_XTALK_PRIVATE_H
+#ifndef _ASM_IA64_SN_XTALK_XTALK_PRIVATE_H
+#define _ASM_IA64_SN_XTALK_XTALK_PRIVATE_H
 
 #include <asm/sn/ioerror.h>        /* for error function and arg types */
 #include <asm/sn/xtalk/xwidget.h>
@@ -52,12 +51,6 @@
 /*
  * Xtalk interrupt handler structure access functions
  */
-#define	xtalk_intr_arg(xt)	((xt)->xi_sfarg)
-
-#define	xwidget_hwid_is_sn0_xswitch(_hwid)	\
-		(((_hwid)->part_num == XBOW_WIDGET_PART_NUM ) &&  	\
-		 ((_hwid)->mfg_num == XBOW_WIDGET_MFGR_NUM ))
-
 #define	xwidget_hwid_is_sn1_xswitch(_hwid)	\
 		(((_hwid)->part_num == XXBOW_WIDGET_PART_NUM ||		\
 		  (_hwid)->part_num == PXBOW_WIDGET_PART_NUM) &&  	\
@@ -83,4 +76,4 @@
 
 extern char             widget_info_fingerprint[];
 
-#endif				/* _ASM_SN_XTALK_XTALK_PRIVATE_H */
+#endif				/* _ASM_IA64_SN_XTALK_XTALK_PRIVATE_H */
diff -Nru a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c	Thu Nov  6 15:11:25 2003
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c	Thu Nov  6 15:11:25 2003
@@ -189,7 +189,7 @@
 	register_sn_partition_id();
 	register_sn_serial_numbers();
 	register_sn_force_interrupt();
-	register_sn_linkstats();
+	/* register_sn_linkstats(); */
 }
 
 #endif /* CONFIG_PROC_FS */
diff -Nru a/include/asm-ia64/sn/pci/pci_bus_cvlink.h b/include/asm-ia64/sn/pci/pci_bus_cvlink.h
--- a/include/asm-ia64/sn/pci/pci_bus_cvlink.h	Thu Nov  6 13:41:20 2003
+++ b/include/asm-ia64/sn/pci/pci_bus_cvlink.h	Thu Nov  6 13:41:20 2003
@@ -18,7 +17,6 @@
 #include <asm/sn/xtalk/xwidget.h>
 #include <asm/sn/sn_private.h>
 #include <asm/sn/addrs.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/hcl_util.h>
 #include <asm/sn/intr.h>
@@ -56,8 +54,6 @@
         vertex_hdl_t  vhdl;
 	int		isa64;
 	int		isPIC;
-	volatile unsigned int *dma_buf_sync;
-	volatile unsigned int *xbow_buf_sync;
 };
 
 struct ioports_to_tlbs_s {
diff -Nru a/arch/ia64/sn/io/hwgfs/labelcl.c b/arch/ia64/sn/io/hwgfs/labelcl.c
--- a/arch/ia64/sn/io/hwgfs/labelcl.c   Thu Nov  6 13:42:35 2003
+++ b/arch/ia64/sn/io/hwgfs/labelcl.c   Thu Nov  6 13:42:35 2003
@@ -16,7 +16,6 @@
 #include <linux/smp_lock.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/hwgfs.h>
-#include <asm/sn/invent.h>
 #include <asm/sn/hcl.h>
 #include <asm/sn/labelcl.h>

