- 2.6.17 port work: the build breaks, but the patch set is relatively stable
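The hunks below fold in the upstream rework of init_intel_cacheinfo(): cache sizes come from cpuid(4) wherever the CPU supports it, the legacy cpuid(2) descriptor walk becomes a fallback (kept on P4 only for the trace cache), and the !CONFIG_XEN guards around cpu_llc_id are dropped. For orientation, leaf-4 enumeration counts cache leaves by walking subleaves until the cache-type field in EAX[4:0] reads 0; a minimal sketch, assuming a cpuid_count() wrapper shaped like the kernel's (op in EAX, subleaf in ECX, results in the four out-parameters):

/* Sketch of cpuid(4) leaf counting (what num_cache_leaves reflects). */
static int count_cache_leaves(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i = 0;

	for (;;) {
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		if ((eax & 0x1f) == 0)	/* cache type 0: no more caches */
			break;
		i++;
	}
	return i;
}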
[linux-flexiantxendom0-3.2.10.git] arch/i386/kernel/cpu/intel_cacheinfo.c
index 7057db2..c8547a6 100644
@@ -174,11 +174,11 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
        unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
        unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
        unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
-#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
+#ifdef CONFIG_SMP
        unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
 #endif
 
-       if (c->cpuid_level > 4) {
+       if (c->cpuid_level > 3) {
                static int is_initialized;
 
                if (is_initialized == 0) {
@@ -225,11 +225,19 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
                        }
                }
        }
-       if (c->cpuid_level > 1) {
+       /*
+        * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
+        * trace cache
+        */
+       if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
                /* supports eax=2  call */
                int i, j, n;
                int regs[4];
                unsigned char *dp = (unsigned char *)regs;
+               int only_trace = 0;
+
+               if (num_cache_leaves != 0 && c->x86 == 15)
+                       only_trace = 1;
 
                /* Number of times to iterate */
                n = cpuid_eax(2) & 0xFF;
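
The rewritten condition above is the heart of the patch: leaf 2 is parsed only when leaf 4 yielded nothing (num_cache_leaves == 0), or on family 15 (Pentium 4), whose trace cache is reported solely by leaf 2. Distilled into a standalone predicate; the struct below is a stand-in for struct cpuinfo_x86, not the kernel's definition:

/* Stand-in type; only the two fields the test reads. */
struct cpu_sketch {
	int x86;		/* family; 15 == Pentium 4 */
	int cpuid_level;	/* highest basic CPUID leaf */
};

/* Returns nonzero when the cpuid(2) walk should run; *only_trace is
 * set when leaf-4 data exists and only the P4 trace cache is wanted. */
static int use_leaf2(const struct cpu_sketch *c, int num_cache_leaves,
		     int *only_trace)
{
	*only_trace = 0;
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		if (num_cache_leaves != 0 && c->x86 == 15)
			*only_trace = 1;
		return 1;
	}
	return 0;
}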
@@ -251,6 +259,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
                                while (cache_table[k].descriptor != 0)
                                {
                                        if (cache_table[k].descriptor == des) {
+                                               if (only_trace && cache_table[k].cache_type != LVL_TRACE)
+                                                       break;
                                                switch (cache_table[k].cache_type) {
                                                case LVL_1_INST:
                                                        l1i += cache_table[k].size;
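
For context, the two added lines sit in the middle of the classic leaf-2 descriptor walk, sketched below: the low byte of EAX holds the number of times leaf 2 must be queried, every other byte of EAX..EDX is a one-byte cache descriptor, and a register with bit 31 set carries no descriptors. lookup_descriptor() is a hypothetical stand-in for the cache_table[] scan shown in the hunk; cpuid()/cpuid_eax() are the kernel wrappers.

/* Sketch of the enclosing leaf-2 walk, variables as in the hunk above. */
int i, j, n;
int regs[4];
unsigned char *dp = (unsigned char *)regs;

n = cpuid_eax(2) & 0xFF;		/* low EAX byte: times to query leaf 2 */
for (i = 0; i < n; i++) {
	cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
	for (j = 0; j < 4; j++)
		if (regs[j] & (1 << 31))	/* unknown format */
			regs[j] = 0;
	for (j = 1; j < 16; j++)		/* byte 0 is the count */
		if (dp[j])
			lookup_descriptor(dp[j]);	/* cache_table[] scan */
}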
@@ -276,43 +286,46 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
                                }
                        }
                }
+       }
 
-               if (new_l1d)
-                       l1d = new_l1d;
+       if (new_l1d)
+               l1d = new_l1d;
 
-               if (new_l1i)
-                       l1i = new_l1i;
+       if (new_l1i)
+               l1i = new_l1i;
 
-               if (new_l2) {
-                       l2 = new_l2;
-#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
-                       cpu_llc_id[cpu] = l2_id;
+       if (new_l2) {
+               l2 = new_l2;
+#ifdef CONFIG_SMP
+               cpu_llc_id[cpu] = l2_id;
 #endif
-               }
+       }
 
-               if (new_l3) {
-                       l3 = new_l3;
-#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
-                       cpu_llc_id[cpu] = l3_id;
+       if (new_l3) {
+               l3 = new_l3;
+#ifdef CONFIG_SMP
+               cpu_llc_id[cpu] = l3_id;
 #endif
-               }
-
-               if ( trace )
-                       printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
-               else if ( l1i )
-                       printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
-               if ( l1d )
-                       printk(", L1 D cache: %dK\n", l1d);
-               else
-                       printk("\n");
-               if ( l2 )
-                       printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
-               if ( l3 )
-                       printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
-
-               c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
        }
 
+       if (trace)
+               printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
+       else if (l1i)
+               printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
+
+       if (l1d)
+               printk(", L1 D cache: %dK\n", l1d);
+       else
+               printk("\n");
+
+       if (l2)
+               printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
+
+       if (l3)
+               printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+
+       c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
+
        return l2;
 }
 
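Moving the summary block out one level is the behavioural point of this hunk: before, the size totals, the printk report, and x86_cache_size were computed only inside the cpuid(2) branch, so a CPU detected purely via cpuid(4) ended up with no reported cache size. The logic itself is unchanged; a small user-space harness of it, with invented sizes:

#include <stdio.h>

int main(void)
{
	/* invented example values */
	unsigned int trace = 0, l1i = 16, l1d = 16, l2 = 512, l3 = 0;

	if (trace)
		printf("CPU: Trace cache: %uK uops", trace);
	else if (l1i)
		printf("CPU: L1 I cache: %uK", l1i);
	if (l1d)
		printf(", L1 D cache: %uK\n", l1d);
	else
		printf("\n");
	if (l2)
		printf("CPU: L2 cache: %uK\n", l2);
	if (l3)
		printf("CPU: L3 cache: %uK\n", l3);

	/* x86_cache_size: the largest level present wins */
	printf("x86_cache_size = %uK\n", l3 ? l3 : (l2 ? l2 : (l1i + l1d)));
	return 0;
}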
@@ -348,7 +361,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
                }
        }
 }
-static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 {
        struct _cpuid4_info     *this_leaf, *sibling_leaf;
        int sibling;
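
The __devinit to __cpuinit switch fixes a section annotation: cache_remove_shared_cpu_map() runs from the CPU hotplug path, so it must follow the same discard rules as its __cpuinit setup counterpart. A simplified sketch of how the 2.6-era include/linux/init.h treats the marker (not the verbatim header):

#ifdef CONFIG_HOTPLUG_CPU
#define __cpuinit		/* hotplug: code must stay resident */
#else
#define __cpuinit __init	/* no hotplug: discard after boot */
#endif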
@@ -629,7 +642,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
        return;
 }
 
-static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
+static int cacheinfo_cpu_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
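
Finally, __cpuinit is dropped from cacheinfo_cpu_callback() itself: the callback hangs off a notifier block that stays registered for the life of the system, so it may be invoked long after init text has been freed. A sketch of the surrounding 2.6-era notifier wiring, with the sysfs work reduced to comments:

static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		/* create the per-CPU cache sysfs entries for 'cpu' */
		break;
	case CPU_DEAD:
		/* remove them again */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

/* registered once at boot, e.g. via register_cpu_notifier() */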