2 * Routines to identify caches on Intel CPUs.
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/compiler.h>
14 #include <linux/cpu.h>
15 #include <linux/sched.h>
16 #include <linux/pci.h>
18 #include <asm/processor.h>
19 #include <linux/smp.h>
20 #include <asm/amd_nb.h>
30 unsigned char descriptor;
35 #define MB(x) ((x) * 1024)
37 /* All the cache descriptor types we care about (no TLB
38 entries) */
40 static const struct _cache_table __cpuinitconst cache_table[] =
42 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
43 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
44 { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
45 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
46 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
47 { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
48 { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */
49 { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
50 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
51 { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
52 { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */
53 { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */
54 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
55 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
56 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
57 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
58 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
59 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
60 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
61 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
62 { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
63 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
64 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
65 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
66 { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */
67 { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */
68 { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */
69 { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */
70 { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */
71 { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
72 { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */
73 { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
74 { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */
75 { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */
76 { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */
77 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
78 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
79 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
80 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
81 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
82 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
83 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
84 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
85 { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */
86 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
87 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
88 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
89 { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
90 { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */
91 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
92 { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */
93 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
94 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
95 { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */
96 { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */
97 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
98 { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */
99 { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
100 { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */
101 { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */
102 { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */
103 { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */
104 { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
105 { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */
106 { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
107 { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */
108 { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */
109 { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
110 { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
111 { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */
112 { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */
113 { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */
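/*
 * For illustration of how this table is consumed: cpuid(2) packs these
 * one-byte descriptors into EAX..EDX, and the lookup loop further down
 * adds the matching size to the per-level totals.  If one of the bytes
 * were, say, 0x2c, 32 KB would be added to the L1 data total; a byte of
 * 0xe4 would add 8 MB to the L3 total.
 */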
122 CACHE_TYPE_UNIFIED = 3
125 union _cpuid4_leaf_eax {
127 enum _cache_type type:5;
128 unsigned int level:3;
129 unsigned int is_self_initializing:1;
130 unsigned int is_fully_associative:1;
131 unsigned int reserved:4;
132 unsigned int num_threads_sharing:12;
133 unsigned int num_cores_on_die:6;
138 union _cpuid4_leaf_ebx {
140 unsigned int coherency_line_size:12;
141 unsigned int physical_line_partition:10;
142 unsigned int ways_of_associativity:10;
147 union _cpuid4_leaf_ecx {
149 unsigned int number_of_sets:32;
154 struct _cpuid4_info_regs {
155 union _cpuid4_leaf_eax eax;
156 union _cpuid4_leaf_ebx ebx;
157 union _cpuid4_leaf_ecx ecx;
159 struct amd_northbridge *nb;
162 struct _cpuid4_info {
163 struct _cpuid4_info_regs base;
164 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
167 unsigned short num_cache_leaves;
169 /* AMD doesn't have CPUID4. Emulate it here to report the same
170 information to the user. This makes some assumptions about the machine:
171 L2 not shared, no SMT etc., which is currently true on AMD CPUs.
173 In theory the TLBs could be reported as a fake cache type (they end up in "dummy").
177 unsigned line_size:8;
178 unsigned lines_per_tag:8;
180 unsigned size_in_kb:8;
187 unsigned line_size:8;
188 unsigned lines_per_tag:4;
190 unsigned size_in_kb:16;
197 unsigned line_size:8;
198 unsigned lines_per_tag:4;
201 unsigned size_encoded:14;
206 static const unsigned short __cpuinitconst assocs[] = {
217 [0xf] = 0xffff /* fully associative - no way to show this currently */
220 static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
221 static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
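/*
 * A note on the emulation tables: levels[] and types[] are indexed by
 * the emulated leaf number passed to amd_cpuid4() below, i.e. leaf 0 is
 * the L1 data cache, leaf 1 the L1 instruction cache, leaf 2 the
 * unified L2 and leaf 3 the unified L3, with type values following
 * enum _cache_type.
 */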
223 static void __cpuinit
224 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
225 union _cpuid4_leaf_ebx *ebx,
226 union _cpuid4_leaf_ecx *ecx)
229 unsigned line_size, lines_per_tag, assoc, size_in_kb;
230 union l1_cache l1i, l1d;
233 union l1_cache *l1 = &l1d;
239 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
240 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
248 assoc = assocs[l1->assoc];
249 line_size = l1->line_size;
250 lines_per_tag = l1->lines_per_tag;
251 size_in_kb = l1->size_in_kb;
256 assoc = assocs[l2.assoc];
257 line_size = l2.line_size;
258 lines_per_tag = l2.lines_per_tag;
259 /* cpu_data has errata corrections for K7 applied */
260 size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
265 assoc = assocs[l3.assoc];
266 line_size = l3.line_size;
267 lines_per_tag = l3.lines_per_tag;
268 size_in_kb = l3.size_encoded * 512;
269 if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
270 size_in_kb = size_in_kb >> 1;
278 eax->split.is_self_initializing = 1;
279 eax->split.type = types[leaf];
280 eax->split.level = levels[leaf];
281 eax->split.num_threads_sharing = 0;
283 eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
287 eax->split.is_fully_associative = 1;
288 ebx->split.coherency_line_size = line_size - 1;
289 ebx->split.ways_of_associativity = assoc - 1;
290 ebx->split.physical_line_partition = lines_per_tag - 1;
291 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
292 (ebx->split.ways_of_associativity + 1) - 1;
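/*
 * Worked example for the calculation above: a 512 KB, 16-way cache with
 * 64-byte lines has 512*1024 / 64 / 16 = 512 sets, so number_of_sets is
 * stored as 511 -- like real CPUID leaf 4, every field here encodes its
 * value minus one.
 */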
296 struct attribute attr;
297 ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
298 ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
302 #if defined(CONFIG_AMD_NB) && !defined(CONFIG_XEN)
305 * L3 cache descriptors
307 static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
309 struct amd_l3_cache *l3 = &nb->l3_cache;
310 unsigned int sc0, sc1, sc2, sc3;
313 pci_read_config_dword(nb->misc, 0x1C4, &val);
315 /* calculate subcache sizes */
316 l3->subcaches[0] = sc0 = !(val & BIT(0));
317 l3->subcaches[1] = sc1 = !(val & BIT(4));
319 if (boot_cpu_data.x86 == 0x15) {
320 l3->subcaches[0] = sc0 += !(val & BIT(1));
321 l3->subcaches[1] = sc1 += !(val & BIT(5));
324 l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
325 l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
327 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
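/*
 * For illustration: if none of the disable bits tested above are set in
 * val (and this is not a family 0x15 CPU), then sc0 = sc1 = 1 and
 * sc2 = sc3 = 2, so the largest subcache count is 2 and l3->indices
 * becomes (2 << 10) - 1 = 2047.
 */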
330 static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
335 /* only for L3, and not in virtualized environments */
339 node = amd_get_nb_id(smp_processor_id());
340 this_leaf->nb = node_to_amd_nb(node);
341 if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
342 amd_calc_l3_indices(this_leaf->nb);
346 * Check whether a slot used for disabling an L3 index is occupied.
347 * @nb: northbridge descriptor containing the L3 cache
348 * @slot: slot number (0..1)
350 * @returns: the disabled index if used, or a negative value if the slot is free.
352 int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
354 unsigned int reg = 0;
356 pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
358 /* check whether this slot is activated already */
359 if (reg & (3UL << 30))
365 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
370 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
373 index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
375 return sprintf(buf, "%d\n", index);
377 return sprintf(buf, "FREE\n");
380 #define SHOW_CACHE_DISABLE(slot) \
382 show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
385 return show_cache_disable(this_leaf, buf, slot); \
387 SHOW_CACHE_DISABLE(0)
388 SHOW_CACHE_DISABLE(1)
390 static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
391 unsigned slot, unsigned long idx)
398 * disable index in all 4 subcaches
400 for (i = 0; i < 4; i++) {
401 u32 reg = idx | (i << 20);
403 if (!nb->l3_cache.subcaches[i])
406 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
409 * We need to WBINVD on a core on the node containing the L3
410 * cache whose indices we disable; therefore a simple wbinvd()
416 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
421 * Disable an L3 cache index by using a disable-slot.
423 * @nb: northbridge descriptor containing the L3 cache
424 * @cpu: A CPU on the node containing the L3 cache
425 * @slot: slot number (0..1)
426 * @index: index to disable
428 * @return: 0 on success, error status on failure
430 int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
435 /* check if @slot is already used or the index is already disabled */
436 ret = amd_get_l3_disable_slot(nb, slot);
440 if (index > nb->l3_cache.indices)
443 /* check whether the other slot has disabled the same index already */
444 if (index == amd_get_l3_disable_slot(nb, !slot))
447 amd_l3_disable_index(nb, cpu, slot, index);
452 static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
453 const char *buf, size_t count,
456 unsigned long val = 0;
459 if (!capable(CAP_SYS_ADMIN))
462 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
465 cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
467 if (strict_strtoul(buf, 10, &val) < 0)
470 err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
473 printk(KERN_WARNING "L3 disable slot %d in use!\n",
480 #define STORE_CACHE_DISABLE(slot) \
482 store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
483 const char *buf, size_t count, \
486 return store_cache_disable(this_leaf, buf, count, slot); \
488 STORE_CACHE_DISABLE(0)
489 STORE_CACHE_DISABLE(1)
491 static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
492 show_cache_disable_0, store_cache_disable_0);
493 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
494 show_cache_disable_1, store_cache_disable_1);
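/*
 * Typical use of these attributes from userspace (the sysfs path is
 * shown for illustration, on a system where the L3 is cache index 3):
 *
 *   echo 42 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *   cat /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *
 * The write disables L3 index 42 via slot 0 (the value is parsed in
 * base 10); the read returns either the disabled index or "FREE".
 */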
497 show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
499 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
502 return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
506 store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
511 if (!capable(CAP_SYS_ADMIN))
514 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
517 if (strict_strtoul(buf, 16, &val) < 0)
520 if (amd_set_subcaches(cpu, val))
526 static struct _cache_attr subcaches =
527 __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
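/*
 * The subcaches attribute reads and writes a hex bitmask selecting L3
 * subcaches for this CPU (store_subcaches parses base 16), e.g. with
 * illustrative values only:
 *
 *   cat .../cache/index3/subcaches        -> f
 *   echo 3 > .../cache/index3/subcaches
 */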
529 #else /* CONFIG_AMD_NB */
530 #define amd_init_l3_cache(x, y)
531 #endif /* CONFIG_AMD_NB */
534 __cpuinit cpuid4_cache_lookup_regs(int index,
535 struct _cpuid4_info_regs *this_leaf)
537 union _cpuid4_leaf_eax eax;
538 union _cpuid4_leaf_ebx ebx;
539 union _cpuid4_leaf_ecx ecx;
542 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
543 amd_cpuid4(index, &eax, &ebx, &ecx);
544 amd_init_l3_cache(this_leaf, index);
546 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
549 if (eax.split.type == CACHE_TYPE_NULL)
550 return -EIO; /* better error ? */
552 this_leaf->eax = eax;
553 this_leaf->ebx = ebx;
554 this_leaf->ecx = ecx;
555 this_leaf->size = (ecx.split.number_of_sets + 1) *
556 (ebx.split.coherency_line_size + 1) *
557 (ebx.split.physical_line_partition + 1) *
558 (ebx.split.ways_of_associativity + 1);
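/*
 * Example of the size calculation above: an 8 MB, 16-way L3 with
 * 64-byte lines and one line per sector reports 8191, 63, 0 and 15 in
 * the raw fields, giving (8191+1) * (63+1) * (0+1) * (15+1) = 8 MB.
 */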
562 static int __cpuinit find_num_cache_leaves(void)
564 unsigned int eax, ebx, ecx, edx;
565 union _cpuid4_leaf_eax cache_eax;
570 /* Do cpuid(4) loop to find out num_cache_leaves */
571 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
572 cache_eax.full = eax;
573 } while (cache_eax.split.type != CACHE_TYPE_NULL);
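/*
 * For example, on a CPU with split L1I/L1D plus a unified L2 and L3,
 * subleaves 0-3 are valid and subleaf 4 returns CACHE_TYPE_NULL, so the
 * loop above terminates there.
 */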
577 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
580 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
581 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
582 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
584 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
585 unsigned int cpu = c->cpu_index;
588 if (c->cpuid_level > 3) {
589 static int is_initialized;
591 if (is_initialized == 0) {
592 /* Init num_cache_leaves from boot CPU */
593 num_cache_leaves = find_num_cache_leaves();
598 * Whenever possible use cpuid(4), the deterministic cache
599 * parameters leaf, to find the cache details
601 for (i = 0; i < num_cache_leaves; i++) {
602 struct _cpuid4_info_regs this_leaf;
605 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
607 switch (this_leaf.eax.split.level) {
609 if (this_leaf.eax.split.type ==
611 new_l1d = this_leaf.size/1024;
612 else if (this_leaf.eax.split.type ==
614 new_l1i = this_leaf.size/1024;
617 new_l2 = this_leaf.size/1024;
619 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
620 index_msb = get_count_order(num_threads_sharing);
621 l2_id = c->apicid >> index_msb;
625 new_l3 = this_leaf.size/1024;
627 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
628 index_msb = get_count_order(
629 num_threads_sharing);
630 l3_id = c->apicid >> index_msb;
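/*
 * Example: if 16 threads share this L3, the raw num_threads_sharing
 * field is 15, so num_threads_sharing is 16 here, index_msb becomes 4,
 * and all CPUs whose APIC IDs differ only in the low 4 bits end up with
 * the same l3_id.
 */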
640 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
643 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
644 /* supports eax=2 call */
646 unsigned int regs[4];
647 unsigned char *dp = (unsigned char *)regs;
650 if (num_cache_leaves != 0 && c->x86 == 15)
653 /* Number of times to iterate */
654 n = cpuid_eax(2) & 0xFF;
656 for (i = 0 ; i < n ; i++) {
657 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
659 /* If bit 31 is set, this is an unknown format */
660 for (j = 0 ; j < 3 ; j++)
661 if (regs[j] & (1 << 31))
664 /* Byte 0 is level count, not a descriptor */
665 for (j = 1 ; j < 16 ; j++) {
666 unsigned char des = dp[j];
669 /* look up this descriptor in the table */
670 while (cache_table[k].descriptor != 0) {
671 if (cache_table[k].descriptor == des) {
672 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
674 switch (cache_table[k].cache_type) {
676 l1i += cache_table[k].size;
679 l1d += cache_table[k].size;
682 l2 += cache_table[k].size;
685 l3 += cache_table[k].size;
688 trace += cache_table[k].size;
710 per_cpu(cpu_llc_id, cpu) = l2_id;
717 per_cpu(cpu_llc_id, cpu) = l3_id;
721 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
728 /* pointer to _cpuid4_info array (for each cache leaf) */
729 static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
730 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
732 #if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
733 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
735 struct _cpuid4_info *this_leaf, *sibling_leaf;
736 unsigned long num_threads_sharing;
737 int index_msb, i, sibling;
738 struct cpuinfo_x86 *c = &cpu_data(cpu);
740 if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
741 for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
742 if (!per_cpu(ici_cpuid4_info, i))
744 this_leaf = CPUID4_INFO_IDX(i, index);
745 for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
746 if (!cpu_online(sibling))
748 set_bit(sibling, this_leaf->shared_cpu_map);
753 this_leaf = CPUID4_INFO_IDX(cpu, index);
754 num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
756 if (num_threads_sharing == 1)
757 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
759 index_msb = get_count_order(num_threads_sharing);
761 for_each_online_cpu(i) {
762 if (cpu_data(i).apicid >> index_msb ==
763 c->apicid >> index_msb) {
765 to_cpumask(this_leaf->shared_cpu_map));
766 if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
768 CPUID4_INFO_IDX(i, index);
769 cpumask_set_cpu(cpu, to_cpumask(
770 sibling_leaf->shared_cpu_map));
776 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
778 struct _cpuid4_info *this_leaf, *sibling_leaf;
781 this_leaf = CPUID4_INFO_IDX(cpu, index);
782 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
783 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
784 cpumask_clear_cpu(cpu,
785 to_cpumask(sibling_leaf->shared_cpu_map));
789 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
793 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
798 static void __cpuinit free_cache_attributes(unsigned int cpu)
802 for (i = 0; i < num_cache_leaves; i++)
803 cache_remove_shared_cpu_map(cpu, i);
805 kfree(per_cpu(ici_cpuid4_info, cpu));
806 per_cpu(ici_cpuid4_info, cpu) = NULL;
809 static void __cpuinit get_cpu_leaves(void *_retval)
811 int j, *retval = _retval, cpu = smp_processor_id();
813 /* Do cpuid and store the results */
814 for (j = 0; j < num_cache_leaves; j++) {
815 struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
817 *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
818 if (unlikely(*retval < 0)) {
821 for (i = 0; i < j; i++)
822 cache_remove_shared_cpu_map(cpu, i);
825 cache_shared_cpu_map_setup(cpu, j);
829 static int __cpuinit detect_cache_attributes(unsigned int cpu)
833 if (num_cache_leaves == 0)
836 per_cpu(ici_cpuid4_info, cpu) = kzalloc(
837 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
838 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
841 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
843 kfree(per_cpu(ici_cpuid4_info, cpu));
844 per_cpu(ici_cpuid4_info, cpu) = NULL;
850 #include <linux/kobject.h>
851 #include <linux/sysfs.h>
852 #include <linux/cpu.h>
854 /* pointer to kobject for cpuX/cache */
855 static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
857 struct _index_kobject {
860 unsigned short index;
863 /* pointer to array of kobjects for cpuX/cache/indexY */
864 static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
865 #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
867 #define show_one_plus(file_name, object, val) \
868 static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
871 return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
874 show_one_plus(level, base.eax.split.level, 0);
875 show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
876 show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
877 show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
878 show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
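/*
 * The "+ 1" in the last four definitions undoes CPUID leaf 4's
 * minus-one encoding, e.g. a raw ways_of_associativity of 15 is shown
 * to userspace as 16; the cache level is reported as-is, hence the 0.
 */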
880 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
883 return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
886 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
889 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
893 const struct cpumask *mask;
895 mask = to_cpumask(this_leaf->shared_cpu_map);
897 cpulist_scnprintf(buf, len-2, mask) :
898 cpumask_scnprintf(buf, len-2, mask);
905 static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
908 return show_shared_cpu_map_func(leaf, 0, buf);
911 static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
914 return show_shared_cpu_map_func(leaf, 1, buf);
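/*
 * The two views differ only in formatting; for a cache shared by
 * CPUs 0-3 the output would look roughly like (illustrative):
 *
 *   shared_cpu_map:  0000000f    (hex cpumask)
 *   shared_cpu_list: 0-3         (CPU range list)
 */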
917 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
920 switch (this_leaf->base.eax.split.type) {
921 case CACHE_TYPE_DATA:
922 return sprintf(buf, "Data\n");
923 case CACHE_TYPE_INST:
924 return sprintf(buf, "Instruction\n");
925 case CACHE_TYPE_UNIFIED:
926 return sprintf(buf, "Unified\n");
928 return sprintf(buf, "Unknown\n");
932 #define to_object(k) container_of(k, struct _index_kobject, kobj)
933 #define to_attr(a) container_of(a, struct _cache_attr, attr)
935 #define define_one_ro(_name) \
936 static struct _cache_attr _name = \
937 __ATTR(_name, 0444, show_##_name, NULL)
939 define_one_ro(level);
941 define_one_ro(coherency_line_size);
942 define_one_ro(physical_line_partition);
943 define_one_ro(ways_of_associativity);
944 define_one_ro(number_of_sets);
946 define_one_ro(shared_cpu_map);
947 define_one_ro(shared_cpu_list);
949 static struct attribute *default_attrs[] = {
952 &coherency_line_size.attr,
953 &physical_line_partition.attr,
954 &ways_of_associativity.attr,
955 &number_of_sets.attr,
957 &shared_cpu_map.attr,
958 &shared_cpu_list.attr,
962 #if defined(CONFIG_AMD_NB) && !defined(CONFIG_XEN)
963 static struct attribute ** __cpuinit amd_l3_attrs(void)
965 static struct attribute **attrs;
971 n = sizeof (default_attrs) / sizeof (struct attribute *);
973 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
976 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
979 attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
981 return attrs = default_attrs;
983 for (n = 0; default_attrs[n]; n++)
984 attrs[n] = default_attrs[n];
986 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
987 attrs[n++] = &cache_disable_0.attr;
988 attrs[n++] = &cache_disable_1.attr;
991 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
992 attrs[n++] = &subcaches.attr;
998 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1000 struct _cache_attr *fattr = to_attr(attr);
1001 struct _index_kobject *this_leaf = to_object(kobj);
1005 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1006 buf, this_leaf->cpu) :
1011 static ssize_t store(struct kobject *kobj, struct attribute *attr,
1012 const char *buf, size_t count)
1014 struct _cache_attr *fattr = to_attr(attr);
1015 struct _index_kobject *this_leaf = to_object(kobj);
1018 ret = fattr->store ?
1019 fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1020 buf, count, this_leaf->cpu) :
1025 static const struct sysfs_ops sysfs_ops = {
1030 static struct kobj_type ktype_cache = {
1031 .sysfs_ops = &sysfs_ops,
1032 .default_attrs = default_attrs,
1035 static struct kobj_type ktype_percpu_entry = {
1036 .sysfs_ops = &sysfs_ops,
1039 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
1041 kfree(per_cpu(ici_cache_kobject, cpu));
1042 kfree(per_cpu(ici_index_kobject, cpu));
1043 per_cpu(ici_cache_kobject, cpu) = NULL;
1044 per_cpu(ici_index_kobject, cpu) = NULL;
1045 free_cache_attributes(cpu);
1048 static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
1052 if (num_cache_leaves == 0)
1055 err = detect_cache_attributes(cpu);
1059 /* Allocate all required memory */
1060 per_cpu(ici_cache_kobject, cpu) =
1061 kzalloc(sizeof(struct kobject), GFP_KERNEL);
1062 if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
1065 per_cpu(ici_index_kobject, cpu) = kzalloc(
1066 sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
1067 if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
1073 cpuid4_cache_sysfs_exit(cpu);
1077 static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
1079 /* Add/Remove cache interface for CPU device */
1080 static int __cpuinit cache_add_dev(struct device *dev)
1082 unsigned int cpu = dev->id;
1084 struct _index_kobject *this_object;
1085 struct _cpuid4_info *this_leaf;
1088 retval = cpuid4_cache_sysfs_init(cpu);
1089 if (unlikely(retval < 0))
1092 retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
1093 &ktype_percpu_entry,
1094 &dev->kobj, "%s", "cache");
1096 cpuid4_cache_sysfs_exit(cpu);
1100 for (i = 0; i < num_cache_leaves; i++) {
1101 this_object = INDEX_KOBJECT_PTR(cpu, i);
1102 this_object->cpu = cpu;
1103 this_object->index = i;
1105 this_leaf = CPUID4_INFO_IDX(cpu, i);
1107 ktype_cache.default_attrs = default_attrs;
1108 #if defined(CONFIG_AMD_NB) && !defined(CONFIG_XEN)
1109 if (this_leaf->base.nb)
1110 ktype_cache.default_attrs = amd_l3_attrs();
1112 retval = kobject_init_and_add(&(this_object->kobj),
1114 per_cpu(ici_cache_kobject, cpu),
1116 if (unlikely(retval)) {
1117 for (j = 0; j < i; j++)
1118 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
1119 kobject_put(per_cpu(ici_cache_kobject, cpu));
1120 cpuid4_cache_sysfs_exit(cpu);
1123 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
1125 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
1127 kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
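/*
 * After this succeeds, each online CPU exposes a tree of roughly the
 * following shape (index directory names shown for illustration):
 *
 *   /sys/devices/system/cpu/cpuX/cache/indexY/
 *       type level size coherency_line_size ways_of_associativity
 *       number_of_sets physical_line_partition shared_cpu_map
 *       shared_cpu_list [cache_disable_0 cache_disable_1 subcaches]
 *
 * where the bracketed AMD-only attributes appear when the corresponding
 * northbridge features are available.
 */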
1131 static void __cpuinit cache_remove_dev(struct device *dev)
1133 unsigned int cpu = dev->id;
1136 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
1138 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
1140 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
1142 for (i = 0; i < num_cache_leaves; i++)
1143 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
1144 kobject_put(per_cpu(ici_cache_kobject, cpu));
1145 cpuid4_cache_sysfs_exit(cpu);
1148 static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
1149 unsigned long action, void *hcpu)
1151 unsigned int cpu = (unsigned long)hcpu;
1154 dev = get_cpu_device(cpu);
1157 case CPU_ONLINE_FROZEN:
1161 case CPU_DEAD_FROZEN:
1162 cache_remove_dev(dev);
1168 static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
1169 .notifier_call = cacheinfo_cpu_callback,
1172 static int __cpuinit cache_sysfs_init(void)
1176 if (num_cache_leaves == 0)
1179 for_each_online_cpu(i) {
1181 struct device *dev = get_cpu_device(i);
1183 err = cache_add_dev(dev);
1187 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1191 device_initcall(cache_sysfs_init);