2 * Routines to identify caches on Intel CPUs.
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/compiler.h>
14 #include <linux/cpu.h>
15 #include <linux/sched.h>
16 #include <linux/pci.h>
18 #include <asm/processor.h>
19 #include <linux/smp.h>
20 #include <asm/amd_nb.h>
30 unsigned char descriptor;
35 #define MB(x) ((x) * 1024)
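/*
 * Note: the size column in cache_table below is in KB, so MB(x)
 * converts a megabyte count to kilobytes (MB(1) == 1024).
 */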
37 /* All the cache descriptor types we care about (no TLB entries;
38 trace cache descriptors are kept because cpuid(2) is still used to size the P4 trace cache) */
40 static const struct _cache_table __cpuinitconst cache_table[] =
42 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
43 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
44 { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
45 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
46 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
47 { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
48 { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */
49 { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
50 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
51 { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
52 { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */
53 { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */
54 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
55 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
56 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
57 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
58 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
59 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
60 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
61 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
62 { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
63 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
64 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
65 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
66 { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */
67 { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */
68 { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */
69 { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */
70 { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */
71 { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
72 { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */
73 { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
74 { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */
75 { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */
76 { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */
77 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
78 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
79 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
80 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
81 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
82 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
83 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
84 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
85 { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */
86 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
87 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
88 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
89 { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
90 { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */
91 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
92 { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */
93 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
94 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
95 { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */
96 { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */
97 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
98 { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */
99 { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
100 { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */
101 { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */
102 { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */
103 { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */
104 { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
105 { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */
106 { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
107 { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */
108 { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */
109 { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
110 { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
111 { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */
112 { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */
113 { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */
122 CACHE_TYPE_UNIFIED = 3
125 union _cpuid4_leaf_eax {
127 enum _cache_type type:5;
128 unsigned int level:3;
129 unsigned int is_self_initializing:1;
130 unsigned int is_fully_associative:1;
131 unsigned int reserved:4;
132 unsigned int num_threads_sharing:12;
133 unsigned int num_cores_on_die:6;
138 union _cpuid4_leaf_ebx {
140 unsigned int coherency_line_size:12;
141 unsigned int physical_line_partition:10;
142 unsigned int ways_of_associativity:10;
147 union _cpuid4_leaf_ecx {
149 unsigned int number_of_sets:32;
154 struct _cpuid4_info_regs {
155 union _cpuid4_leaf_eax eax;
156 union _cpuid4_leaf_ebx ebx;
157 union _cpuid4_leaf_ecx ecx;
159 struct amd_northbridge *nb;
162 struct _cpuid4_info {
163 struct _cpuid4_info_regs base;
164 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
167 unsigned short num_cache_leaves;
169 /* AMD doesn't have CPUID4. Emulate it here to report the same
170 information to the user. This makes some assumptions about the machine:
171 L2 not shared, no SMT etc., which is currently true on AMD CPUs.
173 In theory the TLBs could be reported as fake type (they are in "dummy").
177 unsigned line_size:8;
178 unsigned lines_per_tag:8;
180 unsigned size_in_kb:8;
187 unsigned line_size:8;
188 unsigned lines_per_tag:4;
190 unsigned size_in_kb:16;
197 unsigned line_size:8;
198 unsigned lines_per_tag:4;
201 unsigned size_encoded:14;
206 static const unsigned short __cpuinitconst assocs[] = {
217 [0xf] = 0xffff /* fully associative - no way to show this currently */
220 static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
221 static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
223 static void __cpuinit
224 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
225 union _cpuid4_leaf_ebx *ebx,
226 union _cpuid4_leaf_ecx *ecx)
229 unsigned line_size, lines_per_tag, assoc, size_in_kb;
230 union l1_cache l1i, l1d;
233 union l1_cache *l1 = &l1d;
239 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
240 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
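/*
 * CPUID 0x80000005 returns the L1 data cache description in ECX and the
 * L1 instruction cache in EDX; 0x80000006 returns L2 in ECX and L3 in
 * EDX, which is why the values land in l1d/l1i/l2/l3 above.
 */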
248 assoc = assocs[l1->assoc];
249 line_size = l1->line_size;
250 lines_per_tag = l1->lines_per_tag;
251 size_in_kb = l1->size_in_kb;
256 assoc = assocs[l2.assoc];
257 line_size = l2.line_size;
258 lines_per_tag = l2.lines_per_tag;
259 /* cpu_data has errata corrections for K7 applied */
260 size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
265 assoc = assocs[l3.assoc];
266 line_size = l3.line_size;
267 lines_per_tag = l3.lines_per_tag;
268 size_in_kb = l3.size_encoded * 512;
269 if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
270 size_in_kb = size_in_kb >> 1;
278 eax->split.is_self_initializing = 1;
279 eax->split.type = types[leaf];
280 eax->split.level = levels[leaf];
281 eax->split.num_threads_sharing = 0;
283 eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
287 eax->split.is_fully_associative = 1;
288 ebx->split.coherency_line_size = line_size - 1;
289 ebx->split.ways_of_associativity = assoc - 1;
290 ebx->split.physical_line_partition = lines_per_tag - 1;
291 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
292 (ebx->split.ways_of_associativity + 1) - 1;
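/*
 * Illustrative arithmetic: a 512 KB, 16-way cache with 64-byte lines
 * yields 512*1024 / 64 / 16 = 512 sets, stored as 511 because CPUID4
 * encodes the field as "sets - 1".
 */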
296 struct attribute attr;
297 ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
298 ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
302 #if defined(CONFIG_AMD_NB) && !defined(CONFIG_XEN)
305 * L3 cache descriptors
307 static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
309 struct amd_l3_cache *l3 = &nb->l3_cache;
310 unsigned int sc0, sc1, sc2, sc3;
313 pci_read_config_dword(nb->misc, 0x1C4, &val);
315 /* calculate subcache sizes */
316 l3->subcaches[0] = sc0 = !(val & BIT(0));
317 l3->subcaches[1] = sc1 = !(val & BIT(4));
319 if (boot_cpu_data.x86 == 0x15) {
320 l3->subcaches[0] = sc0 += !(val & BIT(1));
321 l3->subcaches[1] = sc1 += !(val & BIT(5));
324 l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
325 l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
327 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
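/*
 * The largest per-subcache count, shifted by 10 (i.e. times 1024) and
 * minus one, gives the highest usable disable index: e.g. a count of 1
 * yields 1023, a count of 2 (possible on family 0x15) yields 2047.
 */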
330 static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
334 /* only for L3, and not in virtualized environments */
338 node = amd_get_nb_id(smp_processor_id());
339 this_leaf->nb = node_to_amd_nb(node);
340 if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
341 amd_calc_l3_indices(this_leaf->nb);
345 * check whether a slot used for disabling an L3 index is occupied.
346 * @l3: L3 cache descriptor
347 * @slot: slot number (0..1)
349 * @returns: the disabled index if used or negative value if slot free.
351 int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
353 unsigned int reg = 0;
355 pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
357 /* check whether this slot is activated already */
358 if (reg & (3UL << 30))
364 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
369 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
372 index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
374 return sprintf(buf, "%d\n", index);
376 return sprintf(buf, "FREE\n");
379 #define SHOW_CACHE_DISABLE(slot) \
381 show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
384 return show_cache_disable(this_leaf, buf, slot); \
386 SHOW_CACHE_DISABLE(0)
387 SHOW_CACHE_DISABLE(1)
389 static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
390 unsigned slot, unsigned long idx)
397 * disable index in all 4 subcaches
399 for (i = 0; i < 4; i++) {
400 u32 reg = idx | (i << 20);
402 if (!nb->l3_cache.subcaches[i])
405 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
408 * We need to WBINVD on a core on the node containing the L3
409 * cache whose indices we disable; therefore a simple wbinvd()
415 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
420 * disable an L3 cache index by using a disable-slot
422 * @l3: L3 cache descriptor
423 * @cpu: A CPU on the node containing the L3 cache
424 * @slot: slot number (0..1)
425 * @index: index to disable
427 * @return: 0 on success, error status on failure
429 int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
434 /* check if @slot is already used or the index is already disabled */
435 ret = amd_get_l3_disable_slot(nb, slot);
439 if (index > nb->l3_cache.indices)
442 /* check whether the other slot has disabled the same index already */
443 if (index == amd_get_l3_disable_slot(nb, !slot))
446 amd_l3_disable_index(nb, cpu, slot, index);
451 static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
452 const char *buf, size_t count,
455 unsigned long val = 0;
458 if (!capable(CAP_SYS_ADMIN))
461 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
464 cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
466 if (strict_strtoul(buf, 10, &val) < 0)
469 err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
472 printk(KERN_WARNING "L3 disable slot %d in use!\n",
479 #define STORE_CACHE_DISABLE(slot) \
481 store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
482 const char *buf, size_t count, \
485 return store_cache_disable(this_leaf, buf, count, slot); \
487 STORE_CACHE_DISABLE(0)
488 STORE_CACHE_DISABLE(1)
490 static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
491 show_cache_disable_0, store_cache_disable_0);
492 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
493 show_cache_disable_1, store_cache_disable_1);
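/*
 * Illustrative usage (exact paths may vary by kernel/config): the two
 * slots typically appear as
 *   /sys/devices/system/cpu/cpuN/cache/index3/cache_disable_{0,1}
 * on the L3 leaf; writing an index number disables it, reading shows
 * the disabled index or "FREE".
 */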
496 show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
498 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
501 return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
505 store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
510 if (!capable(CAP_SYS_ADMIN))
513 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
516 if (strict_strtoul(buf, 16, &val) < 0)
519 if (amd_set_subcaches(cpu, val))
525 static struct _cache_attr subcaches =
526 __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
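/*
 * "subcaches" exposes a hex bitmask of L3 subcaches assigned to this
 * CPU (L3 partitioning, requires AMD_NB_L3_PARTITIONING); writing a
 * mask such as "f" is an illustrative way to hand it all four subcaches.
 */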
528 #else /* CONFIG_AMD_NB */
529 #define amd_init_l3_cache(x, y)
530 #endif /* CONFIG_AMD_NB */
533 __cpuinit cpuid4_cache_lookup_regs(int index,
534 struct _cpuid4_info_regs *this_leaf)
536 union _cpuid4_leaf_eax eax;
537 union _cpuid4_leaf_ebx ebx;
538 union _cpuid4_leaf_ecx ecx;
541 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
542 amd_cpuid4(index, &eax, &ebx, &ecx);
543 amd_init_l3_cache(this_leaf, index);
545 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
548 if (eax.split.type == CACHE_TYPE_NULL)
549 return -EIO; /* better error ? */
551 this_leaf->eax = eax;
552 this_leaf->ebx = ebx;
553 this_leaf->ecx = ecx;
554 this_leaf->size = (ecx.split.number_of_sets + 1) *
555 (ebx.split.coherency_line_size + 1) *
556 (ebx.split.physical_line_partition + 1) *
557 (ebx.split.ways_of_associativity + 1);
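/*
 * Size is reconstructed from the CPUID4-style fields; e.g. 8192 sets *
 * 64-byte lines * 1 partition * 16 ways = 8 MB (all four fields are
 * encoded as "value - 1", hence the "+ 1" terms).
 */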
561 static int __cpuinit find_num_cache_leaves(void)
563 unsigned int eax, ebx, ecx, edx;
564 union _cpuid4_leaf_eax cache_eax;
569 /* Do cpuid(4) loop to find out num_cache_leaves */
570 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
571 cache_eax.full = eax;
572 } while (cache_eax.split.type != CACHE_TYPE_NULL);
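/*
 * Leaves are enumerated from index 0 until CPUID.4 reports a NULL type;
 * e.g. a CPU with L1d, L1i, L2 and L3 reports four leaves.
 */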
576 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
579 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
580 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
581 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
583 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
584 unsigned int cpu = c->cpu_index;
587 if (c->cpuid_level > 3) {
588 static int is_initialized;
590 if (is_initialized == 0) {
591 /* Init num_cache_leaves from boot CPU */
592 num_cache_leaves = find_num_cache_leaves();
597 * Whenever possible use cpuid(4), the deterministic cache
598 * parameters leaf, to find the cache details
600 for (i = 0; i < num_cache_leaves; i++) {
601 struct _cpuid4_info_regs this_leaf;
604 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
606 switch (this_leaf.eax.split.level) {
608 if (this_leaf.eax.split.type ==
610 new_l1d = this_leaf.size/1024;
611 else if (this_leaf.eax.split.type ==
613 new_l1i = this_leaf.size/1024;
616 new_l2 = this_leaf.size/1024;
618 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
619 index_msb = get_count_order(num_threads_sharing);
620 l2_id = c->apicid >> index_msb;
624 new_l3 = this_leaf.size/1024;
626 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
627 index_msb = get_count_order(
628 num_threads_sharing);
629 l3_id = c->apicid >> index_msb;
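/*
 * Example: if CPUID reports 8 threads sharing this L3, then
 * index_msb = get_count_order(8) = 3, so all 8 siblings compute the
 * same l3_id (apicid >> 3) and later get the same cpu_llc_id.
 */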
639 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
642 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
643 /* supports eax=2 call */
645 unsigned int regs[4];
646 unsigned char *dp = (unsigned char *)regs;
649 if (num_cache_leaves != 0 && c->x86 == 15)
652 /* Number of times to iterate */
653 n = cpuid_eax(2) & 0xFF;
655 for (i = 0 ; i < n ; i++) {
656 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
658 /* If bit 31 is set, this is an unknown format */
659 for (j = 0 ; j < 3 ; j++)
660 if (regs[j] & (1 << 31))
663 /* Byte 0 is level count, not a descriptor */
664 for (j = 1 ; j < 16 ; j++) {
665 unsigned char des = dp[j];
668 /* look up this descriptor in the table */
669 while (cache_table[k].descriptor != 0) {
670 if (cache_table[k].descriptor == des) {
671 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
673 switch (cache_table[k].cache_type) {
675 l1i += cache_table[k].size;
678 l1d += cache_table[k].size;
681 l2 += cache_table[k].size;
684 l3 += cache_table[k].size;
687 trace += cache_table[k].size;
709 per_cpu(cpu_llc_id, cpu) = l2_id;
716 per_cpu(cpu_llc_id, cpu) = l3_id;
720 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
727 /* pointer to _cpuid4_info array (for each cache leaf) */
728 static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
729 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
731 #if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
733 static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
735 struct _cpuid4_info *this_leaf;
737 struct cpuinfo_x86 *c = &cpu_data(cpu);
742 for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
743 if (!per_cpu(ici_cpuid4_info, i))
745 this_leaf = CPUID4_INFO_IDX(i, index);
746 for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
747 if (!cpu_online(sibling))
749 set_bit(sibling, this_leaf->shared_cpu_map);
752 } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
754 for_each_cpu(i, cpu_sibling_mask(cpu)) {
755 if (!per_cpu(ici_cpuid4_info, i))
757 this_leaf = CPUID4_INFO_IDX(i, index);
758 for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
759 if (!cpu_online(sibling))
761 set_bit(sibling, this_leaf->shared_cpu_map);
769 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
771 struct _cpuid4_info *this_leaf, *sibling_leaf;
772 unsigned long num_threads_sharing;
774 struct cpuinfo_x86 *c = &cpu_data(cpu);
776 if (c->x86_vendor == X86_VENDOR_AMD) {
777 if (cache_shared_amd_cpu_map_setup(cpu, index))
781 this_leaf = CPUID4_INFO_IDX(cpu, index);
782 num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
784 if (num_threads_sharing == 1)
785 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
787 index_msb = get_count_order(num_threads_sharing);
789 for_each_online_cpu(i) {
790 if (cpu_data(i).apicid >> index_msb ==
791 c->apicid >> index_msb) {
793 to_cpumask(this_leaf->shared_cpu_map));
794 if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
796 CPUID4_INFO_IDX(i, index);
797 cpumask_set_cpu(cpu, to_cpumask(
798 sibling_leaf->shared_cpu_map));
804 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
806 struct _cpuid4_info *this_leaf, *sibling_leaf;
809 this_leaf = CPUID4_INFO_IDX(cpu, index);
810 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
811 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
812 cpumask_clear_cpu(cpu,
813 to_cpumask(sibling_leaf->shared_cpu_map));
817 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
821 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
826 static void __cpuinit free_cache_attributes(unsigned int cpu)
830 for (i = 0; i < num_cache_leaves; i++)
831 cache_remove_shared_cpu_map(cpu, i);
833 kfree(per_cpu(ici_cpuid4_info, cpu));
834 per_cpu(ici_cpuid4_info, cpu) = NULL;
837 static void __cpuinit get_cpu_leaves(void *_retval)
839 int j, *retval = _retval, cpu = smp_processor_id();
841 /* Do cpuid and store the results */
842 for (j = 0; j < num_cache_leaves; j++) {
843 struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
845 *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
846 if (unlikely(*retval < 0)) {
849 for (i = 0; i < j; i++)
850 cache_remove_shared_cpu_map(cpu, i);
853 cache_shared_cpu_map_setup(cpu, j);
857 static int __cpuinit detect_cache_attributes(unsigned int cpu)
861 if (num_cache_leaves == 0)
864 per_cpu(ici_cpuid4_info, cpu) = kzalloc(
865 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
866 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
869 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
871 kfree(per_cpu(ici_cpuid4_info, cpu));
872 per_cpu(ici_cpuid4_info, cpu) = NULL;
878 #include <linux/kobject.h>
879 #include <linux/sysfs.h>
880 #include <linux/cpu.h>
882 /* pointer to kobject for cpuX/cache */
883 static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
885 struct _index_kobject {
888 unsigned short index;
891 /* pointer to array of kobjects for cpuX/cache/indexY */
892 static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
893 #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
895 #define show_one_plus(file_name, object, val) \
896 static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
899 return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
902 show_one_plus(level, base.eax.split.level, 0);
903 show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
904 show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
905 show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
906 show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
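/*
 * The "+ 1" arguments above undo the CPUID4 encoding: ways, partitions,
 * line size and set count are all reported as "value - 1", while level
 * is reported as-is (hence the 0).
 */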
908 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
911 return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
914 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
917 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
921 const struct cpumask *mask;
923 mask = to_cpumask(this_leaf->shared_cpu_map);
925 cpulist_scnprintf(buf, len-2, mask) :
926 cpumask_scnprintf(buf, len-2, mask);
933 static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
936 return show_shared_cpu_map_func(leaf, 0, buf);
939 static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
942 return show_shared_cpu_map_func(leaf, 1, buf);
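/*
 * The two views differ only in formatting: shared_cpu_map prints a hex
 * cpumask and shared_cpu_list prints ranges; e.g. two HT siblings
 * sharing a cache might show "00000003" vs. "0-1".
 */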
945 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
948 switch (this_leaf->base.eax.split.type) {
949 case CACHE_TYPE_DATA:
950 return sprintf(buf, "Data\n");
951 case CACHE_TYPE_INST:
952 return sprintf(buf, "Instruction\n");
953 case CACHE_TYPE_UNIFIED:
954 return sprintf(buf, "Unified\n");
956 return sprintf(buf, "Unknown\n");
960 #define to_object(k) container_of(k, struct _index_kobject, kobj)
961 #define to_attr(a) container_of(a, struct _cache_attr, attr)
963 #define define_one_ro(_name) \
964 static struct _cache_attr _name = \
965 __ATTR(_name, 0444, show_##_name, NULL)
967 define_one_ro(level);
969 define_one_ro(coherency_line_size);
970 define_one_ro(physical_line_partition);
971 define_one_ro(ways_of_associativity);
972 define_one_ro(number_of_sets);
974 define_one_ro(shared_cpu_map);
975 define_one_ro(shared_cpu_list);
977 static struct attribute *default_attrs[] = {
980 &coherency_line_size.attr,
981 &physical_line_partition.attr,
982 &ways_of_associativity.attr,
983 &number_of_sets.attr,
985 &shared_cpu_map.attr,
986 &shared_cpu_list.attr,
990 #if defined(CONFIG_AMD_NB) && !defined(CONFIG_XEN)
991 static struct attribute ** __cpuinit amd_l3_attrs(void)
993 static struct attribute **attrs;
999 n = sizeof (default_attrs) / sizeof (struct attribute *);
1001 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
1004 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
1007 attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
1009 return attrs = default_attrs;
1011 for (n = 0; default_attrs[n]; n++)
1012 attrs[n] = default_attrs[n];
1014 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
1015 attrs[n++] = &cache_disable_0.attr;
1016 attrs[n++] = &cache_disable_1.attr;
1019 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
1020 attrs[n++] = &subcaches.attr;
1026 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1028 struct _cache_attr *fattr = to_attr(attr);
1029 struct _index_kobject *this_leaf = to_object(kobj);
1033 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1034 buf, this_leaf->cpu) :
1039 static ssize_t store(struct kobject *kobj, struct attribute *attr,
1040 const char *buf, size_t count)
1042 struct _cache_attr *fattr = to_attr(attr);
1043 struct _index_kobject *this_leaf = to_object(kobj);
1046 ret = fattr->store ?
1047 fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1048 buf, count, this_leaf->cpu) :
1053 static const struct sysfs_ops sysfs_ops = {
1058 static struct kobj_type ktype_cache = {
1059 .sysfs_ops = &sysfs_ops,
1060 .default_attrs = default_attrs,
1063 static struct kobj_type ktype_percpu_entry = {
1064 .sysfs_ops = &sysfs_ops,
1067 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
1069 kfree(per_cpu(ici_cache_kobject, cpu));
1070 kfree(per_cpu(ici_index_kobject, cpu));
1071 per_cpu(ici_cache_kobject, cpu) = NULL;
1072 per_cpu(ici_index_kobject, cpu) = NULL;
1073 free_cache_attributes(cpu);
1076 static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
1080 if (num_cache_leaves == 0)
1083 err = detect_cache_attributes(cpu);
1087 /* Allocate all required memory */
1088 per_cpu(ici_cache_kobject, cpu) =
1089 kzalloc(sizeof(struct kobject), GFP_KERNEL);
1090 if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
1093 per_cpu(ici_index_kobject, cpu) = kzalloc(
1094 sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
1095 if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
1101 cpuid4_cache_sysfs_exit(cpu);
1105 static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
1107 /* Add/Remove cache interface for CPU device */
1108 static int __cpuinit cache_add_dev(struct device *dev)
1110 unsigned int cpu = dev->id;
1112 struct _index_kobject *this_object;
1113 struct _cpuid4_info *this_leaf;
1116 retval = cpuid4_cache_sysfs_init(cpu);
1117 if (unlikely(retval < 0))
1120 retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
1121 &ktype_percpu_entry,
1122 &dev->kobj, "%s", "cache");
1124 cpuid4_cache_sysfs_exit(cpu);
1128 for (i = 0; i < num_cache_leaves; i++) {
1129 this_object = INDEX_KOBJECT_PTR(cpu, i);
1130 this_object->cpu = cpu;
1131 this_object->index = i;
1133 this_leaf = CPUID4_INFO_IDX(cpu, i);
1135 ktype_cache.default_attrs = default_attrs;
1136 #if defined(CONFIG_AMD_NB) && !defined(CONFIG_XEN)
1137 if (this_leaf->base.nb)
1138 ktype_cache.default_attrs = amd_l3_attrs();
1140 retval = kobject_init_and_add(&(this_object->kobj),
1142 per_cpu(ici_cache_kobject, cpu),
1144 if (unlikely(retval)) {
1145 for (j = 0; j < i; j++)
1146 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
1147 kobject_put(per_cpu(ici_cache_kobject, cpu));
1148 cpuid4_cache_sysfs_exit(cpu);
1151 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
1153 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
1155 kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
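/*
 * At this point the CPU typically has a cache/ directory with one
 * indexN/ subdirectory per detected leaf, each populated with the
 * attributes above (level, type, size, shared_cpu_map, ...), e.g.
 * /sys/devices/system/cpu/cpu0/cache/index0/level.
 */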
1159 static void __cpuinit cache_remove_dev(struct device *dev)
1161 unsigned int cpu = dev->id;
1164 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
1166 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
1168 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
1170 for (i = 0; i < num_cache_leaves; i++)
1171 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
1172 kobject_put(per_cpu(ici_cache_kobject, cpu));
1173 cpuid4_cache_sysfs_exit(cpu);
1176 static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
1177 unsigned long action, void *hcpu)
1179 unsigned int cpu = (unsigned long)hcpu;
1182 dev = get_cpu_device(cpu);
1185 case CPU_ONLINE_FROZEN:
1189 case CPU_DEAD_FROZEN:
1190 cache_remove_dev(dev);
1196 static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
1197 .notifier_call = cacheinfo_cpu_callback,
1200 static int __cpuinit cache_sysfs_init(void)
1204 if (num_cache_leaves == 0)
1207 for_each_online_cpu(i) {
1209 struct device *dev = get_cpu_device(i);
1211 err = cache_add_dev(dev);
1215 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1219 device_initcall(cache_sysfs_init);