3 #include <linux/uaccess.h>
4 #include <linux/module.h>
5 #include <linux/mutex.h>
6 #include <linux/init.h>
/* Serializes all MTRR add/delete hypercalls and usage-count updates. */
static DEFINE_MUTEX(mtrr_mutex);
13 void generic_get_mtrr(unsigned int reg, unsigned long *base,
14 unsigned long *size, mtrr_type * type)
16 struct xen_platform_op op;
18 op.cmd = XENPF_read_memtype;
19 op.u.read_memtype.reg = reg;
20 if (unlikely(HYPERVISOR_platform_op(&op)))
21 memset(&op.u.read_memtype, 0, sizeof(op.u.read_memtype));
23 *size = op.u.read_memtype.nr_mfns;
24 *base = op.u.read_memtype.mfn;
25 *type = op.u.read_memtype.type;
28 const struct mtrr_ops generic_mtrr_ops = {
30 .get = generic_get_mtrr,
33 const struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
34 unsigned int num_var_ranges;
35 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
39 static void __init set_num_var_ranges(void)
41 struct xen_platform_op op;
43 for (num_var_ranges = 0; ; num_var_ranges++) {
44 op.cmd = XENPF_read_memtype;
45 op.u.read_memtype.reg = num_var_ranges;
46 if (HYPERVISOR_platform_op(&op) != 0)
51 static void __init init_table(void)
56 for (i = 0; i < max; i++)
57 mtrr_usage_table[i] = 0;
60 int mtrr_add_page(unsigned long base, unsigned long size,
61 unsigned int type, bool increment)
64 struct xen_platform_op op;
66 mutex_lock(&mtrr_mutex);
68 op.cmd = XENPF_add_memtype;
69 op.u.add_memtype.mfn = base;
70 op.u.add_memtype.nr_mfns = size;
71 op.u.add_memtype.type = type;
72 error = HYPERVISOR_platform_op(&op);
74 mutex_unlock(&mtrr_mutex);
80 ++mtrr_usage_table[op.u.add_memtype.reg];
82 mutex_unlock(&mtrr_mutex);
84 return op.u.add_memtype.reg;
87 static int mtrr_check(unsigned long base, unsigned long size)
89 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
90 pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
91 pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
98 int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
101 if (mtrr_check(base, size))
103 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
106 EXPORT_SYMBOL(mtrr_add);
108 int mtrr_del_page(int reg, unsigned long base, unsigned long size)
112 unsigned long lbase, lsize;
114 struct xen_platform_op op;
116 mutex_lock(&mtrr_mutex);
119 /* Search for existing MTRR */
120 for (i = 0; i < num_var_ranges; ++i) {
121 mtrr_if->get(i, &lbase, &lsize, <ype);
122 if (lbase == base && lsize == size) {
128 pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
133 if (mtrr_usage_table[reg] < 1) {
134 pr_warning("mtrr: reg: %d has count=0\n", reg);
137 if (--mtrr_usage_table[reg] < 1) {
138 op.cmd = XENPF_del_memtype;
139 op.u.del_memtype.handle = 0;
140 op.u.del_memtype.reg = reg;
141 error = HYPERVISOR_platform_op(&op);
149 mutex_unlock(&mtrr_mutex);
153 int mtrr_del(int reg, unsigned long base, unsigned long size)
155 if (mtrr_check(base, size))
157 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
159 EXPORT_SYMBOL(mtrr_del);
162 * Returns the effective MTRR type for the region
164 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
165 * - 0xFF - when MTRR is not enabled
167 u8 mtrr_type_lookup(u64 start, u64 end)
170 u64 start_mfn, end_mfn, base_mfn, top_mfn;
171 u8 prev_match, curr_match;
172 struct xen_platform_op op;
174 if (!is_initial_xendomain())
175 return MTRR_TYPE_WRBACK;
180 start_mfn = start >> PAGE_SHIFT;
181 /* Make end inclusive end, instead of exclusive */
182 end_mfn = --end >> PAGE_SHIFT;
184 /* Look in fixed ranges. Just return the type as per start */
185 if (start_mfn < 0x100) {
187 op.cmd = XENPF_read_memtype;
188 op.u.read_memtype.reg = ???;
189 error = HYPERVISOR_platform_op(&op);
191 return op.u.read_memtype.type;
193 return MTRR_TYPE_UNCACHABLE;
197 * Look in variable ranges
198 * Look of multiple ranges matching this address and pick type
199 * as per MTRR precedence
202 for (i = 0; i < num_var_ranges; ++i) {
203 op.cmd = XENPF_read_memtype;
204 op.u.read_memtype.reg = i;
205 error = HYPERVISOR_platform_op(&op);
207 if (error || !op.u.read_memtype.nr_mfns)
210 base_mfn = op.u.read_memtype.mfn;
211 top_mfn = base_mfn + op.u.read_memtype.nr_mfns - 1;
213 if (base_mfn > end_mfn || start_mfn > top_mfn) {
217 if (base_mfn > start_mfn || end_mfn > top_mfn) {
221 curr_match = op.u.read_memtype.type;
222 if (prev_match == 0xFF) {
223 prev_match = curr_match;
227 if (prev_match == MTRR_TYPE_UNCACHABLE ||
228 curr_match == MTRR_TYPE_UNCACHABLE) {
229 return MTRR_TYPE_UNCACHABLE;
232 if ((prev_match == MTRR_TYPE_WRBACK &&
233 curr_match == MTRR_TYPE_WRTHROUGH) ||
234 (prev_match == MTRR_TYPE_WRTHROUGH &&
235 curr_match == MTRR_TYPE_WRBACK)) {
236 prev_match = MTRR_TYPE_WRTHROUGH;
237 curr_match = MTRR_TYPE_WRTHROUGH;
240 if (prev_match != curr_match) {
241 return MTRR_TYPE_UNCACHABLE;
246 if (start >= (1ULL<<32) && (end < tom2))
247 return MTRR_TYPE_WRBACK;
250 if (prev_match != 0xFF)
254 op.cmd = XENPF_read_def_memtype;
255 error = HYPERVISOR_platform_op(&op);
257 return op.u.read_def_memtype.type;
259 return MTRR_TYPE_UNCACHABLE;
263 * Newer AMD K8s and later CPUs have a special magic MSR way to force WB
264 * for memory >4GB. Check for that here.
265 * Note this won't check if the MTRRs < 4GB where the magic bit doesn't
266 * apply to are wrong, but so far we don't know of any such case in the wild.
268 #define Tom2Enabled (1U << 21)
269 #define Tom2ForceMemTypeWB (1U << 22)
271 int __init amd_special_default_mtrr(void)
275 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
277 if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
279 /* In case some hypervisor doesn't pass SYSCFG through */
280 if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
283 * Memory between 4GB and top of mem is forced WB by this magic bit.
284 * Reserved before K8RevF, but should be zero there.
286 if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
287 (Tom2Enabled | Tom2ForceMemTypeWB))
292 void __init mtrr_bp_init(void)
294 if (amd_special_default_mtrr()) {
296 rdmsrl(MSR_K8_TOP_MEM2, tom2);
297 tom2 &= 0xffffff8000000ULL;
/* Per-AP MTRR setup is a no-op under Xen: the hypervisor owns the MTRRs. */
void mtrr_ap_init(void)
{
}
305 static int __init mtrr_init(void)
307 struct cpuinfo_x86 *c = &boot_cpu_data;
309 if (!is_initial_xendomain())
312 if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
313 (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
314 (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
315 (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
318 set_num_var_ranges();
324 subsys_initcall(mtrr_init);