/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch
    Copyright (c) 2002  Patrick Mochel

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692).

    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>.
    Source: Intel Architecture Software Developers Manual, Volume 3:
    System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/processor.h>

#include "mtrr.h"
#define MTRR_VERSION	"2.0 (20020519)"

u32 num_var_ranges = 0;

unsigned int *usage_table;
static DECLARE_MUTEX(main_lock);

u32 size_or_mask, size_and_mask;
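/* Descriptive note: bits set in size_or_mask lie above the physical
 * address width that this CPU's MTRRs can describe, so mtrr_add_page()
 * rejects any base or size that intersects it. size_and_mask is its
 * complement, clipped to the bits a variable-range mask register can
 * actually hold; both are set up per-vendor in mtrr_init(). */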
static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};

struct mtrr_ops * mtrr_if = NULL;

__initdata char *mtrr_if_name[] = {
	"none", "Intel", "AMD K6", "Cyrix ARR", "Centaur MCR"
};
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);

extern int arr3_protected;
void set_mtrr_ops(struct mtrr_ops * ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}
/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb(void)
{
	struct pci_dev *dev;

	if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
		/* ServerWorks LE chipsets have problems with write-combining.
		   Don't allow it and leave room for other chipsets to be tagged */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
			printk(KERN_INFO "mtrr: Serverworks LE detected. Write-combining disabled.\n");
			return 0;
		}
		/* Intel 450NX errata #23. Non-ascending cacheline evictions to
		   write-combining memory may result in data corruption */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
			return 0;
		}
	}
	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
}
/*  Counts the variable MTRRs and records the result in num_var_ranges  */
void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel()) {
		rdmsr(MTRRcap_MSR, config, dummy);
	} else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;
	num_var_ranges = config & 0xff;
}
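/* For instance, a P6-family CPU reports its variable-range count in the
 * low byte of MTRRcap, typically 8, so num_var_ranges becomes 8; the
 * hard-wired fallbacks above match the two AMD K6 MTRRs and the eight
 * Cyrix ARRs. */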
static char * attrib_to_str(int x)
{
	return (x <= 6) ? mtrr_strings[x] : "?";
}
static void init_table(void)
{
	int i, max;

	max = num_var_ranges;
	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
	    == NULL) {
		printk(KERN_ERR "mtrr: could not allocate\n");
		return;
	}
	for (i = 0; i < max; i++)
		usage_table[i] = 1;
}
struct set_mtrr_data {
	atomic_t	count;
	atomic_t	gate;
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};
static void ipi_handler(void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
	struct set_mtrr_data *data = info;
	unsigned long flags;

	local_irq_save(flags);

	atomic_dec(&data->count);
	while (!atomic_read(&data->gate)) {
		cpu_relax();
	}

	/*  The master has cleared me to execute  */
	if (data->smp_reg != ~0U)
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);

	atomic_dec(&data->count);
	while (atomic_read(&data->gate)) {
		cpu_relax();
	}
	local_irq_restore(flags);
}
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear the PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set the PGE bit again
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, the other CPUs are waiting for that flag to be set. Once it is,
 * each CPU goes through the transition of updating MTRRs. The CPU vendors may
 * each do it differently, so we call the mtrr_if->set() callback and let them
 * take care of it. When they're done, they again decrement data->count and
 * wait for data.gate to be cleared.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on. (A worked
 * example of this counting follows the function below.)
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * simply becomes a no-op.
 */
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);
	atomic_set(&data.gate, 0);

	/*  Start the ball rolling on other CPUs  */
	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
		panic("mtrr: timed out waiting for other CPUs\n");

	local_irq_save(flags);

	while (atomic_read(&data.count)) {
		cpu_relax();
	}
	/* ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	atomic_set(&data.gate, 1);

	/* do our MTRR business */

	/*
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate it across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);

	/* wait for the others */
	while (atomic_read(&data.count)) {
		cpu_relax();
	}
	local_irq_restore(flags);
	atomic_set(&data.gate, 0);
}
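/* A worked example of the rendezvous counting above, assuming a 4-CPU
 * boot (the CPU count is illustrative):
 *
 *	master: count = 3, gate = 0; IPI the other three CPUs
 *	others: disable irqs, count-- (3 -> 0), spin while gate == 0
 *	master: sees count == 0; count = 3, gate = 1
 *	all:    update the MTRR via mtrr_if->set(); others count-- again
 *	master: sees count == 0; restores irqs, gate = 0
 *	others: see gate drop, restore irqs, and continue
 */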
/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (4 KB)
 * @size: Physical size of region in pages (4 KB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as opaque by the caller.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, char increment)
{
	int i, error;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned int lsize;

	if ((error = mtrr_if->validate_add_page(base, size, type)))
		return error;
	if (type >= MTRR_NUM_TYPES) {
		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}
	/*  If the type is WC, check that this processor supports it  */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		printk(KERN_WARNING
		       "mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}
	if (base & size_or_mask || size & size_or_mask) {
		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	down(&main_lock);
	/*  Search for existing MTRR  */
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (base >= lbase + lsize)
			continue;
		if ((base < lbase) && (base + size <= lbase))
			continue;
		/*  At this point we know there is some kind of overlap/enclosure  */
		if ((base < lbase) || (base + size > lbase + lsize)) {
			printk(KERN_WARNING
			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
			       " 0x%lx000,0x%x000\n", base, size, lbase, lsize);
			goto out;
		}
		/*  New region is enclosed by an existing region  */
		if (ltype != type) {
			if (type == MTRR_TYPE_UNCACHABLE)
				continue;
			printk(KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
			       base, size, attrib_to_str(ltype), attrib_to_str(type));
			goto out;
		}
		if (increment)
			++usage_table[i];
		error = i;
		goto out;
	}
	/*  Search for an empty MTRR  */
	i = mtrr_if->get_free_region(base, size);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		usage_table[i] = 1;
	} else
		printk(KERN_INFO "mtrr: no more MTRRs available\n");
	error = i;
 out:
	up(&main_lock);
	return error;
}
/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as opaque by the caller.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	 char increment)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
		printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
		return -EINVAL;
	}
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max, error = -EINVAL;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned int lsize;

	max = num_var_ranges;
	down(&main_lock);
	if (reg < 0) {
		/*  Search for existing MTRR  */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n",
			       base, size);
			goto out;
		}
	}
	if (reg >= max) {
		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
		goto out;
	}
	if (is_cpu(CYRIX) && !use_intel()) {
		if ((reg == 3) && arr3_protected) {
			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
			goto out;
		}
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (usage_table[reg] < 1) {
		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	up(&main_lock);
	return error;
}
/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
		printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
		return -EINVAL;
	}
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
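/* A minimal usage sketch (hypothetical driver code; the framebuffer
 * address and size are illustrative, not from this file): mark a 4 MB
 * linear framebuffer at 0xf8000000 write-combining, then release the
 * region again on teardown.
 *
 *	int reg = mtrr_add(0xf8000000, 0x400000, MTRR_TYPE_WRCOMB, 1);
 *	if (reg < 0)
 *		printk(KERN_DEBUG "fbdrv: write-combining unavailable\n");
 *	...
 *	if (reg >= 0)
 *		mtrr_del(reg, 0xf8000000, 0x400000);
 */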
EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);
/*
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
extern void amd_init_mtrr(void);
extern void cyrix_init_mtrr(void);
extern void centaur_init_mtrr(void);

static void __init init_ifs(void)
{
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
}
static void init_other_cpus(void)
{
	/* bring up the other processors */
	set_mtrr(~0U, 0, 0, 0);

	finalize_mtrr_state();
}
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned int	lsize;
};

static struct mtrr_value * mtrr_state;

static int mtrr_save(struct sys_device * sysdev, u32 state)
{
	int i;
	int size = num_var_ranges * sizeof(struct mtrr_value);

	mtrr_state = kmalloc(size, GFP_KERNEL);
	if (!mtrr_state)
		return -ENOMEM;
	memset(mtrr_state, 0, size);

	for (i = 0; i < num_var_ranges; i++)
		mtrr_if->get(i, &mtrr_state[i].lbase,
			     &mtrr_state[i].lsize, &mtrr_state[i].ltype);
	return 0;
}

static int mtrr_restore(struct sys_device * sysdev)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_state[i].lsize)
			set_mtrr(i, mtrr_state[i].lbase,
				 mtrr_state[i].lsize, mtrr_state[i].ltype);
	}
	kfree(mtrr_state);
	return 0;
}
static struct sysdev_driver mtrr_sysdev_driver = {
	.save		= mtrr_save,
	.restore	= mtrr_restore,
};
/**
 * mtrr_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
static int __init mtrr_init(void)
{
	u32 phys_addr;

	init_ifs();

	if ( cpu_has_mtrr ) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;	/* 36 bits */
		size_and_mask = 0x00f00000;

		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			/* The original Athlon docs said that
			   total addressable memory is 44 bits wide.
			   It was not really clear whether its MTRRs
			   follow this or not. (Read: 44 or 36 bits).
			   However, "x86-64_overview.pdf" explicitly
			   states that "previous implementations support
			   36 bit MTRRs" and also provides a way to
			   query the width (in bits) of the physical
			   addressable memory on the Hammer family.
			 */
			if (boot_cpu_data.x86 == 15
			    && (cpuid_eax(0x80000000) >= 0x80000008)) {
				phys_addr = cpuid_eax(0x80000008) & 0xff;
				size_or_mask =
					~((1 << (phys_addr - PAGE_SHIFT)) - 1);
				size_and_mask = ~size_or_mask & 0xfff00000;
			}
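			/* Worked example (illustrative numbers): a Hammer
			 * CPU reporting a 40-bit physical address width
			 * gives phys_addr = 40, so with PAGE_SHIFT == 12:
			 *   size_or_mask  = ~((1 << 28) - 1)         = 0xf0000000
			 *   size_and_mask = ~0xf0000000 & 0xfff00000 = 0x0ff00000
			 * i.e. a page-frame number with any of bits 28 and up
			 * set lies beyond what these MTRRs can describe.
			 */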
			/* Athlon MTRRs use an Intel-compatible interface for
			 * getting and setting */
			break;
		case X86_VENDOR_CENTAUR:
			if (boot_cpu_data.x86 == 6) {
				/* The VIA Cyrix family has Intel-style MTRRs, but doesn't support PAE */
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if ( cpu_has_k6_mtrr ) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if ( cpu_has_centaur_mcr ) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if ( cpu_has_cyrix_arr ) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		}
	}
	printk(KERN_INFO "mtrr: v%s\n", MTRR_VERSION);

	set_num_var_ranges();
	init_table();
	init_other_cpus();

	return sysdev_driver_register(&cpu_sysdev_class,
				      &mtrr_sysdev_driver);
}
char *mtrr_strings[MTRR_NUM_TYPES] =
{
	"uncachable",		/* 0 */
	"write-combining",	/* 1 */
	"?",			/* 2 */
	"?",			/* 3 */
	"write-through",	/* 4 */
	"write-protect",	/* 5 */
	"write-back",		/* 6 */
};

subsys_initcall(mtrr_init);