linux/arch/x86/kernel/cpu/intel.c

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/ds.h>
#include <asm/bugs.h>
#include "cpu.h"
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
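/* Consulted by the 32-bit usercopy helpers (movsl_is_ok()) when deciding between rep movsl and an unrolled copy. */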
struct movsl_mask movsl_mask __read_mostly;
#endif

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */
int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

/*
 * P4 Xeon errata 037 workaround.
 * Hardware prefetcher may cause stale data to be loaded into the cache.
 */
static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & (1<<9)) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= (1<<9);	/* Disable hw prefetching */
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
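	/*
	 * CPUID.(EAX=4, ECX=0): EAX[4:0] is the cache type field (0 means no
	 * valid information), EAX[31:26] is the maximum number of addressable
	 * core IDs in the physical package, minus one.
	 */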
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}

#ifdef CONFIG_X86_F00F_BUG
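/*
 * The idea of the workaround (see also the F0 0F check in do_page_fault()):
 * map the IDT read-only at a fixmap address so that the erratum's locked IDT
 * access takes a page fault instead of hanging the CPU; the fault handler
 * recognizes faults in the IDT range and delivers the invalid opcode trap
 * by hand.
 */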
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;
	char *p = NULL;

	early_init_intel(c);

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
	 * Note that the workaround should only be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
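		/* CPUID.0AH: EAX[7:0] = architectural perfmon version, EAX[15:8] = number of general-purpose counters. */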
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
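	/* The packed value is (family << 8 | model << 4 | stepping), so 0x633 is family 6, model 3, stepping 3. */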
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}
	}

	if (p)
		strcpy(c->x86_model_id, p);

	c->x86_max_cores = num_cpu_cores(c);

	detect_ht(c);

	/* Work around errata */
	Intel_errata_workarounds(c);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	if (c->x86 == 15) {
		set_cpu_cap(c, X86_FEATURE_P4);
	}
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
	if (cpu_has_ds) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
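		/* MISC_ENABLE bit 11 set means BTS is unavailable; bit 12 set means PEBS is unavailable. */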
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
		ds_init_intel(c);
	}

	if (cpu_has_bts)
		ptrace_bts_init_intel(c);

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
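	/* 0x520 packs family 5, model 2 (P54C); mask < 0x6 covers the B steppings, 0xb is the C2 stepping. */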
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif
}

static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}

static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_size_cache	= intel_size_cache,
};

cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);

#ifndef CONFIG_X86_CMPXCHG
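/*
 * The 386 has no cmpxchg instruction, so emulate it by briefly disabling
 * interrupts around a plain read-compare-write.  That is only atomic with
 * respect to the local CPU, hence "unsuitable for SMP".
 */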
unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
{
	u8 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u8 *)ptr;
	if (prev == old)
		*(u8 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u8);

unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
{
	u16 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u16 *)ptr;
	if (prev == old)
		*(u16 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u16);

unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
{
	u32 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u32 *)ptr;
	if (prev == old)
		*(u32 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u32);
#endif

#ifndef CONFIG_X86_CMPXCHG64
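/*
 * cmpxchg8b first appeared on the Pentium, so both the 386 and the 486 need
 * this fallback (hence the _486 name); like the helpers above, it is only
 * atomic with respect to the local CPU.
 */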
unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
{
	u64 prev;
	unsigned long flags;

	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u64 *)ptr;
	if (prev == old)
		*(u64 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_486_u64);
#endif
/* arch_initcall(intel_cpu_init); */