    Subject: [PATCH 1/9] x86: cpu make amd.c more like amd_64.c v2
    1. make 32-bit have early_init_amd_mc and amd_detect_cmp
       (see the standalone sketch of the core-ID derivation after the diffstat below)
    2. separate out init_amd_k5/k6/k7 ...

    v2: fix compilation for !CONFIG_SMP

    Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
    ---
    arch/x86/kernel/cpu/amd.c    |  396 ++++++++++++++++++++++++------------------
    arch/x86/kernel/cpu/amd_64.c |   17 +-
    arch/x86/kernel/cpu/common.c |    2 +-
    include/asm-x86/processor.h  |    2 +-
    4 files changed, 236 insertions(+), 181 deletions(-)
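
    A note for reviewers, since these helpers are new on 32-bit: the core-ID
    derivation that this patch moves into early_init_amd_mc()/amd_detect_cmp()
    reduces to reading CPUID leaf 0x80000008 and splitting the initial APIC ID.
    The standalone user-space sketch below is illustrative only; it is not
    kernel code, and the ECX/APIC ID values are made-up examples:

    #include <stdio.h>

    int main(void)
    {
            /* Example CPUID 0x80000008 ECX: NC=3 (4 cores), ApicIdCoreIdSize=2 */
            unsigned int ecx = 0x00002003;
            unsigned int initial_apicid = 0x09;     /* example initial APIC ID */
            unsigned int max_cores, bits, core_id, phys_proc_id;

            max_cores = (ecx & 0xff) + 1;   /* ECX[7:0] + 1 = cores per package */
            bits = (ecx >> 12) & 0xf;       /* ECX[15:12] = APIC core-ID width  */
            if (bits == 0) {                /* older CPUs: recompute from count */
                    while ((1 << bits) < max_cores)
                            bits++;
            }

            /* Low-order APIC ID bits pick the core, the rest pick the socket */
            core_id = initial_apicid & ((1 << bits) - 1);
            phys_proc_id = initial_apicid >> bits;

            printf("cores/package=%u core_id=%u socket=%u\n",
                   max_cores, core_id, phys_proc_id);
            return 0;
    }

    With these example values the sketch prints "cores/package=4 core_id=1
    socket=2", which is what amd_detect_cmp() would record in cpu_core_id and
    phys_proc_id for a CPU whose initial APIC ID is 0x09.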

    diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
    index c3175da..a3a9e3c 100644
    --- a/arch/x86/kernel/cpu/amd.c
    +++ b/arch/x86/kernel/cpu/amd.c
    @@ -24,8 +24,200 @@
    extern void vide(void);
    __asm__(".align 4\nvide: ret");

    +static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
    +{
    +/*
    + * General Systems BIOSen alias the cpu frequency registers
    + * of the Elan at 0x000df000. Unfortuantly, one of the Linux
    + * drivers subsequently pokes it, and changes the CPU speed.
    + * Workaround : Remove the unneeded alias.
    + */
    +#define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
    +#define CBAR_ENB (0x80000000)
    +#define CBAR_KEY (0X000000CB)
    + if (c->x86_model == 9 || c->x86_model == 10) {
    + if (inl (CBAR) & CBAR_ENB)
    + outl (0 | CBAR_KEY, CBAR);
    + }
    +}
    +
    +
    +static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
    +{
    + u32 l, h;
    + int mbytes = num_physpages >> (20-PAGE_SHIFT);
    +
    + if (c->x86_model < 6) {
    + /* Based on AMD doc 20734R - June 2000 */
    + if (c->x86_model == 0) {
    + clear_cpu_cap(c, X86_FEATURE_APIC);
    + set_cpu_cap(c, X86_FEATURE_PGE);
    + }
    + return;
    + }
    +
    + if (c->x86_model == 6 && c->x86_mask == 1) {
    + const int K6_BUG_LOOP = 1000000;
    + int n;
    + void (*f_vide)(void);
    + unsigned long d, d2;
    +
    + printk(KERN_INFO "AMD K6 stepping B detected - ");
    +
    + /*
    + * It looks like AMD fixed the 2.6.2 bug and improved indirect
    + * calls at the same time.
    + */
    +
    + n = K6_BUG_LOOP;
    + f_vide = vide;
    + rdtscl(d);
    + while (n--)
    + f_vide();
    + rdtscl(d2);
    + d = d2-d;
    +
    + if (d > 20*K6_BUG_LOOP)
    + printk("system stability may be impaired when more than 32 MB are used.\n");
    + else
    + printk("probably OK (after B9730xxxx).\n");
    + printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
    + }
    +
    + /* K6 with old style WHCR */
    + if (c->x86_model < 8 ||
    + (c->x86_model == 8 && c->x86_mask < 8)) {
    + /* We can only write allocate on the low 508Mb */
    + if (mbytes > 508)
    + mbytes = 508;
    +
    + rdmsr(MSR_K6_WHCR, l, h);
    + if ((l&0x0000FFFF) == 0) {
    + unsigned long flags;
    + l = (1<<0)|((mbytes/4)<<1);
    + local_irq_save(flags);
    + wbinvd();
    + wrmsr(MSR_K6_WHCR, l, h);
    + local_irq_restore(flags);
    + printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
    + mbytes);
    + }
    + return;
    + }
    +
    + if ((c->x86_model == 8 && c->x86_mask > 7) ||
    + c->x86_model == 9 || c->x86_model == 13) {
    + /* The more serious chips .. */
    +
    + if (mbytes > 4092)
    + mbytes = 4092;
    +
    + rdmsr(MSR_K6_WHCR, l, h);
    + if ((l&0xFFFF0000) == 0) {
    + unsigned long flags;
    + l = ((mbytes>>2)<<22)|(1<<16);
    + local_irq_save(flags);
    + wbinvd();
    + wrmsr(MSR_K6_WHCR, l, h);
    + local_irq_restore(flags);
    + printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
    + mbytes);
    + }
    +
    + return;
    + }
    +
    + if (c->x86_model == 10) {
    + /* AMD Geode LX is model 10 */
    + /* placeholder for any needed mods */
    + return;
    + }
    +}
    +
    +static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
    +{
    + u32 l, h;
    +
    + /*
    + * Bit 15 of Athlon specific MSR 15, needs to be 0
    + * to enable SSE on Palomino/Morgan/Barton CPU's.
    + * If the BIOS didn't enable it already, enable it here.
    + */
    + if (c->x86_model >= 6 && c->x86_model <= 10) {
    + if (!cpu_has(c, X86_FEATURE_XMM)) {
    + printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
    + rdmsr(MSR_K7_HWCR, l, h);
    + l &= ~0x00008000;
    + wrmsr(MSR_K7_HWCR, l, h);
    + set_cpu_cap(c, X86_FEATURE_XMM);
    + }
    + }
    +
    + /*
    + * It's been determined by AMD that Athlons since model 8 stepping 1
    + * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
    + * As per AMD technical note 27212 0.2
    + */
    + if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
    + rdmsr(MSR_K7_CLK_CTL, l, h);
    + if ((l & 0xfff00000) != 0x20000000) {
    + printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
    + ((l & 0x000fffff)|0x20000000));
    + wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
    + }
    + }
    +
    + set_cpu_cap(c, X86_FEATURE_K7);
    +}
    +
    +/*
    + * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
    + * Assumes number of cores is a power of two.
    + */
    +static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
    +{
    +#ifdef CONFIG_X86_HT
    + unsigned bits;
    +
    + bits = c->x86_coreid_bits;
    +
    + /* Low order bits define the core id (index of core in socket) */
    + c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
    + /* Convert the initial APIC ID into the socket ID */
    + c->phys_proc_id = c->initial_apicid >> bits;
    +#endif
    +}
    +
    +static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
    +{
    +#ifdef CONFIG_X86_HT
    + unsigned bits, ecx;
    +
    + /* Multi core CPU? */
    + if (c->extended_cpuid_level < 0x80000008)
    + return;
    +
    + ecx = cpuid_ecx(0x80000008);
    +
    + c->x86_max_cores = (ecx & 0xff) + 1;
    +
    + /* CPU telling us the core id bits shift? */
    + bits = (ecx >> 12) & 0xF;
    +
    + /* Otherwise recompute */
    + if (bits == 0) {
    + while ((1 << bits) < c->x86_max_cores)
    + bits++;
    + }
    +
    + c->x86_coreid_bits = bits;
    +#endif
    +}
    +
    static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
    {
    + early_init_amd_mc(c);
    +
    if (c->x86_power & (1<<8))
    set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

    @@ -37,9 +229,6 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)

    static void __cpuinit init_amd(struct cpuinfo_x86 *c)
    {
    - u32 l, h;
    - int mbytes = num_physpages >> (20-PAGE_SHIFT);
    -
    #ifdef CONFIG_SMP
    unsigned long long value;

    @@ -50,7 +239,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
    * Errata 63 for SH-B3 steppings
    * Errata 122 for all steppings (F+ have it disabled by default)
    */
    - if (c->x86 == 15) {
    + if (c->x86 == 0xf) {
    rdmsrl(MSR_K7_HWCR, value);
    value |= 1 << 6;
    wrmsrl(MSR_K7_HWCR, value);
    @@ -73,192 +262,55 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)

    switch (c->x86) {
    case 4:
    - /*
    - * General Systems BIOSen alias the cpu frequency registers
    - * of the Elan at 0x000df000. Unfortuantly, one of the Linux
    - * drivers subsequently pokes it, and changes the CPU speed.
    - * Workaround : Remove the unneeded alias.
    - */
    -#define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
    -#define CBAR_ENB (0x80000000)
    -#define CBAR_KEY (0X000000CB)
    - if (c->x86_model == 9 || c->x86_model == 10) {
    - if (inl (CBAR) & CBAR_ENB)
    - outl (0 | CBAR_KEY, CBAR);
    - }
    - break;
    + init_amd_k5(c);
    + break;
    case 5:
    - if (c->x86_model < 6) {
    - /* Based on AMD doc 20734R - June 2000 */
    - if (c->x86_model == 0) {
    - clear_cpu_cap(c, X86_FEATURE_APIC);
    - set_cpu_cap(c, X86_FEATURE_PGE);
    - }
    - break;
    - }
    -
    - if (c->x86_model == 6 && c->x86_mask == 1) {
    - const int K6_BUG_LOOP = 1000000;
    - int n;
    - void (*f_vide)(void);
    - unsigned long d, d2;
    -
    - printk(KERN_INFO "AMD K6 stepping B detected - ");
    -
    - /*
    - * It looks like AMD fixed the 2.6.2 bug and improved indirect
    - * calls at the same time.
    - */
    -
    - n = K6_BUG_LOOP;
    - f_vide = vide;
    - rdtscl(d);
    - while (n--)
    - f_vide();
    - rdtscl(d2);
    - d = d2-d;
    -
    - if (d > 20*K6_BUG_LOOP)
    - printk("system stability may be impaired when more than 32 MB are used.\n");
    - else
    - printk("probably OK (after B9730xxxx).\n");
    - printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
    - }
    -
    - /* K6 with old style WHCR */
    - if (c->x86_model < 8 ||
    - (c->x86_model == 8 && c->x86_mask < 8)) {
    - /* We can only write allocate on the low 508Mb */
    - if (mbytes > 508)
    - mbytes = 508;
    -
    - rdmsr(MSR_K6_WHCR, l, h);
    - if ((l&0x0000FFFF) == 0) {
    - unsigned long flags;
    - l = (1<<0)|((mbytes/4)<<1);
    - local_irq_save(flags);
    - wbinvd();
    - wrmsr(MSR_K6_WHCR, l, h);
    - local_irq_restore(flags);
    - printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
    - mbytes);
    - }
    - break;
    - }
    -
    - if ((c->x86_model == 8 && c->x86_mask > 7) ||
    - c->x86_model == 9 || c->x86_model == 13) {
    - /* The more serious chips .. */
    -
    - if (mbytes > 4092)
    - mbytes = 4092;
    -
    - rdmsr(MSR_K6_WHCR, l, h);
    - if ((l&0xFFFF0000) == 0) {
    - unsigned long flags;
    - l = ((mbytes>>2)<<22)|(1<<16);
    - local_irq_save(flags);
    - wbinvd();
    - wrmsr(MSR_K6_WHCR, l, h);
    - local_irq_restore(flags);
    - printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
    - mbytes);
    - }
    -
    - break;
    - }
    -
    - if (c->x86_model == 10) {
    - /* AMD Geode LX is model 10 */
    - /* placeholder for any needed mods */
    - break;
    - }
    - break;
    - case 6: /* An Athlon/Duron */
    -
    - /*
    - * Bit 15 of Athlon specific MSR 15, needs to be 0
    - * to enable SSE on Palomino/Morgan/Barton CPU's.
    - * If the BIOS didn't enable it already, enable it here.
    - */
    - if (c->x86_model >= 6 && c->x86_model <= 10) {
    - if (!cpu_has(c, X86_FEATURE_XMM)) {
    - printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
    - rdmsr(MSR_K7_HWCR, l, h);
    - l &= ~0x00008000;
    - wrmsr(MSR_K7_HWCR, l, h);
    - set_cpu_cap(c, X86_FEATURE_XMM);
    - }
    - }
    -
    - /*
    - * It's been determined by AMD that Athlons since model 8 stepping 1
    - * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
    - * As per AMD technical note 27212 0.2
    - */
    - if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
    - rdmsr(MSR_K7_CLK_CTL, l, h);
    - if ((l & 0xfff00000) != 0x20000000) {
    - printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
    - ((l & 0x000fffff)|0x20000000));
    - wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
    - }
    - }
    - break;
    - }
    -
    - switch (c->x86) {
    - case 15:
    - /* Use K8 tuning for Fam10h and Fam11h */
    - case 0x10:
    - case 0x11:
    - set_cpu_cap(c, X86_FEATURE_K8);
    + init_amd_k6(c);
    break;
    - case 6:
    - set_cpu_cap(c, X86_FEATURE_K7);
    + case 6: /* An Athlon/Duron */
    + init_amd_k7(c);
    break;
    }
    +
    + /* K6s reports MCEs but don't actually have all the MSRs */
    + if (c->x86 < 6)
    + clear_cpu_cap(c, X86_FEATURE_MCE);
    +
    if (c->x86 >= 6)
    set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

    - display_cacheinfo(c);
    + if (!c->x86_model_id[0]) {
    + switch (c->x86) {
    + case 0xf:
    + /* Should distinguish Models here, but this is only
    + a fallback anyways. */
    + strcpy(c->x86_model_id, "Hammer");
    + break;
    + }
    + }

    - if (cpuid_eax(0x80000000) >= 0x80000008)
    - c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
    + display_cacheinfo(c);

    -#ifdef CONFIG_X86_HT
    - /*
    - * On a AMD multi core setup the lower bits of the APIC id
    - * distinguish the cores.
    - */
    - if (c->x86_max_cores > 1) {
    - int cpu = smp_processor_id();
    - unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
    + /* Multi core CPU? */
    + if (c->extended_cpuid_level >= 0x80000008)
    + amd_detect_cmp(c);

    - if (bits == 0) {
    - while ((1 << bits) < c->x86_max_cores)
    - bits++;
    - }
    - c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
    - c->phys_proc_id >>= bits;
    - printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
    - cpu, c->x86_max_cores, c->cpu_core_id);
    - }
    -#endif
    + detect_ht(c);

    - if (cpuid_eax(0x80000000) >= 0x80000006) {
    - if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000))
    + if (c->extended_cpuid_level >= 0x80000006) {
    + if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
    num_cache_leaves = 4;
    else
    num_cache_leaves = 3;
    }

    - /* K6s reports MCEs but don't actually have all the MSRs */
    - if (c->x86 < 6)
    - clear_cpu_cap(c, X86_FEATURE_MCE);
    + if (c->x86 >= 0xf && c->x86 <= 0x11)
    + set_cpu_cap(c, X86_FEATURE_K8);

    - if (cpu_has_xmm2)
    + if (cpu_has_xmm2) {
    + /* MFENCE stops RDTSC speculation */
    set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
    + }
    }

    static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
    diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c
    index 8c2d07f..7913e48 100644
    --- a/arch/x86/kernel/cpu/amd_64.c
    +++ b/arch/x86/kernel/cpu/amd_64.c
    @@ -174,17 +174,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
    if (c->extended_cpuid_level >= 0x80000008)
    amd_detect_cmp(c);

    - if (c->extended_cpuid_level >= 0x80000006 &&
    - (cpuid_edx(0x80000006) & 0xf000))
    - num_cache_leaves = 4;
    - else
    - num_cache_leaves = 3;
    + if (c->extended_cpuid_level >= 0x80000006) {
    + if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
    + num_cache_leaves = 4;
    + else
    + num_cache_leaves = 3;
    + }

    if (c->x86 >= 0xf && c->x86 <= 0x11)
    set_cpu_cap(c, X86_FEATURE_K8);

    - /* MFENCE stops RDTSC speculation */
    - set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
    + if (cpu_has_xmm2) {
    + /* MFENCE stops RDTSC speculation */
    + set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
    + }

    if (c->x86 == 0x10) {
    /* do this for boot cpu */
    diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
    index 5cde4b1..9a57106 100644
    --- a/arch/x86/kernel/cpu/common.c
    +++ b/arch/x86/kernel/cpu/common.c
    @@ -629,8 +629,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
    c->x86_vendor_id[0] = '\0'; /* Unset */
    c->x86_model_id[0] = '\0'; /* Unset */
    c->x86_max_cores = 1;
    -#ifdef CONFIG_X86_64
    c->x86_coreid_bits = 0;
    +#ifdef CONFIG_X86_64
    c->x86_clflush_size = 64;
    #else
    c->cpuid_level = -1; /* CPUID not detected */
    diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
    index 1197cc6..ee7cbb3 100644
    --- a/include/asm-x86/processor.h
    +++ b/include/asm-x86/processor.h
    @@ -76,9 +76,9 @@ struct cpuinfo_x86 {
    int x86_tlbsize;
    __u8 x86_virt_bits;
    __u8 x86_phys_bits;
    +#endif
    /* CPUID returned core id bits: */
    __u8 x86_coreid_bits;
    -#endif
    /* Max extended CPUID function supported: */
    __u32 extended_cpuid_level;
    /* Maximum supported CPUID level, -1=no CPUID: */
    --
    1.5.4.5

