    From: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>
    Subject: [PATCH 2/4] x86/mce/amd: Introduce deferred error interrupt handler
    Date: Thu, 30 Apr 2015
    Changes introduced in this patch:
    - Assign vector number 0xf4 (DEFERRED_APIC_VECTOR) to deferred errors.
    - Declare deferred_interrupt, allocate an interrupt gate for it and
      bind it to DEFERRED_APIC_VECTOR.
    - Declare smp_deferred_interrupt as the C entry point for the
      interrupt, implemented in mce_amd.c.
    - Declare trace_deferred_interrupt and define the deferred_apic trace
      events for tracing the handler.
    - Enable the deferred error interrupt only when the 'succor' CPUID
      bitfield is detected (a decoding sketch follows the '---' separator
      below).
    - Set up amd_deferred_error_interrupt() to handle the interrupt and
      assign it to def_int_vector when the feature is present in hardware;
      otherwise the default handler logs the unexpected interrupt.
    - Provide deferred error interrupt statistics in /proc/interrupts by
      incrementing irq_deferred_count.

    Signed-off-by: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>
    ---
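
    For reference, a minimal stand-alone sketch (user-space C, not kernel
    code) of the MSR_CU_DEF_ERR bitfield decoding that
    deferred_error_interrupt_enable() performs; the sample 'low' value is
    illustrative, in the kernel it comes from rdmsr_safe(MSR_CU_DEF_ERR, ...):

    #include <stdio.h>

    /* Same masks as defined in mce_amd.c in this patch */
    #define MASK_DEF_LVTOFF   0x000000F0  /* LVT offset select, bits [7:4]    */
    #define MASK_DEF_INT_TYPE 0x00000006  /* interrupt type field, bits [2:1] */
    #define DEF_LVT_OFF       0x2         /* fallback offset for Fam15h M60h  */
    #define DEF_INT_TYPE_APIC 0x2         /* deliver as an APIC interrupt     */

    int main(void)
    {
            /* Illustrative raw low word: LVT offset 2, APIC delivery selected. */
            unsigned int low = (DEF_LVT_OFF << 4) | DEF_INT_TYPE_APIC;

            unsigned int lvt_off = (low & MASK_DEF_LVTOFF) >> 4;  /* 'def_new' in the patch */
            int apic_delivery    = (low & MASK_DEF_INT_TYPE) == DEF_INT_TYPE_APIC;

            printf("LVT offset: %u, interrupt type: %s\n",
                   lvt_off, apic_delivery ? "APIC" : "other");

            /* The patch forces APIC delivery before writing the MSR back: */
            low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
            printf("MSR low word written back: %#x\n", low);

            return 0;
    }

    Once the patch is applied, the per-CPU counts show up in
    /proc/interrupts under the "DEF" label printed by arch_show_interrupts();
    an equally illustrative way to pull out just that row:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            FILE *f = fopen("/proc/interrupts", "r");
            char line[4096];

            if (!f) {
                    perror("/proc/interrupts");
                    return 1;
            }

            /* Print the "DEF" row added by arch_show_interrupts() in this patch. */
            while (fgets(line, sizeof(line), f))
                    if (strstr(line, "DEF:"))
                            fputs(line, stdout);

            fclose(f);
            return 0;
    }
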
    arch/x86/include/asm/entry_arch.h | 3 +
    arch/x86/include/asm/hardirq.h | 3 +
    arch/x86/include/asm/hw_irq.h | 2 +
    arch/x86/include/asm/irq_vectors.h | 1 +
    arch/x86/include/asm/mce.h | 3 +
    arch/x86/include/asm/trace/irq_vectors.h | 6 ++
    arch/x86/include/asm/traps.h | 3 +-
    arch/x86/kernel/cpu/mcheck/mce_amd.c | 101 +++++++++++++++++++++++++++++++
    arch/x86/kernel/entry_64.S | 5 ++
    arch/x86/kernel/irq.c | 6 ++
    arch/x86/kernel/irqinit.c | 4 ++
    arch/x86/kernel/traps.c | 4 ++
    12 files changed, 140 insertions(+), 1 deletion(-)

    diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
    index dc5fa66..f7b957b 100644
    --- a/arch/x86/include/asm/entry_arch.h
    +++ b/arch/x86/include/asm/entry_arch.h
    @@ -50,4 +50,7 @@ BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
    BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
    #endif

    +#ifdef CONFIG_X86_MCE_AMD
    +BUILD_INTERRUPT(deferred_interrupt, DEFERRED_APIC_VECTOR)
    +#endif
    #endif
    diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
    index 0f5fb6b..448451c 100644
    --- a/arch/x86/include/asm/hardirq.h
    +++ b/arch/x86/include/asm/hardirq.h
    @@ -33,6 +33,9 @@ typedef struct {
    #ifdef CONFIG_X86_MCE_THRESHOLD
    unsigned int irq_threshold_count;
    #endif
    +#ifdef CONFIG_X86_MCE_AMD
    + unsigned int irq_deferred_count;
    +#endif
    #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
    unsigned int irq_hv_callback_count;
    #endif
    diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
    index e9571dd..7cf2670 100644
    --- a/arch/x86/include/asm/hw_irq.h
    +++ b/arch/x86/include/asm/hw_irq.h
    @@ -73,6 +73,7 @@ extern asmlinkage void invalidate_interrupt31(void);
    extern asmlinkage void irq_move_cleanup_interrupt(void);
    extern asmlinkage void reboot_interrupt(void);
    extern asmlinkage void threshold_interrupt(void);
    +extern asmlinkage void deferred_interrupt(void);

    extern asmlinkage void call_function_interrupt(void);
    extern asmlinkage void call_function_single_interrupt(void);
    @@ -87,6 +88,7 @@ extern void trace_spurious_interrupt(void);
    extern void trace_thermal_interrupt(void);
    extern void trace_reschedule_interrupt(void);
    extern void trace_threshold_interrupt(void);
    +extern void trace_deferred_interrupt(void);
    extern void trace_call_function_interrupt(void);
    extern void trace_call_function_single_interrupt(void);
    #define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt
    diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
    index 666c89e..cee723f 100644
    --- a/arch/x86/include/asm/irq_vectors.h
    +++ b/arch/x86/include/asm/irq_vectors.h
    @@ -113,6 +113,7 @@
    #define IRQ_WORK_VECTOR 0xf6

    #define UV_BAU_MESSAGE 0xf5
    +#define DEFERRED_APIC_VECTOR 0xf4

    /* Vector on which hypervisor callbacks will be delivered */
    #define HYPERVISOR_CALLBACK_VECTOR 0xf3
    diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
    index dfcb664..b21b887 100644
    --- a/arch/x86/include/asm/mce.h
    +++ b/arch/x86/include/asm/mce.h
    @@ -224,6 +224,9 @@ void do_machine_check(struct pt_regs *, long);
    extern void (*mce_threshold_vector)(void);
    extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

    +/* Deferred error interrupt handler */
    +extern void (*deferred_int_vector)(void);
    +
    /*
    * Thermal handler
    */
    diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
    index 4cab890..3c1f0a7 100644
    --- a/arch/x86/include/asm/trace/irq_vectors.h
    +++ b/arch/x86/include/asm/trace/irq_vectors.h
    @@ -101,6 +101,12 @@ DEFINE_IRQ_VECTOR_EVENT(call_function_single);
    DEFINE_IRQ_VECTOR_EVENT(threshold_apic);

    /*
    + * deferred_apic - called when entering/exiting a deferred apic interrupt
    + * vector handler
    + */
    +DEFINE_IRQ_VECTOR_EVENT(deferred_apic);
    +
    +/*
    * thermal_apic - called when entering/exiting a thermal apic interrupt
    * vector handler
    */
    diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
    index 4e49d7d..ef937b7 100644
    --- a/arch/x86/include/asm/traps.h
    +++ b/arch/x86/include/asm/traps.h
    @@ -108,7 +108,8 @@ extern int panic_on_unrecovered_nmi;
    void math_emulate(struct math_emu_info *);
    #ifndef CONFIG_X86_32
    asmlinkage void smp_thermal_interrupt(void);
    -asmlinkage void mce_threshold_interrupt(void);
    +asmlinkage void smp_threshold_interrupt(void);
    +asmlinkage void smp_deferred_interrupt(void);
    #endif

    extern enum ctx_state ist_enter(struct pt_regs *regs);
    diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
    index 55ad9b3..ce82f0b 100644
    --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
    +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
    @@ -12,6 +12,8 @@
    * - added support for AMD Family 0x10 processors
    * May 2012
    * - major scrubbing
    + * April 2015
    + * - add support for deferred error interrupts
    *
    * All MC4_MISCi registers are shared between multi-cores
    */
    @@ -32,6 +34,7 @@
    #include <asm/idle.h>
    #include <asm/mce.h>
    #include <asm/msr.h>
    +#include <asm/trace/irq_vectors.h>

    #define NR_BLOCKS 9
    #define THRESHOLD_MAX 0xFFF
    @@ -47,6 +50,13 @@
    #define MASK_BLKPTR_LO 0xFF000000
    #define MCG_XBLK_ADDR 0xC0000400

    +/* Deferred error settings */
    +#define MSR_CU_DEF_ERR 0xC0000410
    +#define MASK_DEF_LVTOFF 0x000000F0
    +#define MASK_DEF_INT_TYPE 0x00000006
    +#define DEF_LVT_OFF 0x2
    +#define DEF_INT_TYPE_APIC 0x2
    +
    static const char * const th_names[] = {
    "load_store",
    "insn_fetch",
    @@ -60,6 +70,15 @@ static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
    static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */

    static void amd_threshold_interrupt(void);
    +static void amd_deferred_error_interrupt(void);
    +
    +/* Setup default deferred error interrupt handler */
    +static void default_deferred_interrupt(void)
    +{
    + pr_err("Unexpected deferred interrupt at vector %x\n",
    + DEFERRED_APIC_VECTOR);
    +}
    +void (*def_int_vector)(void) = default_deferred_interrupt;

    /*
    * CPU Initialization
    @@ -205,6 +224,62 @@ static int setup_APIC_mce(int reserved, int new)
    return reserved;
    }

    +static int setup_APIC_deferred_error(int reserved, int new)
    +{
    + if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_APIC_VECTOR,
    + APIC_EILVT_MSG_FIX, 0))
    + return new;
    +
    + return reserved;
    +}
    +
    +static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
    +{
    + u32 low = 0, high = 0;
    + int def_offset = -1, def_new;
    +
    + if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
    + return;
    +
    + def_new = (low & MASK_DEF_LVTOFF) >> 4;
    + if (c->x86 == 0x15 && c->x86_model == 0x60 &&
    + !(low & MASK_DEF_LVTOFF)) {
    + def_new = DEF_LVT_OFF;
    + low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
    + }
    +
    + def_offset = setup_APIC_deferred_error(def_offset, def_new);
    +
    + if ((def_offset == def_new) &&
    + (def_int_vector != amd_deferred_error_interrupt))
    + def_int_vector = amd_deferred_error_interrupt;
    +
    + low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
    + wrmsr(MSR_CU_DEF_ERR, low, high);
    +}
    +
    +static inline void __smp_deferred_interrupt(void)
    +{
    + inc_irq_stat(irq_deferred_count);
    + def_int_vector();
    +}
    +
    +asmlinkage __visible void smp_deferred_interrupt(void)
    +{
    + entering_irq();
    + __smp_deferred_interrupt();
    + exiting_ack_irq();
    +}
    +
    +asmlinkage __visible void smp_trace_deferred_interrupt(void)
    +{
    + entering_irq();
    + trace_deferred_apic_entry(DEFERRED_APIC_VECTOR);
    + __smp_deferred_interrupt();
    + trace_deferred_apic_exit(DEFERRED_APIC_VECTOR);
    + exiting_ack_irq();
    +}
    +
    /* cpu init entry point, called from mce.c with preempt off */
    void mce_amd_feature_init(struct cpuinfo_x86 *c)
    {
    @@ -262,6 +337,32 @@ init:
    mce_threshold_block_init(&b, offset);
    }
    }
    +
    + if (mce_flags.succor)
    + deferred_error_interrupt_enable(c);
    +}
    +
    +/* Apic interrupt handler for deferred errors */
    +static void amd_deferred_error_interrupt(void)
    +{
    + u64 status;
    + unsigned int bank;
    + struct mce m;
    +
    + for (bank = 0; bank < mca_cfg.banks; ++bank) {
    + rdmsrl(MSR_IA32_MCx_STATUS(bank), status);
    +
    + if (!(status & MCI_STATUS_VAL) ||
    + !(status & MCI_STATUS_DEFERRED))
    + continue;
    +
    + mce_setup(&m);
    + m.bank = bank;
    + m.status = status;
    + mce_log(&m);
    + wrmsrl(MSR_IA32_MCx_STATUS(bank), 0);
    + break;
    + }
    }

    /*
    diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
    index e952f6b..820965c 100644
    --- a/arch/x86/kernel/entry_64.S
    +++ b/arch/x86/kernel/entry_64.S
    @@ -914,6 +914,11 @@ apicinterrupt THRESHOLD_APIC_VECTOR \
    threshold_interrupt smp_threshold_interrupt
    #endif

    +#ifdef CONFIG_X86_MCE_AMD
    +apicinterrupt DEFERRED_APIC_VECTOR \
    + deferred_interrupt smp_deferred_interrupt
    +#endif
    +
    #ifdef CONFIG_X86_THERMAL_VECTOR
    apicinterrupt THERMAL_APIC_VECTOR \
    thermal_interrupt smp_thermal_interrupt
    diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
    index e5952c2..406f204 100644
    --- a/arch/x86/kernel/irq.c
    +++ b/arch/x86/kernel/irq.c
    @@ -116,6 +116,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
    seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
    seq_puts(p, " Threshold APIC interrupts\n");
    #endif
    +#ifdef CONFIG_X86_MCE_AMD
    + seq_printf(p, "%*s: ", prec, "DEF");
    + for_each_online_cpu(j)
    + seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_count);
    + seq_puts(p, " Deferred APIC interrupts\n");
    +#endif
    #ifdef CONFIG_X86_MCE
    seq_printf(p, "%*s: ", prec, "MCE");
    for_each_online_cpu(j)
    diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
    index cd10a64..e0ffd29 100644
    --- a/arch/x86/kernel/irqinit.c
    +++ b/arch/x86/kernel/irqinit.c
    @@ -135,6 +135,10 @@ static void __init apic_intr_init(void)
    alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
    #endif

    +#ifdef CONFIG_X86_MCE_AMD
    + alloc_intr_gate(DEFERRED_APIC_VECTOR, deferred_interrupt);
    +#endif
    +
    #ifdef CONFIG_X86_LOCAL_APIC
    /* self generated IPI for local APIC timer */
    alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
    diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
    index 324ab52..dbfe07c2 100644
    --- a/arch/x86/kernel/traps.c
    +++ b/arch/x86/kernel/traps.c
    @@ -827,6 +827,10 @@ asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
    {
    }

    +asmlinkage __visible void __attribute__((weak)) smp_deferred_interrupt(void)
    +{
    +}
    +
    /*
    * 'math_state_restore()' saves the current math information in the
    * old math state array, and gets the new ones from the current task
    --
    1.9.1

