From: Kan Liang <kan.liang@linux.intel.com>
Subject: [PATCH 10/21] perf/x86/intel: Check Arch LBR MSRs
Date: Fri, 19 Jun 2020 07:03:58 -0700
KVM may not support the Architecture LBR MSRs. Accessing those MSRs may trigger a #GP and crash the guest.
The MSRs therefore have to be checked at guest boot time.
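The check reuses the driver's existing check_msr() probe, which goes through the *_safe MSR accessors so that an unsupported MSR returns an error instead of faulting. A minimal sketch of that probe pattern (msr_accessible() is a hypothetical stand-in for check_msr(); rdmsrl_safe()/wrmsrl_safe() are the kernel helpers):

static bool msr_accessible(unsigned long msr, u64 mask)
{
	u64 val_old, val_tmp, val_new;

	/* A failing read means the MSR #GPs, e.g. under a hypervisor. */
	if (rdmsrl_safe(msr, &val_old))
		return false;

	/* Only flip the bits covered by the probe mask. */
	val_tmp = val_old ^ mask;

	if (wrmsrl_safe(msr, val_tmp) ||
	    rdmsrl_safe(msr, &val_new))
		return false;

	/* Restore the original value before reporting success. */
	wrmsrl_safe(msr, val_old);

	return val_new == val_tmp;
}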
Checking MSR_ARCH_LBR_DEPTH against the maximum supported Architecture LBR depth should be sufficient. The maximum depth can be calculated as 8 * the position of the last set bit of the LBR_DEPTH value in the CPUID enumeration.
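For example, assuming each set bit n in the CPUID depth bitmap advertises support for a depth of 8 * (n + 1), a bitmap of 0x7 (depths 8, 16 and 24) gives fls() == 3 and a maximum depth of 24. A self-contained sketch of the same computation (max_arch_lbr_depth() is a hypothetical stand-in; the kernel's fls() returns the 1-based position of the most significant set bit):

static int max_arch_lbr_depth(unsigned int depth_mask)
{
	int msb = 0;

	/* Open-coded fls(): 1-based index of the highest set bit. */
	while (depth_mask) {
		msb++;
		depth_mask >>= 1;
	}

	/* e.g. depth_mask == 0x7 -> msb == 3 -> max depth 24 */
	return msb * 8;
}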
Co-developed-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
 arch/x86/events/intel/core.c | 24 ++++++++++++++++++++++--
 arch/x86/events/perf_event.h |  5 +++++
 2 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index b236cff..c3372bd 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4145,6 +4145,8 @@ static bool check_msr(unsigned long msr, u64 mask)
 
 	if (is_lbr_from(msr))
 		val_tmp = lbr_from_signext_quirk_wr(val_tmp);
+	else if (msr == MSR_ARCH_LBR_DEPTH)
+		val_tmp = x86_pmu_get_max_arch_lbr_nr();
 
 	if (wrmsrl_safe(msr, val_tmp) ||
 	    rdmsrl_safe(msr, &val_new))
@@ -5188,8 +5190,23 @@ __init int intel_pmu_init(void)
 	 * Check all LBT MSR here.
 	 * Disable LBR access if any LBR MSRs can not be accessed.
 	 */
-	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
-		x86_pmu.lbr_nr = 0;
+	if (x86_pmu.lbr_nr) {
+		if (x86_pmu.arch_lbr) {
+			u64 mask = 1;
+
+			if (x86_pmu.arch_lbr_cpl)
+				mask |= ARCH_LBR_CTL_CPL;
+			if (x86_pmu.arch_lbr_filter)
+				mask |= ARCH_LBR_CTL_FILTER;
+			if (x86_pmu.arch_lbr_call_stack)
+				mask |= ARCH_LBR_CTL_STACK;
+			if (!check_msr(MSR_ARCH_LBR_CTL, mask))
+				x86_pmu.lbr_nr = 0;
+			if (!check_msr(MSR_ARCH_LBR_DEPTH, 0))
+				x86_pmu.lbr_nr = 0;
+		} else if (!check_msr(x86_pmu.lbr_tos, 0x3UL))
+			x86_pmu.lbr_nr = 0;
+	}
 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
 		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
 		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
@@ -5206,6 +5223,9 @@ __init int intel_pmu_init(void)
 	 */
 	if (x86_pmu.extra_regs) {
 		for (er = x86_pmu.extra_regs; er->msr; er++) {
+			/* Skip Arch LBR which is already verified */
+			if (x86_pmu.arch_lbr && (er->idx == EXTRA_REG_LBR))
+				continue;
 			er->extra_msr_access = check_msr(er->msr, 0x11UL);
 			/* Disable LBR select mapping */
 			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index cbfc55b..7112c51 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -853,6 +853,11 @@ static inline bool x86_pmu_has_lbr_callstack(void)
 		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
 }
 
+static inline int x86_pmu_get_max_arch_lbr_nr(void)
+{
+	return fls(x86_pmu.arch_lbr_depth_mask) * 8;
+}
+
 DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 int x86_perf_event_set_period(struct perf_event *event);
-- 
2.7.4