Subject: [PATCH 4.14 061/104] x86/KVM/VMX: Split the VMX MSR LOAD structures to have an host/guest numbers

4.14-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 33966dd6b2d2c352fae55412db2ea8cfff5df13a upstream

There is no semantic change, but this change allows an unbalanced number of
MSRs to be loaded on VMEXIT and VMENTER, i.e. the number of MSRs to save or
restore on VMEXIT or VMENTER may differ.
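
To make the new layout concrete, here is a minimal userspace sketch of
the data structure after the split (illustrative only, not the kernel
code: the capacity value and the example MSR are stand-ins, and
vmx_msr_entry here mirrors the hardware-defined MSR-load entry format):

#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8		/* stand-in capacity, not the kernel's value */

/* Mirrors the hardware-defined VMX MSR-load/store entry layout. */
struct vmx_msr_entry {
	unsigned int index;		/* MSR number */
	unsigned int reserved;
	unsigned long long value;
};

/* After the patch: each list carries its own element count ... */
struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
};

/* ... so the VMENTER (guest) and VMEXIT (host) lists may diverge. */
struct msr_autoload {
	struct vmx_msrs guest;		/* loaded on VMENTER */
	struct vmx_msrs host;		/* loaded on VMEXIT */
};

int main(void)
{
	struct msr_autoload m = { { 0 }, { 0 } };

	/* A guest-only entry is now representable: guest.nr and
	 * host.nr no longer have to move in lockstep. */
	m.guest.val[m.guest.nr].index = 0x48;	/* IA32_SPEC_CTRL, as an example */
	m.guest.val[m.guest.nr].value = 0;
	m.guest.nr++;

	printf("guest.nr=%u host.nr=%u\n", m.guest.nr, m.host.nr);
	return 0;
}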

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/kvm/vmx.c | 65 ++++++++++++++++++++++++++++-------------------------
 1 file changed, 35 insertions(+), 30 deletions(-)

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -618,6 +618,11 @@ static inline int pi_test_sn(struct pi_d
 			(unsigned long *)&pi_desc->control);
 }
 
+struct vmx_msrs {
+	unsigned int nr;
+	struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
+};
+
 struct vcpu_vmx {
 	struct kvm_vcpu vcpu;
 	unsigned long host_rsp;
@@ -651,9 +656,8 @@ struct vcpu_vmx {
 	struct loaded_vmcs *loaded_vmcs;
 	bool __launched; /* temporary, used in vmx_vcpu_run */
 	struct msr_autoload {
-		unsigned nr;
-		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
-		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+		struct vmx_msrs guest;
+		struct vmx_msrs host;
 	} msr_autoload;
 	struct {
 		int loaded;
@@ -2041,18 +2045,18 @@ static void clear_atomic_switch_msr(stru
 		}
 		break;
 	}
-
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
+	for (i = 0; i < m->guest.nr; ++i)
+		if (m->guest.val[i].index == msr)
 			break;
 
-	if (i == m->nr)
+	if (i == m->guest.nr)
 		return;
-	--m->nr;
-	m->guest[i] = m->guest[m->nr];
-	m->host[i] = m->host[m->nr];
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	--m->guest.nr;
+	--m->host.nr;
+	m->guest.val[i] = m->guest.val[m->guest.nr];
+	m->host.val[i] = m->host.val[m->host.nr];
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 }
 
 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
@@ -2104,24 +2108,25 @@ static void add_atomic_switch_msr(struct
 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
+	for (i = 0; i < m->guest.nr; ++i)
+		if (m->guest.val[i].index == msr)
 			break;
 
 	if (i == NR_AUTOLOAD_MSRS) {
 		printk_once(KERN_WARNING "Not enough msr switch entries. "
 				"Can't add msr %x\n", msr);
 		return;
-	} else if (i == m->nr) {
-		++m->nr;
-		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	} else if (i == m->guest.nr) {
+		++m->guest.nr;
+		++m->host.nr;
+		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 	}
 
-	m->guest[i].index = msr;
-	m->guest[i].value = guest_val;
-	m->host[i].index = msr;
-	m->host[i].value = host_val;
+	m->guest.val[i].index = msr;
+	m->guest.val[i].value = guest_val;
+	m->host.val[i].index = msr;
+	m->host.val[i].value = host_val;
 }
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
@@ -5765,9 +5770,9 @@ static int vmx_vcpu_setup(struct vcpu_vm
 
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
 	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
@@ -10901,10 +10906,10 @@ static int prepare_vmcs02(struct kvm_vcp
 	 * Set the MSR load/store lists to match L0's settings.
 	 */
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
 	/*
 	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
@@ -11842,8 +11847,8 @@ static void nested_vmx_vmexit(struct kvm
 	vmx_segment_cache_clear(vmx);
 
 	/* Update any VMCS fields that might have changed while L2 ran */
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 	if (vmx->hv_deadline_tsc == -1)
 		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
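
A side note on the removal path in clear_atomic_switch_msr() above: it
deletes entry i in O(1) by copying the last entry over it and shrinking
the count, which is safe because the CPU consumes only entries below the
count and ignores order. A minimal sketch of that idiom, using
hypothetical names (CAPACITY, struct list, remove_at), not the kernel's:

#include <stdio.h>

#define CAPACITY 8			/* stand-in for NR_AUTOLOAD_MSRS */

struct entry {
	unsigned int index;		/* MSR number */
	unsigned long long value;
};

struct list {
	unsigned int nr;
	struct entry val[CAPACITY];
};

/* Unordered remove: overwrite slot i with the last element and shrink
 * the count. Order does not matter because only entries below nr are
 * ever loaded. */
static void remove_at(struct list *l, unsigned int i)
{
	l->nr--;
	l->val[i] = l->val[l->nr];
}

int main(void)
{
	struct list guest = {
		3,
		{ { 0x48, 0 }, { 0xc0000080, 0 }, { 0x277, 0 } },
	};

	remove_at(&guest, 0);	/* the last entry slides into slot 0 */
	for (unsigned int i = 0; i < guest.nr; i++)
		printf("entry %u: msr %#x\n", i, guest.val[i].index);
	return 0;
}

After this patch the same swap-and-shrink is applied to the guest and
host lists separately, each under its own counter, instead of sharing
one counter across both arrays.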
