Subject: [PATCH AUTOSEL 4.19 07/44] KVM: arm/arm64: vgic: Make vgic_dist->lpi_list_lock a raw_spinlock
    From: Julien Thierry <julien.thierry@arm.com>

    [ Upstream commit fc3bc475231e12e9c0142f60100cf84d077c79e1 ]

    vgic_dist->lpi_list_lock must always be taken with interrupts disabled as
    it is used in interrupt context.

    For configurations such as PREEMPT_RT_FULL, this means that it should
    be a raw_spinlock since RT spinlocks are interruptible.
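
    Not part of the patch, but as background: on PREEMPT_RT_FULL a plain
    spinlock_t becomes a sleeping, preemptible lock, so it cannot be taken
    from hard interrupt context or with interrupts disabled, whereas a
    raw_spinlock_t keeps the classic spinning behaviour. A minimal sketch of
    the pattern the patch converts to, using hypothetical names (demo_lock,
    demo_list, demo_add), might look like this:

    /* Sketch only -- illustrative, not taken from the patch. */
    #include <linux/list.h>
    #include <linux/spinlock.h>

    static LIST_HEAD(demo_list);
    /* A raw spinlock stays a true spinning lock even on PREEMPT_RT_FULL. */
    static DEFINE_RAW_SPINLOCK(demo_lock);
    static int demo_count;

    /* Usable from both task context and hard interrupt context. */
    static void demo_add(struct list_head *entry)
    {
    	unsigned long flags;

    	raw_spin_lock_irqsave(&demo_lock, flags);	/* IRQs off + spin */
    	list_add(entry, &demo_list);
    	demo_count++;
    	raw_spin_unlock_irqrestore(&demo_lock, flags);
    }

    The patch below applies exactly this substitution (spin_lock_irqsave ->
    raw_spin_lock_irqsave and friends) to the existing lpi_list_lock users.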

    Signed-off-by: Julien Thierry <julien.thierry@arm.com>
    Acked-by: Christoffer Dall <christoffer.dall@arm.com>
    Acked-by: Marc Zyngier <marc.zyngier@arm.com>
    Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
 include/kvm/arm_vgic.h        |  2 +-
 virt/kvm/arm/vgic/vgic-init.c |  2 +-
 virt/kvm/arm/vgic/vgic-its.c  |  8 ++++----
 virt/kvm/arm/vgic/vgic.c      | 10 +++++-----
 4 files changed, 11 insertions(+), 11 deletions(-)

    diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
    index 4f31f96bbfab..90ac450745f1 100644
    --- a/include/kvm/arm_vgic.h
    +++ b/include/kvm/arm_vgic.h
    @@ -256,7 +256,7 @@ struct vgic_dist {
 	u64			propbaser;
 
 	/* Protects the lpi_list and the count value below. */
-	spinlock_t		lpi_list_lock;
+	raw_spinlock_t		lpi_list_lock;
 	struct list_head	lpi_list_head;
 	int			lpi_list_count;

    diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
    index c0c0b88af1d5..33e7ee814f7b 100644
    --- a/virt/kvm/arm/vgic/vgic-init.c
    +++ b/virt/kvm/arm/vgic/vgic-init.c
    @@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
 	INIT_LIST_HEAD(&dist->lpi_list_head);
-	spin_lock_init(&dist->lpi_list_lock);
+	raw_spin_lock_init(&dist->lpi_list_lock);
 }

    /* CREATION */
    diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
    index 12502251727e..f376c82afb61 100644
    --- a/virt/kvm/arm/vgic/vgic-its.c
    +++ b/virt/kvm/arm/vgic/vgic-its.c
    @@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	irq->target_vcpu = vcpu;
 	irq->group = 1;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	/*
 	 * There could be a race with another vgic_add_lpi(), so we need to
    @@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	dist->lpi_list_count++;
 
 out_unlock:
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	/*
 	 * We "cache" the configuration table entries in our struct vgic_irq's.
    @@ -339,7 +339,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	if (!intids)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (i == irq_count)
 			break;
    @@ -348,7 +348,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 			continue;
 		intids[i++] = irq->intid;
 	}
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	*intid_ptr = intids;
 	return i;
    diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
    index f884a54b2601..c5165e3b80cb 100644
    --- a/virt/kvm/arm/vgic/vgic.c
    +++ b/virt/kvm/arm/vgic/vgic.c
    @@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 	struct vgic_irq *irq = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (irq->intid != intid)
    @@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 	irq = NULL;
 
 out_unlock:
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	return irq;
 }
    @@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 	if (irq->intid < VGIC_MIN_LPI)
 		return;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	if (!kref_put(&irq->refcount, vgic_irq_release)) {
-		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+		raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 		return;
 	};
 
 	list_del(&irq->lpi_list);
 	dist->lpi_list_count--;
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	kfree(irq);
 }
    --
    2.19.1