From: Marc Zyngier <maz@kernel.org>
Subject: [PATCH v3 1/2] irqchip/gic-v3-its: Track LPI distribution on a per CPU basis
Date: 2020-03-16
In order to improve the distribution of LPIs among CPUs, let's start by
tracking the number of LPIs assigned to each CPU, keeping separate
counters for managed and non-managed interrupts.
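
As a rough illustration of the counting scheme (a standalone userspace
sketch, not the kernel code below; the demo_* names, DEMO_NR_CPUS and the
C11 atomics are made up here, standing in for the per-CPU atomic_t
counters this patch adds):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_NR_CPUS 4

    /* One pair of counters per CPU, mirroring struct cpu_lpi_count. */
    struct demo_lpi_count {
    	atomic_uint managed;
    	atomic_uint unmanaged;
    };

    static struct demo_lpi_count demo_counts[DEMO_NR_CPUS];

    /* Bump the managed or unmanaged counter, as its_inc_lpi_count() does. */
    static void demo_inc(int cpu, bool managed)
    {
    	if (managed)
    		atomic_fetch_add(&demo_counts[cpu].managed, 1);
    	else
    		atomic_fetch_add(&demo_counts[cpu].unmanaged, 1);
    }

    /* Read back the per-CPU load, as its_read_lpi_count() does. */
    static unsigned int demo_read(int cpu, bool managed)
    {
    	return managed ? atomic_load(&demo_counts[cpu].managed) :
    			 atomic_load(&demo_counts[cpu].unmanaged);
    }

    int main(void)
    {
    	demo_inc(1, false);	/* an unmanaged LPI lands on CPU 1 */
    	demo_inc(1, true);	/* a managed LPI lands on CPU 1 too */
    	printf("cpu1: managed=%u unmanaged=%u\n",
    	       demo_read(1, true), demo_read(1, false));
    	return 0;
    }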

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 drivers/irqchip/irq-gic-v3-its.c | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 34e5a06ec874..941786e1e8f7 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -173,6 +173,13 @@ static struct {
 	int			next_victim;
 } vpe_proxy;
 
+struct cpu_lpi_count {
+	atomic_t	managed;
+	atomic_t	unmanaged;
+};
+
+static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
+
 static LIST_HEAD(its_nodes);
 static DEFINE_RAW_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
@@ -1500,6 +1507,30 @@ static void its_unmask_irq(struct irq_data *d)
 	lpi_update_config(d, 0, LPI_PROP_ENABLED);
 }
 
+static u32 its_read_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+
+	return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_inc_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+	else
+		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_dec_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+	else
+		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 			    bool force)
 {
@@ -1529,6 +1560,8 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 
 	/* don't set the affinity when the target cpu is same as current one */
 	if (cpu != its_dev->event_map.col_map[id]) {
+		its_inc_lpi_count(d, cpu);
+		its_dec_lpi_count(d, its_dev->event_map.col_map[id]);
 		target_col = &its_dev->its->collections[cpu];
 		its_send_movi(its_dev, target_col, id);
 		its_dev->event_map.col_map[id] = cpu;
@@ -3438,6 +3471,7 @@ static int its_irq_domain_activate(struct irq_domain *domain,
 		cpu = cpumask_first(cpu_online_mask);
 	}
 
+	its_inc_lpi_count(d, cpu);
 	its_dev->event_map.col_map[event] = cpu;
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -3452,6 +3486,7 @@ static void its_irq_domain_deactivate(struct irq_domain *domain,
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	u32 event = its_get_event_id(d);
 
+	its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
 	/* Stop the delivery of interrupts */
 	its_send_discard(its_dev, event);
 }
-- 
2.20.1