Subject: [PATCH v2 3/5] powerpc/pseries: Account for SPURR ticks on idle CPUs
From: "Gautham R. Shenoy" <ego@linux.vnet.ibm.com>

    On Pseries LPARs, to calculate utilization, we need to know the
    [S]PURR ticks when the CPUs were busy or idle.

Via idle_loop_prolog() and idle_loop_epilog(), we track the idle PURR
ticks in the VPA variable "wait_state_cycles". This patch extends that
support to also account for the idle SPURR ticks, and provides an
accessor function to accurately read the idle SPURR ticks.

    Signed-off-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
    ---
    arch/powerpc/include/asm/idle.h | 33 +++++++++++++++++++++++++++++++++
    arch/powerpc/platforms/pseries/setup.c | 2 ++
    2 files changed, 35 insertions(+)
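
As an aside for reviewers (not part of the patch itself): below is a rough
sketch of how a consumer could use the new read_this_idle_spurr() accessor to
estimate the fraction of a CPU's SPURR ticks spent idle. The helper name
idle_spurr_permille() and the use of the raw SPURR value as the running total
of ticks are assumptions made purely for illustration; the actual consumers of
the accessor are not part of this patch.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/math64.h>
#include <asm/idle.h>
#include <asm/reg.h>

/*
 * Estimate, in parts per thousand, how many of this CPU's SPURR ticks
 * were spent idle. Treating the raw SPURR read as the running total of
 * ticks is an assumption made for this sketch.
 */
static u64 idle_spurr_permille(void)
{
	u64 idle_ticks = read_this_idle_spurr();
	u64 total_ticks = mfspr(SPRN_SPURR);

	if (!total_ticks)
		return 0;

	return div64_u64(idle_ticks * 1000, total_ticks);
}

Anything along these lines would need to run with preemption disabled, since
both reads must refer to the same CPU.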

    diff --git a/arch/powerpc/include/asm/idle.h b/arch/powerpc/include/asm/idle.h
    index 126a217..db82fc1 100644
    --- a/arch/powerpc/include/asm/idle.h
    +++ b/arch/powerpc/include/asm/idle.h
@@ -2,13 +2,20 @@
 #define _ASM_POWERPC_IDLE_H
 #include <asm/runlatch.h>

+DECLARE_PER_CPU(u64, idle_spurr_cycles);
 DECLARE_PER_CPU(u64, idle_entry_purr_snap);
+DECLARE_PER_CPU(u64, idle_entry_spurr_snap);

 static inline void snapshot_purr_idle_entry(void)
 {
 	*this_cpu_ptr(&idle_entry_purr_snap) = mfspr(SPRN_PURR);
 }

+static inline void snapshot_spurr_idle_entry(void)
+{
+	*this_cpu_ptr(&idle_entry_spurr_snap) = mfspr(SPRN_SPURR);
+}
+
 static inline void update_idle_purr_accounting(void)
 {
 	u64 wait_cycles;
@@ -19,10 +26,19 @@ static inline void update_idle_purr_accounting(void)
 	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
 }

+static inline void update_idle_spurr_accounting(void)
+{
+	u64 *idle_spurr_cycles_ptr = this_cpu_ptr(&idle_spurr_cycles);
+	u64 in_spurr = *this_cpu_ptr(&idle_entry_spurr_snap);
+
+	*idle_spurr_cycles_ptr += mfspr(SPRN_SPURR) - in_spurr;
+}
+
 static inline void idle_loop_prolog(void)
 {
 	ppc64_runlatch_off();
 	snapshot_purr_idle_entry();
+	snapshot_spurr_idle_entry();
 	/*
 	 * Indicate to the HV that we are idle. Now would be
 	 * a good time to find other work to dispatch.
@@ -33,6 +49,7 @@ static inline void idle_loop_prolog(void)
 static inline void idle_loop_epilog(void)
 {
 	update_idle_purr_accounting();
+	update_idle_spurr_accounting();
 	get_lppaca()->idle = 0;
 	ppc64_runlatch_on();
 }
@@ -52,4 +69,20 @@ static inline u64 read_this_idle_purr(void)

 	return be64_to_cpu(get_lppaca()->wait_state_cycles);
 }
+
+static inline u64 read_this_idle_spurr(void)
+{
+	/*
+	 * If we are reading from an idle context, update the
+	 * idle-spurr cycles corresponding to the last idle period.
+	 * Since the idle context is not yet over, take a fresh
+	 * snapshot of the idle-spurr.
+	 */
+	if (get_lppaca()->idle == 1) {
+		update_idle_spurr_accounting();
+		snapshot_spurr_idle_entry();
+	}
+
+	return *this_cpu_ptr(&idle_spurr_cycles);
+}
 #endif
    diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
    index e9f2cefa..5ef5c82 100644
    --- a/arch/powerpc/platforms/pseries/setup.c
    +++ b/arch/powerpc/platforms/pseries/setup.c
    @@ -318,7 +318,9 @@ static int alloc_dispatch_log_kmem_cache(void)
 }
 machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);

+DEFINE_PER_CPU(u64, idle_spurr_cycles);
 DEFINE_PER_CPU(u64, idle_entry_purr_snap);
+DEFINE_PER_CPU(u64, idle_entry_spurr_snap);
 static void pseries_lpar_idle(void)
 {
 	/*
    --
    1.9.4