    From: Babu Moger <babu.moger@amd.com>
    Subject: [PATCH v2 02/11] arch/x86: Rename the RDT functions and definitions
    Date: Fri, 5 Oct 2018
    As AMD is starting to support RDT (or QOS) features, rename the
    Intel-specific RDT functions and definitions to more generic,
    vendor-neutral names.

    Signed-off-by: Babu Moger <babu.moger@amd.com>
    ---
    arch/x86/include/asm/rdt_sched.h   | 22 +++++++++++-----------
    arch/x86/kernel/cpu/rdt.c          | 24 ++++++++++++------------
    arch/x86/kernel/cpu/rdt.h          |  8 ++++----
    arch/x86/kernel/cpu/rdt_monitor.c  | 10 +++++-----
    arch/x86/kernel/cpu/rdt_rdtgroup.c | 10 +++++-----
    arch/x86/kernel/process_32.c       |  2 +-
    arch/x86/kernel/process_64.c       |  2 +-
    7 files changed, 39 insertions(+), 39 deletions(-)
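
    The renamed rdt_sched_in() keeps the existing hot-path shape: a
    static inline wrapper gated by a static key, so the context-switch
    path pays essentially nothing when RDT is disabled. As a rough
    userspace sketch of that pattern (a plain bool stands in for the
    jump-label key, and a printf stands in for the IA32_PQR_ASSOC MSR
    write; both are illustrative, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool rdt_enabled;            /* stands in for rdt_enable_key */

    static void __rdt_sched_in(void)
    {
            /* the kernel version writes the task's CLOSid/RMID here */
            printf("write CLOSid/RMID to PQR_ASSOC\n");
    }

    static inline void rdt_sched_in(void)
    {
            if (rdt_enabled)            /* static_branch_likely() in the kernel */
                    __rdt_sched_in();
    }

    int main(void)
    {
            rdt_sched_in();             /* key off: falls straight through */
            rdt_enabled = true;
            rdt_sched_in();             /* key on: hook runs */
            return 0;
    }

    In the kernel proper, the disabled case is a NOP patched in at
    runtime rather than a load-and-branch, which is why the wrapper can
    sit directly on the scheduler hot path.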

    diff --git a/arch/x86/include/asm/rdt_sched.h b/arch/x86/include/asm/rdt_sched.h
    index 9acb06b6f81e..666bf9acb41d 100644
    --- a/arch/x86/include/asm/rdt_sched.h
    +++ b/arch/x86/include/asm/rdt_sched.h
    @@ -1,6 +1,6 @@
    /* SPDX-License-Identifier: GPL-2.0 */
    -#ifndef _ASM_X86_INTEL_RDT_SCHED_H
    -#define _ASM_X86_INTEL_RDT_SCHED_H
    +#ifndef _ASM_X86_RDT_SCHED_H
    +#define _ASM_X86_RDT_SCHED_H

    #ifdef CONFIG_INTEL_RDT

    @@ -24,21 +24,21 @@
    * The cache also helps to avoid pointless updates if the value does
    * not change.
    */
    -struct intel_pqr_state {
    +struct rdt_pqr_state {
    u32 cur_rmid;
    u32 cur_closid;
    u32 default_rmid;
    u32 default_closid;
    };

    -DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
    +DECLARE_PER_CPU(struct rdt_pqr_state, pqr_state);

    DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
    DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
    DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);

    /*
    - * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
    + * __rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
    *
    * Following considerations are made so that this has minimal impact
    * on scheduler hot path:
    @@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
    * simple as possible.
    * Must be called with preemption disabled.
    */
    -static void __intel_rdt_sched_in(void)
    +static void __rdt_sched_in(void)
    {
    - struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
    + struct rdt_pqr_state *state = this_cpu_ptr(&pqr_state);
    u32 closid = state->default_closid;
    u32 rmid = state->default_rmid;

    @@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void)
    }
    }

    -static inline void intel_rdt_sched_in(void)
    +static inline void rdt_sched_in(void)
    {
    if (static_branch_likely(&rdt_enable_key))
    - __intel_rdt_sched_in();
    + __rdt_sched_in();
    }

    #else

    -static inline void intel_rdt_sched_in(void) {}
    +static inline void rdt_sched_in(void) {}

    #endif /* CONFIG_INTEL_RDT */

    -#endif /* _ASM_X86_INTEL_RDT_SCHED_H */
    +#endif /* _ASM_X86_RDT_SCHED_H */
    diff --git a/arch/x86/kernel/cpu/rdt.c b/arch/x86/kernel/cpu/rdt.c
    index 28d6cd254ba9..b361c63170d7 100644
    --- a/arch/x86/kernel/cpu/rdt.c
    +++ b/arch/x86/kernel/cpu/rdt.c
    @@ -40,12 +40,12 @@
    DEFINE_MUTEX(rdtgroup_mutex);

    /*
    - * The cached intel_pqr_state is strictly per CPU and can never be
    + * The cached rdt_pqr_state is strictly per CPU and can never be
    * updated from a remote CPU. Functions which modify the state
    * are called with interrupts disabled and no preemption, which
    * is sufficient for the protection.
    */
    -DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
    +DEFINE_PER_CPU(struct rdt_pqr_state, pqr_state);

    /*
    * Used to store the max resource name width and max resource data width
    @@ -634,7 +634,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)

    static void clear_closid_rmid(int cpu)
    {
    - struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
    + struct rdt_pqr_state *state = this_cpu_ptr(&pqr_state);

    state->default_closid = 0;
    state->default_rmid = 0;
    @@ -643,7 +643,7 @@ static void clear_closid_rmid(int cpu)
    wrmsr(IA32_PQR_ASSOC, 0, 0);
    }

    -static int intel_rdt_online_cpu(unsigned int cpu)
    +static int rdt_online_cpu(unsigned int cpu)
    {
    struct rdt_resource *r;

    @@ -669,7 +669,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
    }
    }

    -static int intel_rdt_offline_cpu(unsigned int cpu)
    +static int rdt_offline_cpu(unsigned int cpu)
    {
    struct rdtgroup *rdtgrp;
    struct rdt_resource *r;
    @@ -861,7 +861,7 @@ static __init bool get_rdt_resources(void)

    static enum cpuhp_state rdt_online;

    -static int __init intel_rdt_late_init(void)
    +static int __init rdt_late_init(void)
    {
    struct rdt_resource *r;
    int state, ret;
    @@ -873,7 +873,7 @@ static int __init intel_rdt_late_init(void)

    state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
    "x86/rdt/cat:online:",
    - intel_rdt_online_cpu, intel_rdt_offline_cpu);
    + rdt_online_cpu, rdt_offline_cpu);
    if (state < 0)
    return state;

    @@ -885,20 +885,20 @@ static int __init intel_rdt_late_init(void)
    rdt_online = state;

    for_each_alloc_capable_rdt_resource(r)
    - pr_info("Intel RDT %s allocation detected\n", r->name);
    + pr_info("RDT %s allocation detected\n", r->name);

    for_each_mon_capable_rdt_resource(r)
    - pr_info("Intel RDT %s monitoring detected\n", r->name);
    + pr_info("RDT %s monitoring detected\n", r->name);

    return 0;
    }

    -late_initcall(intel_rdt_late_init);
    +late_initcall(rdt_late_init);

    -static void __exit intel_rdt_exit(void)
    +static void __exit rdt_exit(void)
    {
    cpuhp_remove_state(rdt_online);
    rdtgroup_exit();
    }

    -__exitcall(intel_rdt_exit);
    +__exitcall(rdt_exit);
    diff --git a/arch/x86/kernel/cpu/rdt.h b/arch/x86/kernel/cpu/rdt.h
    index 285eb3ec4200..1d7aa7e266af 100644
    --- a/arch/x86/kernel/cpu/rdt.h
    +++ b/arch/x86/kernel/cpu/rdt.h
    @@ -1,6 +1,6 @@
    /* SPDX-License-Identifier: GPL-2.0 */
    -#ifndef _ASM_X86_INTEL_RDT_H
    -#define _ASM_X86_INTEL_RDT_H
    +#ifndef _ASM_X86_RDT_H
    +#define _ASM_X86_RDT_H

    #include <linux/sched.h>
    #include <linux/kernfs.h>
    @@ -69,7 +69,7 @@ struct rmid_read {
    u64 val;
    };

    -extern unsigned int intel_cqm_threshold;
    +extern unsigned int rdt_cqm_threshold;
    extern bool rdt_alloc_capable;
    extern bool rdt_mon_capable;
    extern unsigned int rdt_mon_features;
    @@ -568,4 +568,4 @@ void cqm_handle_limbo(struct work_struct *work);
    bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
    void __check_limbo(struct rdt_domain *d, bool force_free);

    -#endif /* _ASM_X86_INTEL_RDT_H */
    +#endif /* _ASM_X86_RDT_H */
    diff --git a/arch/x86/kernel/cpu/rdt_monitor.c b/arch/x86/kernel/cpu/rdt_monitor.c
    index 2898a61cbdd9..577514cd4a71 100644
    --- a/arch/x86/kernel/cpu/rdt_monitor.c
    +++ b/arch/x86/kernel/cpu/rdt_monitor.c
    @@ -73,7 +73,7 @@ unsigned int rdt_mon_features;
    * This is the threshold cache occupancy at which we will consider an
    * RMID available for re-allocation.
    */
    -unsigned int intel_cqm_threshold;
    +unsigned int rdt_cqm_threshold;

    static inline struct rmid_entry *__rmid_entry(u32 rmid)
    {
    @@ -107,7 +107,7 @@ static bool rmid_dirty(struct rmid_entry *entry)
    {
    u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);

    - return val >= intel_cqm_threshold;
    + return val >= rdt_cqm_threshold;
    }

    /*
    @@ -187,7 +187,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
    list_for_each_entry(d, &r->domains, list) {
    if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
    val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
    - if (val <= intel_cqm_threshold)
    + if (val <= rdt_cqm_threshold)
    continue;
    }

    @@ -637,10 +637,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
    *
    * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
    */
    - intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
    + rdt_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;

    /* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
    - intel_cqm_threshold /= r->mon_scale;
    + rdt_cqm_threshold /= r->mon_scale;

    ret = dom_data_init(r);
    if (ret)
    diff --git a/arch/x86/kernel/cpu/rdt_rdtgroup.c b/arch/x86/kernel/cpu/rdt_rdtgroup.c
    index 5ecf73c833d3..bd8d03bad4aa 100644
    --- a/arch/x86/kernel/cpu/rdt_rdtgroup.c
    +++ b/arch/x86/kernel/cpu/rdt_rdtgroup.c
    @@ -288,7 +288,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
    }

    /*
    - * This is safe against intel_rdt_sched_in() called from __switch_to()
    + * This is safe against rdt_sched_in() called from __switch_to()
    * because __switch_to() is executed with interrupts disabled. A local call
    from update_closid_rmid() is protected against __switch_to() because
    * preemption is disabled.
    @@ -307,7 +307,7 @@ static void update_cpu_closid_rmid(void *info)
    * executing task might have its own closid selected. Just reuse
    * the context switch code.
    */
    - intel_rdt_sched_in();
    + rdt_sched_in();
    }

    /*
    @@ -532,7 +532,7 @@ static void move_myself(struct callback_head *head)

    preempt_disable();
    /* update PQR_ASSOC MSR to make resource group go into effect */
    - intel_rdt_sched_in();
    + rdt_sched_in();
    preempt_enable();

    kfree(callback);
    @@ -916,7 +916,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
    {
    struct rdt_resource *r = of->kn->parent->priv;

    - seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
    + seq_printf(seq, "%u\n", rdt_cqm_threshold * r->mon_scale);

    return 0;
    }
    @@ -935,7 +935,7 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
    if (bytes > (boot_cpu_data.x86_cache_size * 1024))
    return -EINVAL;

    - intel_cqm_threshold = bytes / r->mon_scale;
    + rdt_cqm_threshold = bytes / r->mon_scale;

    return nbytes;
    }
    diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
    index 931b2d0cb95e..d9e7e5668fe1 100644
    --- a/arch/x86/kernel/process_32.c
    +++ b/arch/x86/kernel/process_32.c
    @@ -302,7 +302,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
    this_cpu_write(current_task, next_p);

    /* Load the Intel cache allocation PQR MSR. */
    - intel_rdt_sched_in();
    + rdt_sched_in();

    return prev_p;
    }
    diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
    index c029782a9216..3b38d37b7742 100644
    --- a/arch/x86/kernel/process_64.c
    +++ b/arch/x86/kernel/process_64.c
    @@ -536,7 +536,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
    }

    /* Load the Intel cache allocation PQR MSR. */
    - intel_rdt_sched_in();
    + rdt_sched_in();

    return prev_p;
    }
    --
    2.17.1