    Subject: Re: [PATCH v4 03/16] x86/mtrr: replace use_intel() with a local flag
    On Tue, Oct 04, 2022 at 10:10:10AM +0200, Juergen Gross wrote:
    > In MTRR code use_intel() is only used in one source file, and the
    > relevant use_intel_if member of struct mtrr_ops is set only in
    > generic_mtrr_ops.
    >
    > Replace use_intel() with a single flag in cacheinfo.c, which can be set
    > when assigning generic_mtrr_ops to mtrr_if. This allows dropping
    > use_intel_if from mtrr_ops while preparing to support PAT without MTRR.
    > As another preparation for the PAT/MTRR decoupling, use one bit for
    > MTRR control and one for PAT control. For now, set both bits together;
    > this can be changed later.
    >
    > As the new flag will be set only if mtrr_enabled is set, the test for
    > mtrr_enabled can be dropped in some places.
    >
    > At the same time drop the local mtrr_enabled() function and rename
    > the __mtrr_enabled flag to mtrr_enabled.
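
    [ Side note for context, not part of the quoted patch: the "bit for MTRR
      control and one for PAT control" mentioned above are flags in a shared
      control word. A minimal sketch - the names are taken from the diff
      below, while the header location and the bit values are assumptions:

	/* arch/x86/include/asm/cacheinfo.h (sketch) */
	extern unsigned int memory_caching_control;	/* cache setup to perform on CPU bringup */
	#define CACHE_MTRR	0x01			/* MTRR state is to be programmed */
	#define CACHE_PAT	0x02			/* PAT state is to be programmed */

      Code that used to key off use_intel() can then test
      memory_caching_control & CACHE_MTRR instead. ]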

    So, I kinda like your idea about "replace the bool with mtrr_if != NULL"
    to test whether MTRRs are enabled.

    IOW, how does this look on top of yours?

    At the end of mtrr_bp_init() I need to do some careful dancing, but I
    think this way it is even clearer. I'm pretty sure it can be simplified
    even more, but one thing at a time...
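
    [ Illustrative sketch only, condensed from the mtrr_bp_init() hunks in
      the diff below (set_num_var_ranges()/init_table()/mtrr_cleanup() left
      out for brevity):

	if (mtrr_enabled()) {			/* i.e. mtrr_if != NULL */
		bool mtrr_state_enabled = true;

		if (mtrr_if == &generic_mtrr_ops) {
			/* BIOS may override */
			mtrr_state_enabled = get_mtrr_state();

			/* the "careful dancing": make mtrr_enabled() false again */
			if (!mtrr_state_enabled)
				mtrr_if = NULL;
		}
	}

	if (!mtrr_enabled())		/* now also covers "BIOS disabled MTRRs" */
		pr_info("Disabled\n");
      ]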

    Thx.

    ---
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index dacb537da126..927b463a22bd 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -59,7 +59,6 @@
 #define MTRR_TO_PHYS_WC_OFFSET 1000
 
 u32 num_var_ranges;
-static bool mtrr_enabled;
 
 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
@@ -71,15 +70,17 @@ static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
 
 const struct mtrr_ops *mtrr_if;
 
-static void set_mtrr(unsigned int reg, unsigned long base,
-		     unsigned long size, mtrr_type type);
-
 void __init set_mtrr_ops(const struct mtrr_ops *ops)
 {
 	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
 		mtrr_ops[ops->vendor] = ops;
 }
 
+static bool mtrr_enabled(void)
+{
+	return !!mtrr_if;
+}
+
 /* Returns non-zero if we have the write-combining memory type */
 static int have_wrcomb(void)
 {
@@ -299,7 +300,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 	int i, replace, error;
 	mtrr_type ltype;
 
-	if (!mtrr_enabled)
+	if (!mtrr_enabled())
 		return -ENXIO;
 
 	error = mtrr_if->validate_add_page(base, size, type);
@@ -447,7 +448,7 @@ static int mtrr_check(unsigned long base, unsigned long size)
 int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
 	     bool increment)
 {
-	if (!mtrr_enabled)
+	if (!mtrr_enabled())
 		return -ENODEV;
 	if (mtrr_check(base, size))
 		return -EINVAL;
@@ -476,7 +477,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 	unsigned long lbase, lsize;
 	int error = -EINVAL;
 
-	if (!mtrr_enabled)
+	if (!mtrr_enabled())
 		return -ENODEV;
 
 	max = num_var_ranges;
@@ -536,7 +537,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
  */
 int mtrr_del(int reg, unsigned long base, unsigned long size)
 {
-	if (!mtrr_enabled)
+	if (!mtrr_enabled())
 		return -ENODEV;
 	if (mtrr_check(base, size))
 		return -EINVAL;
@@ -562,7 +563,7 @@ int arch_phys_wc_add(unsigned long base, unsigned long size)
 {
 	int ret;
 
-	if (pat_enabled() || !mtrr_enabled)
+	if (pat_enabled() || !mtrr_enabled())
 		return 0; /* Success! (We don't need to do anything.) */
 
 	ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
@@ -750,15 +751,18 @@ void __init mtrr_bp_init(void)
 		}
 	}
 
-	if (mtrr_if) {
-		mtrr_enabled = true;
-		set_num_var_ranges(mtrr_if == &generic_mtrr_ops);
+	if (mtrr_enabled()) {
+		bool use_generic = mtrr_if == &generic_mtrr_ops;
+		bool mtrr_state_enabled = true;
+
+		set_num_var_ranges(use_generic);
 		init_table();
-		if (mtrr_if == &generic_mtrr_ops) {
+
+		if (use_generic) {
 			/* BIOS may override */
-			mtrr_enabled = get_mtrr_state();
+			mtrr_state_enabled = get_mtrr_state();
 
-			if (mtrr_enabled) {
+			if (mtrr_state_enabled) {
 				mtrr_bp_pat_init();
 				memory_caching_control |= CACHE_MTRR | CACHE_PAT;
 			}
@@ -767,10 +771,13 @@ void __init mtrr_bp_init(void)
 				changed_by_mtrr_cleanup = 1;
 				mtrr_if->set_all();
 			}
+
+			if (!mtrr_state_enabled)
+				mtrr_if = NULL;
 		}
 	}
 
-	if (!mtrr_enabled) {
+	if (!mtrr_enabled()) {
 		pr_info("Disabled\n");
 
 		/*
@@ -811,7 +818,7 @@ void mtrr_save_state(void)
 {
 	int first_cpu;
 
-	if (!mtrr_enabled)
+	if (!mtrr_enabled())
 		return;
 
 	first_cpu = cpumask_first(cpu_online_mask);
@@ -856,7 +863,7 @@ void mtrr_bp_restore(void)
 
 static int __init mtrr_init_finialize(void)
 {
-	if (!mtrr_enabled)
+	if (!mtrr_enabled())
 		return 0;
 
 	if (memory_caching_control & CACHE_MTRR) {
    --
    Regards/Gruss,
    Boris.

    https://people.kernel.org/tglx/notes-about-netiquette
