Subject: [PATCH 5.15 198/279] kmap_local: don't assume kmap PTEs are linear arrays in memory

From: Ard Biesheuvel <ardb@kernel.org>

    commit 825c43f50e3aa811a291ffcb40e02fbf6d91ba86 upstream.

    The kmap_local conversion broke the ARM architecture, because the new
    code assumes that all PTEs used for creating kmaps form a linear array
    in memory, and uses array indexing to look up the kmap PTE belonging to
    a certain kmap index.

    On ARM, this cannot work, not only because the PTE pages may be
    non-adjacent in memory, but also because ARM/!LPAE interleaves hardware
    entries and extended entries (carrying software-only bits) in a way that
    is not compatible with array indexing.
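
A condensed sketch of the lookup in question, distilled from the pre-patch
mm/highmem.c shown in the diff below: the generic code caches the PTE for
FIX_KMAP_BEGIN once and reaches every other slot purely by pointer
arithmetic, which is only correct when all kmap PTEs sit in one contiguous
array:

	static pte_t *__kmap_pte;	/* PTE for FIX_KMAP_BEGIN, cached once */

	/* slot 'idx' is assumed to sit exactly 'idx' entries below that PTE */
	pte_t *slot = kmap_get_pte() - idx;
	BUG_ON(!pte_none(*slot));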

    Fortunately, this only seems to affect configurations with more than 8
    CPUs, due to the way the per-CPU kmap slots are organized in memory.

    Work around this by permitting an architecture to set a Kconfig symbol
that signifies that the kmap PTEs do not form a linear array in memory,
    and so the only way to locate the appropriate one is to walk the page
    tables.
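
The resulting helper, reproduced here in condensed form from the diff
below, simply branches on the new symbol:

	static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
	{
		if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
			return virt_to_kpte(vaddr);	/* walk the page tables */
		if (!__kmap_pte)
			__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
		return &__kmap_pte[-idx];		/* linear array indexing */
	}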

    Link: https://lore.kernel.org/linux-arm-kernel/20211026131249.3731275-1-ardb@kernel.org/
    Link: https://lkml.kernel.org/r/20211116094737.7391-1-ardb@kernel.org
    Fixes: 2a15ba82fa6c ("ARM: highmem: Switch to generic kmap atomic")
    Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
    Reported-by: Quanyang Wang <quanyang.wang@windriver.com>
    Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
    Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Cc: <stable@vger.kernel.org>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    ---
 arch/arm/Kconfig |  1 +
 mm/Kconfig       |  3 +++
 mm/highmem.c     | 32 +++++++++++++++++++++-----------
 3 files changed, 25 insertions(+), 11 deletions(-)

--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1455,6 +1455,7 @@ config HIGHMEM
 	bool "High Memory Support"
 	depends on MMU
 	select KMAP_LOCAL
+	select KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
 	help
 	  The address space of ARM processors is only 4 Gigabytes large
 	  and it has to accommodate user address space, kernel address
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -887,6 +887,9 @@ config MAPPING_DIRTY_HELPERS
 config KMAP_LOCAL
 	bool

+config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
+	bool
+
 # struct io_mapping based helper. Selected by drivers that need them
 config IO_MAPPING
 	bool
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -504,16 +504,22 @@ static inline int kmap_local_calc_idx(in

 static pte_t *__kmap_pte;

-static pte_t *kmap_get_pte(void)
+static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
 {
+	if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
+		/*
+		 * Set by the arch if __kmap_pte[-idx] does not produce
+		 * the correct entry.
+		 */
+		return virt_to_kpte(vaddr);
 	if (!__kmap_pte)
 		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-	return __kmap_pte;
+	return &__kmap_pte[-idx];
 }

 void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
-	pte_t pteval, *kmap_pte = kmap_get_pte();
+	pte_t pteval, *kmap_pte;
 	unsigned long vaddr;
 	int idx;

@@ -525,9 +531,10 @@ void *__kmap_local_pfn_prot(unsigned lon
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	BUG_ON(!pte_none(*(kmap_pte - idx)));
+	kmap_pte = kmap_get_pte(vaddr, idx);
+	BUG_ON(!pte_none(*kmap_pte));
 	pteval = pfn_pte(pfn, prot);
-	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
+	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
 	arch_kmap_local_post_map(vaddr, pteval);
 	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
 	preempt_enable();
@@ -560,7 +567,7 @@ EXPORT_SYMBOL(__kmap_local_page_prot);
 void kunmap_local_indexed(void *vaddr)
 {
 	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int idx;

 	if (addr < __fix_to_virt(FIX_KMAP_END) ||
@@ -585,8 +592,9 @@ void kunmap_local_indexed(void *vaddr)
 	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
 	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

+	kmap_pte = kmap_get_pte(addr, idx);
 	arch_kmap_local_pre_unmap(addr);
-	pte_clear(&init_mm, addr, kmap_pte - idx);
+	pte_clear(&init_mm, addr, kmap_pte);
 	arch_kmap_local_post_unmap(addr);
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
@@ -608,7 +616,7 @@ EXPORT_SYMBOL(kunmap_local_indexed);
 void __kmap_local_sched_out(void)
 {
 	struct task_struct *tsk = current;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int i;

 	/* Clear kmaps */
@@ -635,8 +643,9 @@ void __kmap_local_sched_out(void)
 		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));

 		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+		kmap_pte = kmap_get_pte(addr, idx);
 		arch_kmap_local_pre_unmap(addr);
-		pte_clear(&init_mm, addr, kmap_pte - idx);
+		pte_clear(&init_mm, addr, kmap_pte);
 		arch_kmap_local_post_unmap(addr);
 	}
 }
@@ -644,7 +653,7 @@ void __kmap_local_sched_out(void)
 void __kmap_local_sched_in(void)
 {
 	struct task_struct *tsk = current;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int i;

 	/* Restore kmaps */
@@ -664,7 +673,8 @@ void __kmap_local_sched_in(void)
 		/* See comment in __kmap_local_sched_out() */
 		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
 		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-		set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
+		kmap_pte = kmap_get_pte(addr, idx);
+		set_pte_at(&init_mm, addr, kmap_pte, pteval);
 		arch_kmap_local_post_map(addr, pteval);
 	}
 }
