Subject: [PATCH v8 07/20] powerpc/mm: add helpers to get/set mm.context->pte_frag
In order to handle the pte_fragment functions on platforms using a
single fragment, without adding pte_frag to every mm_context_t, this
patch creates two helpers which do nothing on those platforms.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/include/asm/pgtable.h | 31 +++++++++++++++++++++++++++++++
 arch/powerpc/mm/pgtable-frag.c     |  8 ++++----
 2 files changed, 35 insertions(+), 4 deletions(-)
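
As an aside (not part of the patch), here is a minimal user-space sketch
of the pattern the helpers implement: when only one fragment per page is
configured, the context carries no pte_frag member, so the accessors
compile down to no-ops and callers need no #ifdef. FRAG_NR,
struct demo_context, frag_get() and frag_set() are illustrative names
only, not kernel code.

/*
 * Standalone sketch of compile-time no-op accessors.
 * Build with FRAG_NR set to 1 to exercise the no-op variant.
 */
#include <stdio.h>

#define FRAG_NR 4	/* illustrative stand-in for PTE_FRAG_NR */

struct demo_context {
#if FRAG_NR != 1
	void *pte_frag;	/* cached partial page, only when fragments are tracked */
#endif
	int dummy;	/* stand-in for the rest of the context */
};

#if FRAG_NR != 1
static inline void *frag_get(struct demo_context *ctx)
{
	return ctx->pte_frag;
}

static inline void frag_set(struct demo_context *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
static inline void *frag_get(struct demo_context *ctx)
{
	(void)ctx;
	return NULL;	/* nothing is ever cached on single-fragment configs */
}

static inline void frag_set(struct demo_context *ctx, void *p)
{
	(void)ctx;
	(void)p;	/* nothing to remember */
}
#endif

int main(void)
{
	static char frag[64];
	struct demo_context ctx = { 0 };

	/* Callers use the helpers unconditionally, as pgtable-frag.c does below. */
	frag_set(&ctx, frag);
	printf("cached fragment: %p\n", frag_get(&ctx));
	return 0;
}

Building with FRAG_NR set to 1 still compiles both call sites unchanged
and simply yields a NULL pointer, which mirrors how get_pte_from_cache()
below finds no cached fragment on single-fragment platforms.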

diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 9679b7519a35..734df2210749 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -110,6 +110,37 @@ void mark_initmem_nx(void);
 static inline void mark_initmem_nx(void) { }
 #endif
 
+/*
+ * When used, PTE_FRAG_NR is defined in subarch pgtable.h
+ * so we are sure it is included when arriving here.
+ */
+#ifndef PTE_FRAG_NR
+#define PTE_FRAG_NR 1
+#define PTE_FRAG_SIZE_SHIFT PAGE_SHIFT
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
+#endif
+
+#if PTE_FRAG_NR != 1
+static inline void *pte_frag_get(mm_context_t *ctx)
+{
+	return ctx->pte_frag;
+}
+
+static inline void pte_frag_set(mm_context_t *ctx, void *p)
+{
+	ctx->pte_frag = p;
+}
+#else
+static inline void *pte_frag_get(mm_context_t *ctx)
+{
+	return NULL;
+}
+
+static inline void pte_frag_set(mm_context_t *ctx, void *p)
+{
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index 7544d0d7177d..af23a587f019 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -38,7 +38,7 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm)
 		return NULL;
 
 	spin_lock(&mm->page_table_lock);
-	ret = mm->context.pte_frag;
+	ret = pte_frag_get(&mm->context);
 	if (ret) {
 		pte_frag = ret + PTE_FRAG_SIZE;
 		/*
@@ -46,7 +46,7 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm)
 		 */
 		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
 			pte_frag = NULL;
-		mm->context.pte_frag = pte_frag;
+		pte_frag_set(&mm->context, pte_frag);
 	}
 	spin_unlock(&mm->page_table_lock);
 	return (pte_t *)ret;
@@ -86,9 +86,9 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 	 * the allocated page with single fragement
 	 * count.
 	 */
-	if (likely(!mm->context.pte_frag)) {
+	if (likely(!pte_frag_get(&mm->context))) {
 		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
-		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
+		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
 	}
 	spin_unlock(&mm->page_table_lock);
 
--
2.13.3