Date: 2019-02-19
From: Peter Zijlstra (Intel) <peterz@infradead.org>
Subject: [PATCH v6 15/18] asm-generic/tlb: Remove arch_tlb*_mmu()
Now that all architectures are converted to the generic code, remove
the arch hooks.
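
The caller-visible API is unchanged by this: users keep pairing
tlb_gather_mmu() with tlb_finish_mmu(), which now perform the
initialisation and teardown directly instead of bouncing through the
arch_tlb*_mmu() hooks. As a rough caller-side sketch (illustrative
only; example_zap_range() and the elided unmap step are hypothetical,
not part of this patch):

	#include <asm/tlb.h>

	/* Hypothetical caller, showing the generic mmu_gather lifecycle. */
	static void example_zap_range(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, mm, start, end);	/* init, mark flush pending */
		/* ... tear down PTEs here, batching pages into the gather ... */
		tlb_finish_mmu(&tlb, start, end);	/* flush TLB, free batches */
	}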

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 mm/mmu_gather.c | 93 +++++++++++++++++++++++++-------------------------------
 1 file changed, 42 insertions(+), 51 deletions(-)

--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -93,33 +93,6 @@ bool __tlb_remove_page_size(struct mmu_g
 
 #endif /* HAVE_MMU_GATHER_NO_GATHER */
 
-void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-			 unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-
-	/* Is it from 0 to ~0? */
-	tlb->fullmm = !(start | (end+1));
-
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
-	tlb->need_flush_all = 0;
-	tlb->local.next = NULL;
-	tlb->local.nr = 0;
-	tlb->local.max = ARRAY_SIZE(tlb->__pages);
-	tlb->active = &tlb->local;
-	tlb->batch_count = 0;
-#endif
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb->batch = NULL;
-#endif
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
-	tlb->page_size = 0;
-#endif
-
-	__tlb_reset_range(tlb);
-}
-
 void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -136,27 +109,6 @@ void tlb_flush_mmu(struct mmu_gather *tl
 	tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-		unsigned long start, unsigned long end, bool force)
-{
-	if (force) {
-		__tlb_reset_range(tlb);
-		__tlb_adjust_range(tlb, start, end - start);
-	}
-
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
-	tlb_batch_list_free(tlb);
-#endif
-}
-
 #endif /* HAVE_GENERIC_MMU_GATHER */
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -258,10 +210,40 @@ void tlb_remove_table(struct mmu_gather
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 			unsigned long start, unsigned long end)
 {
-	arch_tlb_gather_mmu(tlb, mm, start, end);
+	tlb->mm = mm;
+
+	/* Is it from 0 to ~0? */
+	tlb->fullmm = !(start | (end+1));
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb->need_flush_all = 0;
+	tlb->local.next = NULL;
+	tlb->local.nr = 0;
+	tlb->local.max = ARRAY_SIZE(tlb->__pages);
+	tlb->active = &tlb->local;
+	tlb->batch_count = 0;
+#endif
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	tlb->page_size = 0;
+#endif
+
+	__tlb_reset_range(tlb);
 	inc_tlb_flush_pending(tlb->mm);
 }
 
+/**
+ * tlb_finish_mmu - finish an mmu_gather structure
+ * @tlb: the mmu_gather structure to finish
+ * @start: start of the region that will be removed from the page-table
+ * @end: end of the region that will be removed from the page-table
+ *
+ * Called at the end of the shootdown operation to free up any resources that
+ * were required.
+ */
 void tlb_finish_mmu(struct mmu_gather *tlb,
 		unsigned long start, unsigned long end)
 {
@@ -272,8 +254,17 @@ void tlb_finish_mmu(struct mmu_gather *t
 	 * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
 	 * forcefully if we detect parallel PTE batching threads.
 	 */
-	bool force = mm_tlb_flush_nested(tlb->mm);
+	if (mm_tlb_flush_nested(tlb->mm)) {
+		__tlb_reset_range(tlb);
+		__tlb_adjust_range(tlb, start, end - start);
+	}
 
-	arch_tlb_finish_mmu(tlb, start, end, force);
+	tlb_flush_mmu(tlb);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb_batch_list_free(tlb);
+#endif
 	dec_tlb_flush_pending(tlb->mm);
 }
