Subject: [PATCH 3/6] x86: mm: fix missed global TLB flush stat

From: Dave Hansen <dave.hansen@linux.intel.com>

If we take the

        if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
                local_flush_tlb();
                goto out;
        }

path out of flush_tlb_mm_range(), we will have flushed the TLB but
not incremented NR_TLB_LOCAL_FLUSH_ALL. This patch unifies the exit
paths of the function so that we always take a single path when
doing a full TLB flush, and the stat is always counted.
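
For reference, a minimal userspace sketch of the control flow after this
patch. The sketch_flush_range() name, the 'huge' flag and the printf-based
counters are stand-ins of mine rather than kernel interfaces, and the
remote-flush handling after the out: label is omitted; only the branch
structure and the accounting mirror flush_tlb_mm_range():

        /*
         * Simplified, userspace-only sketch; names are illustrative
         * stand-ins, not the kernel's interfaces.
         */
        #include <stdio.h>
        #include <stdbool.h>

        #define PAGE_SHIFT      12
        #define PAGE_SIZE       (1UL << PAGE_SHIFT)
        #define TLB_FLUSH_ALL   (-1UL)

        static unsigned long tlb_single_page_flush_ceiling = 33;
        static unsigned long nr_local_flush_all; /* stand-in for NR_TLB_LOCAL_FLUSH_ALL */
        static unsigned long nr_local_flush_one; /* stand-in for NR_TLB_LOCAL_FLUSH_ONE */

        static void sketch_flush_range(unsigned long start, unsigned long end,
                                       bool huge)
        {
                /* do a global flush by default, as in the patched function */
                unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
                unsigned long addr;

                if ((end != TLB_FLUSH_ALL) && !huge)
                        base_pages_to_flush = (end - start) >> PAGE_SHIFT;

                if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
                        /* every full-flush path goes through here now,
                         * so the stat can no longer be missed */
                        base_pages_to_flush = TLB_FLUSH_ALL;
                        nr_local_flush_all++;
                        printf("full flush\n");
                } else {
                        /* flush the range one 'invlpg' at a time */
                        for (addr = start; addr < end; addr += PAGE_SIZE) {
                                nr_local_flush_one++;
                                printf("invlpg %#lx\n", addr);
                        }
                }
        }

        int main(void)
        {
                sketch_flush_range(0, TLB_FLUSH_ALL, false); /* path the old code failed to count */
                sketch_flush_range(0x1000, 0x3000, false);   /* two single-page flushes */
                printf("flush_all=%lu flush_one=%lu\n",
                       nr_local_flush_all, nr_local_flush_one);
                return 0;
        }

In this sketch the first call is the case the old code flushed without
counting; with the unified path it reports flush_all=1 and flush_one=2.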

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---

 b/arch/x86/mm/tlb.c |   15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff -puN arch/x86/mm/tlb.c~fix-missed-global-flush-stat arch/x86/mm/tlb.c
--- a/arch/x86/mm/tlb.c~fix-missed-global-flush-stat 2014-04-21 11:10:35.176852256 -0700
+++ b/arch/x86/mm/tlb.c 2014-04-21 11:10:35.190852888 -0700
@@ -172,8 +172,9 @@ unsigned long tlb_single_page_flush_ceil
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                         unsigned long end, unsigned long vmflag)
 {
-        int need_flush_others_all = 1;
         unsigned long addr;
+        /* do a global flush by default */
+        unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
         preempt_disable();
         if (current->active_mm != mm)
@@ -184,16 +185,14 @@ void flush_tlb_mm_range(struct mm_struct
                 goto out;
         }
 
-        if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
-                local_flush_tlb();
-                goto out;
-        }
+        if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+                base_pages_to_flush = (end - start) >> PAGE_SHIFT;
 
-        if ((end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
+        if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+                base_pages_to_flush = TLB_FLUSH_ALL;
                 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                 local_flush_tlb();
         } else {
-                need_flush_others_all = 0;
                 /* flush range by one by one 'invlpg' */
                 for (addr = start; addr < end; addr += PAGE_SIZE) {
                         count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
@@ -201,7 +200,7 @@ void flush_tlb_mm_range(struct mm_struct
                 }
         }
 out:
-        if (need_flush_others_all) {
+        if (base_pages_to_flush == TLB_FLUSH_ALL) {
                 start = 0UL;
                 end = TLB_FLUSH_ALL;
         }
_
