    From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
    Subject: [PATCHv4 30/39] thp: do_huge_pmd_anonymous_page() cleanup

    Minor cleanup: unindent most of the function's code by inverting one
    condition (see the standalone sketch after the diffstat below). It is
    preparation for the next patch.

    No functional changes.

    Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    ---
    mm/huge_memory.c | 83 +++++++++++++++++++++++++++---------------------------
    1 file changed, 41 insertions(+), 42 deletions(-)
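
    Not part of the patch -- a minimal, standalone sketch of the same
    transformation for illustration: inverting a range check into an early
    exit so the common path loses one level of indentation. The helper
    names and the 0..100 range here are made up for the example.

    #include <stdio.h>

    /* Before: the common path is nested inside the range check. */
    static int scale_nested(int value)
    {
    	int ret = -1;

    	if (value >= 0 && value <= 100) {
    		/* the real work sits one tab deeper than it needs to be */
    		ret = value * 2;
    	}
    	return ret;
    }

    /* After: invert the check and bail out early; the work unindents. */
    static int scale_early_exit(int value)
    {
    	if (value < 0 || value > 100)
    		return -1;
    	/* the real work now sits at the top indentation level */
    	return value * 2;
    }

    int main(void)
    {
    	printf("%d %d\n", scale_nested(42), scale_early_exit(42));
    	return 0;
    }

    Both helpers behave identically; only the nesting depth differs, which
    is the whole point of the cleanup.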

    diff --git a/mm/huge_memory.c b/mm/huge_memory.c
    index 575f29b..ab07f5d 100644
    --- a/mm/huge_memory.c
    +++ b/mm/huge_memory.c
    @@ -804,55 +804,54 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
     	unsigned long haddr = address & HPAGE_PMD_MASK;
     	pte_t *pte;
     
    -	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
    -		if (unlikely(anon_vma_prepare(vma)))
    -			return VM_FAULT_OOM;
    -		if (unlikely(khugepaged_enter(vma)))
    +	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
    +		goto out;
    +	if (unlikely(anon_vma_prepare(vma)))
    +		return VM_FAULT_OOM;
    +	if (unlikely(khugepaged_enter(vma)))
    +		return VM_FAULT_OOM;
    +	if (!(flags & FAULT_FLAG_WRITE) &&
    +			transparent_hugepage_use_zero_page()) {
    +		pgtable_t pgtable;
    +		struct page *zero_page;
    +		bool set;
    +		pgtable = pte_alloc_one(mm, haddr);
    +		if (unlikely(!pgtable))
     			return VM_FAULT_OOM;
    -		if (!(flags & FAULT_FLAG_WRITE) &&
    -				transparent_hugepage_use_zero_page()) {
    -			pgtable_t pgtable;
    -			struct page *zero_page;
    -			bool set;
    -			pgtable = pte_alloc_one(mm, haddr);
    -			if (unlikely(!pgtable))
    -				return VM_FAULT_OOM;
    -			zero_page = get_huge_zero_page();
    -			if (unlikely(!zero_page)) {
    -				pte_free(mm, pgtable);
    -				count_vm_event(THP_FAULT_FALLBACK);
    -				goto out;
    -			}
    -			spin_lock(&mm->page_table_lock);
    -			set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
    -					zero_page);
    -			spin_unlock(&mm->page_table_lock);
    -			if (!set) {
    -				pte_free(mm, pgtable);
    -				put_huge_zero_page();
    -			}
    -			return 0;
    -		}
    -		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
    -				vma, haddr, numa_node_id(), 0);
    -		if (unlikely(!page)) {
    +		zero_page = get_huge_zero_page();
    +		if (unlikely(!zero_page)) {
    +			pte_free(mm, pgtable);
     			count_vm_event(THP_FAULT_FALLBACK);
     			goto out;
     		}
    -		count_vm_event(THP_FAULT_ALLOC);
    -		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
    -			put_page(page);
    -			goto out;
    -		}
    -		if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
    -				page))) {
    -			mem_cgroup_uncharge_page(page);
    -			put_page(page);
    -			goto out;
    +		spin_lock(&mm->page_table_lock);
    +		set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
    +				zero_page);
    +		spin_unlock(&mm->page_table_lock);
    +		if (!set) {
    +			pte_free(mm, pgtable);
    +			put_huge_zero_page();
     		}
    -
     		return 0;
     	}
    +	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
    +			vma, haddr, numa_node_id(), 0);
    +	if (unlikely(!page)) {
    +		count_vm_event(THP_FAULT_FALLBACK);
    +		goto out;
    +	}
    +	count_vm_event(THP_FAULT_ALLOC);
    +	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
    +		put_page(page);
    +		goto out;
    +	}
    +	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
    +		mem_cgroup_uncharge_page(page);
    +		put_page(page);
    +		goto out;
    +	}
    +
    +	return 0;
     out:
     	/*
     	 * Use __pte_alloc instead of pte_alloc_map, because we can't
    --
    1.7.10.4

