Subject: [PATCH 50 of 66] mprotect: transparent huge page support
Date: 2010-11-03
From: Johannes Weiner <hannes@cmpxchg.org>

Natively handle huge pmds when changing page tables on behalf of
mprotect().

I left out update_mmu_cache() because we do not need it on x86 anyway,
and more importantly because the interface works on ptes, not pmds.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
---
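Not part of the patch, just an illustrative userspace sketch of the case this
optimizes: when an mprotect() range covers a whole pmd-mapped huge page,
change_pmd_range() now changes the huge pmd in place via change_huge_pmd();
a partial range still goes through split_huge_page_pmd(). The 2MB size,
MADV_HUGEPAGE hint and the alignment trick below are assumptions for x86-64
with transparent hugepages enabled, not something the patch itself mandates.

/*
 * Illustrative sketch only: exercises the two paths in change_pmd_range().
 * Assumes x86-64 (HPAGE_PMD_SIZE == 2MB) and THP set to "always" or "madvise".
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed pmd-level huge page size */

int main(void)
{
	/* Over-allocate so we can carve out a 2MB-aligned, THP-eligible region. */
	size_t len = 4 * HPAGE_SIZE;
	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	char *huge = (char *)(((unsigned long)map + HPAGE_SIZE - 1) &
			      ~(HPAGE_SIZE - 1));

	madvise(huge, 2 * HPAGE_SIZE, MADV_HUGEPAGE);	/* hint THP backing */
	huge[0] = 1;					/* fault in huge pages (if available) */
	huge[HPAGE_SIZE] = 1;

	/*
	 * Whole huge page: next - addr == HPAGE_PMD_SIZE in change_pmd_range(),
	 * so change_huge_pmd() rewrites the pmd in place, no split.
	 */
	if (mprotect(huge, HPAGE_SIZE, PROT_READ))
		perror("mprotect (full huge page)");

	/*
	 * Partial range: a single huge pmd cannot carry two protections, so
	 * split_huge_page_pmd() runs and the ptes are changed individually.
	 */
	if (mprotect(huge + HPAGE_SIZE, HPAGE_SIZE / 2, PROT_READ))
		perror("mprotect (partial huge page)");

	munmap(map, len);
	return 0;
}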

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -22,6 +22,8 @@ extern int zap_huge_pmd(struct mmu_gathe
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			    unsigned long addr, unsigned long end,
 			    unsigned char *vec);
+extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			   unsigned long addr, pgprot_t newprot);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -944,6 +944,33 @@ int mincore_huge_pmd(struct vm_area_stru
 	return ret;
 }
 
+int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		    unsigned long addr, pgprot_t newprot)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int ret = 0;
+
+	spin_lock(&mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		if (unlikely(pmd_trans_splitting(*pmd))) {
+			spin_unlock(&mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
+		} else {
+			pmd_t entry;
+
+			entry = pmdp_get_and_clear(mm, addr, pmd);
+			entry = pmd_modify(entry, newprot);
+			set_pmd_at(mm, addr, pmd, entry);
+			spin_unlock(&vma->vm_mm->page_table_lock);
+			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
+			ret = 1;
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
 			      struct mm_struct *mm,
 			      unsigned long address,
diff --git a/mm/mprotect.c b/mm/mprotect.c
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -88,7 +88,13 @@ static inline void change_pmd_range(stru
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(vma->vm_mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			if (next - addr != HPAGE_PMD_SIZE)
+				split_huge_page_pmd(vma->vm_mm, pmd);
+			else if (change_huge_pmd(vma, pmd, addr, newprot))
+				continue;
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
