 
Subject: Re: [PATCH RFC 01/10] mm/hugetlb: Let vma_offset_start() to return start
On 10/30/22 17:29, Peter Xu wrote:
> Despite its name, vma_offset_start() does not return "the start address of
> the range"; it returns the offset that should be added to vma->vm_start.
>
> Make it return the actual start vaddr instead. This also helps all the
> callers, since wherever the return value is used it is ultimately added to
> vma->vm_start anyway, so it's cleaner.
>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
> fs/hugetlbfs/inode.c | 24 ++++++++++++------------
> 1 file changed, 12 insertions(+), 12 deletions(-)

Thanks, nice cleanup/simplification.

Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
--
Mike Kravetz

>
> diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
> index 3ee84604e36d..ac3a69fe29c3 100644
> --- a/fs/hugetlbfs/inode.c
> +++ b/fs/hugetlbfs/inode.c
> @@ -412,10 +412,12 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
> */
> static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
> {
> + unsigned long offset = 0;
> +
> if (vma->vm_pgoff < start)
> - return (start - vma->vm_pgoff) << PAGE_SHIFT;
> - else
> - return 0;
> + offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
> +
> + return vma->vm_start + offset;
> }
>
> static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
> @@ -457,7 +459,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
> v_start = vma_offset_start(vma, start);
> v_end = vma_offset_end(vma, end);
>
> - if (!hugetlb_vma_maps_page(vma, vma->vm_start + v_start, page))
> + if (!hugetlb_vma_maps_page(vma, v_start, page))
> continue;
>
> if (!hugetlb_vma_trylock_write(vma)) {
> @@ -473,8 +475,8 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
> break;
> }
>
> - unmap_hugepage_range(vma, vma->vm_start + v_start, v_end,
> - NULL, ZAP_FLAG_DROP_MARKER);
> + unmap_hugepage_range(vma, v_start, v_end, NULL,
> + ZAP_FLAG_DROP_MARKER);
> hugetlb_vma_unlock_write(vma);
> }
>
> @@ -507,10 +509,9 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
> */
> v_start = vma_offset_start(vma, start);
> v_end = vma_offset_end(vma, end);
> - if (hugetlb_vma_maps_page(vma, vma->vm_start + v_start, page))
> - unmap_hugepage_range(vma, vma->vm_start + v_start,
> - v_end, NULL,
> - ZAP_FLAG_DROP_MARKER);
> + if (hugetlb_vma_maps_page(vma, v_start, page))
> + unmap_hugepage_range(vma, v_start, v_end, NULL,
> + ZAP_FLAG_DROP_MARKER);
>
> kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
> hugetlb_vma_unlock_write(vma);
> @@ -540,8 +541,7 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
> v_start = vma_offset_start(vma, start);
> v_end = vma_offset_end(vma, end);
>
> - unmap_hugepage_range(vma, vma->vm_start + v_start, v_end,
> - NULL, zap_flags);
> + unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);
>
> /*
> * Note that vma lock only exists for shared/non-private
> --
> 2.37.3
>
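For anyone skimming the thread, the caller-visible effect of the change,
condensed from the hunks above (names as in fs/hugetlbfs/inode.c), is
roughly:

	/* Before: vma_offset_start() returned an offset relative to
	 * vma->vm_start, so every caller had to add vma->vm_start itself.
	 */
	v_start = vma_offset_start(vma, start);
	unmap_hugepage_range(vma, vma->vm_start + v_start, v_end, NULL, zap_flags);

	/* After: it returns the start virtual address directly, and the
	 * vma->vm_start addition moves into the helper.
	 */
	v_start = vma_offset_start(vma, start);
	unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);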
