From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Subject: [PATCH v2 05/44] mm: Expand vma iterator interface.
Date: Thu, 5 Jan 2023

Add wrappers for the maple tree to the vma iterator.  This will provide
type safety at compile time.
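
As a minimal sketch of the intended usage (VMA_ITERATOR(), for_each_vma()
and the mmap_lock helpers already exist in the tree; the loop body is
illustrative only), a caller can walk the VMAs without touching the maple
tree state directly:

	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		/* visits each VMA in mm in ascending address order */
	}
	mmap_read_unlock(mm);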

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 include/linux/mm.h       | 46 +++++++++++++++++++++++++++++++++++++++++++---
 include/linux/mm_types.h |  4 +---
 mm/mmap.c                | 77 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 120 insertions(+), 7 deletions(-)
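
Note for reviewers: vma_find() now takes an exclusive end address (it passes
max - 1 down to mas_find()), which is why for_each_vma_range() below drops
its "- 1".  A sketch of the resulting call pattern, with start/end standing
in for any exclusive address range:

	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, start);

	/* visits every VMA intersecting [start, end) */
	for_each_vma_range(vmi, vma, end) {
		/* the last vma found may still extend past end */
	}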

diff --git a/include/linux/mm.h b/include/linux/mm.h
index f3f196e4d66d..f4b964f96db1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -665,16 +665,16 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
 static inline
 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
 {
-	return mas_find(&vmi->mas, max);
+	return mas_find(&vmi->mas, max - 1);
 }
 
 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
 {
 	/*
-	 * Uses vma_find() to get the first VMA when the iterator starts.
+	 * Uses mas_find() to get the first VMA when the iterator starts.
 	 * Calling mas_next() could skip the first entry.
 	 */
-	return vma_find(vmi, ULONG_MAX);
+	return mas_find(&vmi->mas, ULONG_MAX);
 }
 
 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
@@ -687,12 +687,50 @@ static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
 	return vmi->mas.index;
 }
 
+static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
+{
+	return vmi->mas.last + 1;
+}
+static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
+				      unsigned long count)
+{
+	return mas_expected_entries(&vmi->mas, count);
+}
+
+/* Free any unused preallocations */
+static inline void vma_iter_free(struct vma_iterator *vmi)
+{
+	mas_destroy(&vmi->mas);
+}
+
+static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
+				      struct vm_area_struct *vma)
+{
+	vmi->mas.index = vma->vm_start;
+	vmi->mas.last = vma->vm_end - 1;
+	mas_store(&vmi->mas, vma);
+	if (unlikely(mas_is_err(&vmi->mas)))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline void vma_iter_invalidate(struct vma_iterator *vmi)
+{
+	mas_pause(&vmi->mas);
+}
+
+static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
+{
+	mas_set(&vmi->mas, addr);
+}
+
 #define for_each_vma(__vmi, __vma)					\
 	while (((__vma) = vma_next(&(__vmi))) != NULL)
 
 /* The MM code likes to work with exclusive end addresses */
 #define for_each_vma_range(__vmi, __vma, __end)				\
-	while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL)
+	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
 
 #ifdef CONFIG_SHMEM
 /*
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3b8475007734..3cd8b7034c48 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -904,9 +904,7 @@ struct vma_iterator {
 static inline void vma_iter_init(struct vma_iterator *vmi,
 		struct mm_struct *mm, unsigned long addr)
 {
-	vmi->mas.tree = &mm->mm_mt;
-	vmi->mas.index = addr;
-	vmi->mas.node = MAS_START;
+	mas_init(&vmi->mas, &mm->mm_mt, addr);
 }
 
 struct mmu_gather;
diff --git a/mm/mmap.c b/mm/mmap.c
index 87d929316d57..9318f2ac8a6e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -144,6 +144,83 @@ static void remove_vma(struct vm_area_struct *vma)
 	vm_area_free(vma);
 }
 
+static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
+{
+	return mas_walk(&vmi->mas);
+}
+
+static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
+						    unsigned long min)
+{
+	return mas_prev(&vmi->mas, min);
+}
+
+static inline int vma_iter_prealloc(struct vma_iterator *vmi,
+				    struct vm_area_struct *vma)
+{
+	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
+}
+
+/* Store a VMA with preallocated memory */
+static inline void vma_iter_store(struct vma_iterator *vmi,
+				  struct vm_area_struct *vma)
+{
+
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+	if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.index > vma->vm_start)) {
+		printk("%lu > %lu\n", vmi->mas.index, vma->vm_start);
+		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
+		printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last);
+		mt_dump(vmi->mas.tree);
+	}
+	if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last < vma->vm_start)) {
+		printk("%lu < %lu\n", vmi->mas.last, vma->vm_start);
+		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
+		printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last);
+		mt_dump(vmi->mas.tree);
+	}
+#endif
+
+	if (vmi->mas.node != MAS_START &&
+	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
+		vma_iter_invalidate(vmi);
+
+	vmi->mas.index = vma->vm_start;
+	vmi->mas.last = vma->vm_end - 1;
+	mas_store_prealloc(&vmi->mas, vma);
+}
+
+static inline void vma_iter_clear(struct vma_iterator *vmi,
+				  unsigned long start, unsigned long end)
+{
+	mas_set_range(&vmi->mas, start, end - 1);
+	mas_store_prealloc(&vmi->mas, NULL);
+}
+
+static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
+				     struct vm_area_struct *vma, gfp_t gfp)
+{
+	vmi->mas.index = vma->vm_start;
+	vmi->mas.last = vma->vm_end - 1;
+	mas_store_gfp(&vmi->mas, vma, gfp);
+	if (unlikely(mas_is_err(&vmi->mas)))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
+				     unsigned long start, unsigned long end, gfp_t gfp)
+{
+	vmi->mas.index = start;
+	vmi->mas.last = end - 1;
+	mas_store_gfp(&vmi->mas, NULL, gfp);
+	if (unlikely(mas_is_err(&vmi->mas)))
+		return -ENOMEM;
+
+	return 0;
+}
+
 /*
  * check_brk_limits() - Use platform specific check of range & verify mlock
  * limits.
--
2.35.1