From: Nicholas Piggin <npiggin@gmail.com>
Subject: [PATCH 4/5] mm/vmalloc: remove unmap_kernel_range
Date: 26 Jan 2021
This is a shim around vunmap_range; get rid of it.

Move the main API comment from the _noflush variant to the normal
variant, and make _noflush internal to mm/.
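
The conversion for callers is mechanical, since the new interface takes
an end address rather than a size. As an illustration (the addr and size
names below are generic, not taken from any particular caller):

	/* before: size-based interface */
	unmap_kernel_range(addr, size);

	/* after: end-based interface, matching vmap_range() and
	 * ioremap_page_range()
	 */
	vunmap_range(addr, addr + size);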

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 Documentation/core-api/cachetlb.rst |  2 +-
 arch/arm64/mm/init.c                |  2 +-
 arch/powerpc/kernel/isa-bridge.c    |  4 +-
 arch/powerpc/kernel/pci_64.c        |  2 +-
 arch/powerpc/mm/ioremap.c           |  2 +-
 drivers/pci/pci.c                   |  2 +-
 include/linux/vmalloc.h             |  8 +---
 mm/internal.h                       |  1 +
 mm/percpu-vm.c                      |  2 +-
 mm/vmalloc.c                        | 59 ++++++++++++++---------------
 10 files changed, 38 insertions(+), 46 deletions(-)

diff --git a/Documentation/core-api/cachetlb.rst b/Documentation/core-api/cachetlb.rst
index 756f7bcf8191..fe4290e26729 100644
--- a/Documentation/core-api/cachetlb.rst
+++ b/Documentation/core-api/cachetlb.rst
@@ -215,7 +215,7 @@ Here are the routines, one by one:
 
 	The first of these two routines is invoked after vmap_range()
 	has installed the page table entries. The second is invoked
-	before unmap_kernel_range() deletes the page table entries.
+	before vunmap_range() deletes the page table entries.
 
 There exists another whole class of cpu cache issues which currently
 require a whole different set of interfaces to handle properly.
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 709d98fea90c..7fe0a5074205 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -498,7 +498,7 @@ void free_initmem(void)
 	 * prevents the region from being reused for kernel modules, which
 	 * is not supported by kallsyms.
 	 */
-	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
+	vunmap_range((u64)__init_begin, (u64)__init_end);
 }
 
 void dump_mem_limit(void)
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 2257d24e6a26..39c625737c09 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -48,7 +48,7 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size)
 	if (slab_is_available()) {
 		if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
 				pgprot_noncached(PAGE_KERNEL)))
-			unmap_kernel_range(ISA_IO_BASE, size);
+			vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
 	} else {
 		early_ioremap_range(ISA_IO_BASE, pa, size,
 				pgprot_noncached(PAGE_KERNEL));
@@ -311,7 +311,7 @@ static void isa_bridge_remove(void)
 	isa_bridge_pcidev = NULL;
 
 	/* Unmap the ISA area */
-	unmap_kernel_range(ISA_IO_BASE, 0x10000);
+	vunmap_range(ISA_IO_BASE, ISA_IO_BASE + 0x10000);
 }
 
 /**
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 9312e6eda7ff..3fb7e572abed 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -140,7 +140,7 @@ void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size)
 	addr = (unsigned long)area->addr;
 	if (ioremap_page_range(addr, addr + size, paddr,
 			pgprot_noncached(PAGE_KERNEL))) {
-		unmap_kernel_range(addr, size);
+		vunmap_range(addr, addr + size);
 		return NULL;
 	}
 
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index b1a0aebe8c48..57342154d2b0 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -93,7 +93,7 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
 	if (!ret)
 		return (void __iomem *)area->addr + offset;
 
-	unmap_kernel_range(va, size);
+	vunmap_range(va, va + size);
 	free_vm_area(area);
 
 	return NULL;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b9fecc25d213..d1e5ee09b381 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4107,7 +4107,7 @@ void pci_unmap_iospace(struct resource *res)
 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
 
-	unmap_kernel_range(vaddr, resource_size(res));
+	vunmap_range(vaddr, vaddr + resource_size(res));
 #endif
 }
 EXPORT_SYMBOL(pci_unmap_iospace);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 913c9d4f5e03..b569a13c9960 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -217,8 +217,7 @@ static inline bool is_vm_area_hugepages(const void *addr)
 int vmap_range(unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
 			unsigned int max_page_shift);
-extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
-extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+void vunmap_range(unsigned long addr, unsigned long end);
 static inline void set_vm_flush_reset_perms(void *addr)
 {
 	struct vm_struct *vm = find_vm_area(addr);
@@ -228,11 +227,6 @@ static inline void set_vm_flush_reset_perms(void *addr)
 }
 
 #else
-static inline void
-unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
-{
-}
-#define unmap_kernel_range unmap_kernel_range_noflush
 static inline void set_vm_flush_reset_perms(void *addr)
 {
 }
diff --git a/mm/internal.h b/mm/internal.h
index 3ce246945882..3d4449f79582 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -628,5 +628,6 @@ struct migration_target_control {
  */
 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift);
+void vunmap_range_noflush(unsigned long start, unsigned long end);
 
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 88a53eb68a94..8d3844bc0c7c 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -134,7 +134,7 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
 
 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
 {
-	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
+	vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT));
 }
 
 /**
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 78dc3ec457ef..5ff190590fe4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -378,22 +378,20 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 	} while (p4d++, addr = next, addr != end);
 }
 
-/**
- * unmap_kernel_range_noflush - unmap kernel VM area
- * @start: start of the VM area to unmap
- * @size: size of the VM area to unmap
+/*
+ * vunmap_range_noflush is similar to vunmap_range, but does not
+ * flush caches or TLBs.
  *
- * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size specify
- * should have been allocated using get_vm_area() and its friends.
+ * The caller is responsible for calling flush_cache_vunmap() before calling
+ * this function, and flush_tlb_kernel_range() after it has returned
+ * successfully (and before the addresses are expected to cause a page fault
+ * or be re-mapped for something else, if TLB flushes are being delayed or
+ * coalesced).
  *
- * NOTE:
- * This function does NOT do any cache flushing. The caller is responsible
- * for calling flush_cache_vunmap() on to-be-mapped areas before calling this
- * function and flush_tlb_kernel_range() after.
+ * This is an internal function only. Do not use outside mm/.
  */
-void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
+void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
-	unsigned long end = start + size;
 	unsigned long next;
 	pgd_t *pgd;
 	unsigned long addr = start;
@@ -414,6 +412,22 @@ void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
 	arch_sync_kernel_mappings(start, end);
 }
 
+/**
+ * vunmap_range - unmap kernel virtual addresses
+ * @addr: start of the VM area to unmap
+ * @end: end of the VM area to unmap (non-inclusive)
+ *
+ * Clears any present PTEs in the virtual address range, flushes TLBs and
+ * caches. Any subsequent access to the address before it has been re-mapped
+ * is a kernel bug.
+ */
+void vunmap_range(unsigned long addr, unsigned long end)
+{
+	flush_cache_vunmap(addr, end);
+	vunmap_range_noflush(addr, end);
+	flush_tlb_kernel_range(addr, end);
+}
+
 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 		pgtbl_mod_mask *mask)
@@ -1712,7 +1726,7 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 static void free_unmap_vmap_area(struct vmap_area *va)
 {
 	flush_cache_vunmap(va->va_start, va->va_end);
-	unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
+	vunmap_range_noflush(va->va_start, va->va_end);
 	if (debug_pagealloc_enabled_static())
 		flush_tlb_kernel_range(va->va_start, va->va_end);
 
@@ -1990,7 +2004,7 @@ static void vb_free(unsigned long addr, unsigned long size)
 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
 	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
 
-	unmap_kernel_range_noflush(addr, size);
+	vunmap_range_noflush(addr, addr + size);
 
 	if (debug_pagealloc_enabled_static())
 		flush_tlb_kernel_range(addr, addr + size);
@@ -2307,23 +2321,6 @@ void __init vmalloc_init(void)
 	vmap_initialized = true;
 }
 
-/**
- * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
- * @addr: start of the VM area to unmap
- * @size: size of the VM area to unmap
- *
- * Similar to unmap_kernel_range_noflush() but flushes vcache before
- * the unmapping and tlb after.
- */
-void unmap_kernel_range(unsigned long addr, unsigned long size)
-{
-	unsigned long end = addr + size;
-
-	flush_cache_vunmap(addr, end);
-	unmap_kernel_range_noflush(addr, size);
-	flush_tlb_kernel_range(addr, end);
-}
-
 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
 		struct vmap_area *va, unsigned long flags, const void *caller)
 {
-- 
2.23.0
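
[Editorial note: the vunmap_range() added above is the canonical shape of
the flushing contract documented for the _noflush variant: cache flush
before the page table teardown, TLB flush after. An mm-internal user
coalescing TLB flushes across two unmaps would follow the same shape; a
minimal sketch, assuming two hypothetical ranges (start1, end1) and
(start2, end2) that are not part of this patch:

	flush_cache_vunmap(start1, end1);
	flush_cache_vunmap(start2, end2);
	vunmap_range_noflush(start1, end1);
	vunmap_range_noflush(start2, end2);
	/* one coalesced TLB flush covering both ranges */
	flush_tlb_kernel_range(min(start1, start2), max(end1, end2));
]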