Subject: [PATCH 5.9 043/391] powerpc/vmemmap: Fix memory leak with vmemmap list allocation failures.
From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>

[ Upstream commit ccaea15296f9773abd43aaa17ee4b88848e4a505 ]

If we fail to allocate a vmemmap list entry, we don't keep track of
the allocated vmemmap block buffer. Hence on section deactivation we
skip freeing the vmemmap block buffer, which results in a memory leak.
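
To see the pattern the fix adopts, outside the kernel context: when a
block can only be reclaimed later by walking a tracking list, a failed
attempt to record the block in that list must be handled by freeing it
at the allocation site. Below is a minimal user-space C sketch of that
rule; the names track_block(), alloc_block() and teardown() are
hypothetical stand-ins, not the kernel API:

#include <stdlib.h>
#include <errno.h>

struct tracked {
        void *buf;
        struct tracked *next;
};

static struct tracked *track_list;      /* walked by teardown() */

/* Record buf so that teardown() can free it later. */
static int track_block(void *buf)
{
        struct tracked *t = malloc(sizeof(*t));

        if (!t)
                return -ENOMEM;
        t->buf = buf;
        t->next = track_list;
        track_list = t;
        return 0;
}

static void *alloc_block(size_t size)
{
        void *buf = malloc(size);

        if (!buf)
                return NULL;
        if (track_block(buf)) {
                /*
                 * Without a tracking entry, teardown() will never see
                 * this block; freeing it here is the only way to avoid
                 * the kind of leak this patch fixes.
                 */
                free(buf);
                return NULL;
        }
        return buf;
}

/* Free every tracked block, as section deactivation does for vmemmap. */
static void teardown(void)
{
        while (track_list) {
                struct tracked *t = track_list;

                track_list = t->next;
                free(t->buf);
                free(t);
        }
}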

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200731113500.248306-1-aneesh.kumar@linux.ibm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/mm/init_64.c | 35 ++++++++++++++++++++++++++++-------
 1 file changed, 28 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 8459056cce671..2ae42c2a5cf04 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -162,16 +162,16 @@ static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
         return next++;
 }
 
-static __meminit void vmemmap_list_populate(unsigned long phys,
-                                            unsigned long start,
-                                            int node)
+static __meminit int vmemmap_list_populate(unsigned long phys,
+                                           unsigned long start,
+                                           int node)
 {
         struct vmemmap_backing *vmem_back;
 
         vmem_back = vmemmap_list_alloc(node);
         if (unlikely(!vmem_back)) {
-                WARN_ON(1);
-                return;
+                pr_debug("vmemmap list allocation failed\n");
+                return -ENOMEM;
         }
 
         vmem_back->phys = phys;
@@ -179,6 +179,7 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
         vmem_back->list = vmemmap_list;
 
         vmemmap_list = vmem_back;
+        return 0;
 }
 
 static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
@@ -199,6 +200,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                 struct vmem_altmap *altmap)
 {
+        bool altmap_alloc;
         unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
         /* Align to the page size of the linear mapping. */
@@ -228,13 +230,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                         p = vmemmap_alloc_block_buf(page_size, node, altmap);
                         if (!p)
                                 pr_debug("altmap block allocation failed, falling back to system memory");
+                        else
+                                altmap_alloc = true;
                 }
-                if (!p)
+                if (!p) {
                         p = vmemmap_alloc_block_buf(page_size, node, NULL);
+                        altmap_alloc = false;
+                }
                 if (!p)
                         return -ENOMEM;
 
-                vmemmap_list_populate(__pa(p), start, node);
+                if (vmemmap_list_populate(__pa(p), start, node)) {
+                        /*
+                         * If we don't populate the vmemmap list, we don't
+                         * have the ability to free the allocated vmemmap
+                         * pages in section_deactivate. Hence free them
+                         * here.
+                         */
+                        int nr_pfns = page_size >> PAGE_SHIFT;
+                        unsigned long page_order = get_order(page_size);
+
+                        if (altmap_alloc)
+                                vmem_altmap_free(altmap, nr_pfns);
+                        else
+                                free_pages((unsigned long)p, page_order);
+                        return -ENOMEM;
+                }
 
                 pr_debug(" * %016lx..%016lx allocated at %p\n",
                          start, start + page_size, p);
--
2.27.0
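
A note on the error path added above: the block must be returned to
whichever allocator handed it out, which is what the new altmap_alloc
flag tracks. Blocks carved from the device-provided altmap go back via
vmem_altmap_free(), which accounts in page frames (hence
page_size >> PAGE_SHIFT), while blocks that came from system memory
were obtained from the page allocator and are released with
free_pages() at the matching order (get_order(page_size)).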

