From: Christoph Hellwig <hch@lst.de>
Subject: [PATCH 5.18 422/879] dma-direct: don't fail on highmem CMA pages in dma_direct_alloc_pages

[ Upstream commit 92826e967535db2eb117db227b1191aaf98e4bb3 ]

When dma_direct_alloc_pages encounters a highmem page it currently just
gives up. What it should do instead is fall back to allocating memory
from the page allocator - without this, platforms with a global highmem
CMA pool fail all dma_alloc_pages allocations.
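
As context for the failure mode, here is a minimal driver-side sketch of
the kind of call that used to fail. The helper below is hypothetical and
purely illustrative; only dma_alloc_pages() and its signature are the
real API (added by the Fixes: commit):

#include <linux/dma-mapping.h>

/*
 * Hypothetical helper, not part of this patch. Before the fix, on a
 * platform whose only CMA pool lives in highmem, every call like this
 * failed with "Rejecting highmem page from CMA."; with the fix,
 * dma-direct drops the highmem CMA page and retries from the normal
 * page allocator, so the call can succeed.
 */
static struct page *example_alloc_dma_pages(struct device *dev, size_t size,
					    dma_addr_t *dma_handle)
{
	return dma_alloc_pages(dev, size, dma_handle, DMA_BIDIRECTIONAL,
			       GFP_KERNEL);
}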

Fixes: efa70f2fdc84 ("dma-mapping: add a new dma_alloc_pages API")
Reported-by: Mark O'Neill <mao@tumblingdice.co.uk>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/dma/direct.c | 27 ++++++++++-----------------
 1 file changed, 10 insertions(+), 17 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 9743c6ccce1a..3e7f4aab740e 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -115,7 +115,7 @@ static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
 }
 
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-		gfp_t gfp)
+		gfp_t gfp, bool allow_highmem)
 {
 	int node = dev_to_node(dev);
 	struct page *page = NULL;
@@ -129,9 +129,12 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
 	page = dma_alloc_contiguous(dev, size, gfp);
-	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-		dma_free_contiguous(dev, page, size);
-		page = NULL;
+	if (page) {
+		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
+		    (!allow_highmem && PageHighMem(page))) {
+			dma_free_contiguous(dev, page, size);
+			page = NULL;
+		}
 	}
 again:
 	if (!page)
@@ -189,7 +192,7 @@ static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
 {
 	struct page *page;
 
-	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
 	if (!page)
 		return NULL;
 
@@ -262,7 +265,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
-	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
 	if (!page)
 		return NULL;
 
@@ -370,19 +373,9 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
-	page = __dma_direct_alloc_pages(dev, size, gfp);
+	page = __dma_direct_alloc_pages(dev, size, gfp, false);
 	if (!page)
 		return NULL;
-	if (PageHighMem(page)) {
-		/*
-		 * Depending on the cma= arguments and per-arch setup
-		 * dma_alloc_contiguous could return highmem pages.
-		 * Without remapping there is no way to return them here,
-		 * so log an error and fail.
-		 */
-		dev_info(dev, "Rejecting highmem page from CMA.\n");
-		goto out_free_pages;
-	}
 
 	ret = page_address(page);
 	if (dma_set_decrypted(dev, ret, size))
-- 
2.35.1
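
For readers following the control flow, a simplified, non-authoritative
sketch of what __dma_direct_alloc_pages() does after this patch. It
mirrors internal code in kernel/dma/direct.c (dma_coherent_ok() is not
exported), so it is illustration rather than a buildable module; the
GFP zone fallback and the coherency re-check after alloc_pages_node()
are omitted:

/*
 * Sketch of the post-patch flow: if CMA hands back a page the caller
 * cannot use (not DMA-coherent for this device, or highmem when the
 * caller disallowed it), free it and fall back to the page allocator
 * instead of failing the allocation.
 */
static struct page *sketch_direct_alloc(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	struct page *page;

	page = dma_alloc_contiguous(dev, size, gfp);
	if (page && (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		     (!allow_highmem && PageHighMem(page)))) {
		/* CMA gave us an unusable (e.g. highmem) page: drop it */
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
	if (!page)	/* fall back to the normal page allocator */
		page = alloc_pages_node(dev_to_node(dev), gfp,
					get_order(size));
	return page;
}

dma_direct_alloc_pages() is the one caller that passes allow_highmem =
false, because it dereferences the page with page_address() right after
(visible in the last hunk), and highmem pages have no kernel virtual
address without remapping; dma_direct_alloc() and
dma_direct_alloc_no_mapping() pass true.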

