    From: David Stevens <stevensd@chromium.org>
    Subject: [PATCH v5 4/7] dma-iommu: fold _swiotlb helpers into callers

    Fold the _swiotlb helper functions into the respective _page functions,
    since recent fixes have moved all logic from the _page functions to the
    _swiotlb functions.

    Signed-off-by: David Stevens <stevensd@chromium.org>
    ---
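    As a side note for anyone new to this code: the snippet below is a
    stand-alone user-space sketch with made-up names (fake_dev, map_page,
    fake_map), not kernel code. It only illustrates the shape of the fold,
    i.e. a helper that carried all of the logic disappearing into its single
    remaining caller, the way __iommu_dma_map_swiotlb() now lives inside
    iommu_dma_map_page().

        #include <stdio.h>
        #include <stdbool.h>
        #include <stddef.h>

        struct fake_dev {
                bool untrusted;         /* stands in for dev_is_untrusted() */
        };

        /* stands in for __iommu_dma_map(): pretend the IOVA is the phys addr */
        static unsigned long fake_map(unsigned long phys, size_t size)
        {
                (void)size;
                return phys;
        }

        /*
         * Folded version: the bounce-or-not decision that used to sit in a
         * separate helper is now made directly in the mapping function.
         */
        static unsigned long map_page(struct fake_dev *dev, unsigned long phys,
                                      size_t size)
        {
                size_t aligned_size = size;

                /* where the real code would bounce through swiotlb and zero
                 * the padding of the bounce buffer */
                if (dev->untrusted && ((phys | size) & 0xfff))
                        aligned_size = (size + 0xfff) & ~(size_t)0xfff;

                return fake_map(phys, aligned_size);
        }

        int main(void)
        {
                struct fake_dev dev = { .untrusted = true };

                printf("iova = 0x%lx\n", map_page(&dev, 0x1000, 100));
                return 0;
        }
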
    drivers/iommu/dma-iommu.c | 136 +++++++++++++++++---------------------
    1 file changed, 60 insertions(+), 76 deletions(-)

    diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
    index 5dd2c517dbf5..f7da4934f7e6 100644
    --- a/drivers/iommu/dma-iommu.c
    +++ b/drivers/iommu/dma-iommu.c
    @@ -493,26 +493,6 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
    iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
    }

    -static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
    - size_t size, enum dma_data_direction dir,
    - unsigned long attrs)
    -{
    - struct iommu_domain *domain = iommu_get_dma_domain(dev);
    - phys_addr_t phys;
    -
    - phys = iommu_iova_to_phys(domain, dma_addr);
    - if (WARN_ON(!phys))
    - return;
    -
    - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
    - arch_sync_dma_for_cpu(phys, size, dir);
    -
    - __iommu_dma_unmap(dev, dma_addr, size);
    -
    - if (unlikely(is_swiotlb_buffer(phys)))
    - swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
    -}
    -
    static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
    size_t size, int prot, u64 dma_mask)
    {
    @@ -539,55 +519,6 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
    return iova + iova_off;
    }

    -static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
    - size_t org_size, dma_addr_t dma_mask, bool coherent,
    - enum dma_data_direction dir, unsigned long attrs)
    -{
    - int prot = dma_info_to_prot(dir, coherent, attrs);
    - struct iommu_domain *domain = iommu_get_dma_domain(dev);
    - struct iommu_dma_cookie *cookie = domain->iova_cookie;
    - struct iova_domain *iovad = &cookie->iovad;
    - size_t aligned_size = org_size;
    - void *padding_start;
    - size_t padding_size;
    - dma_addr_t iova;
    -
    - /*
    - * If both the physical buffer start address and size are
    - * page aligned, we don't need to use a bounce page.
    - */
    - if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
    - iova_offset(iovad, phys | org_size)) {
    - aligned_size = iova_align(iovad, org_size);
    - phys = swiotlb_tbl_map_single(dev, phys, org_size,
    - aligned_size, dir, attrs);
    -
    - if (phys == DMA_MAPPING_ERROR)
    - return DMA_MAPPING_ERROR;
    -
    - /* Cleanup the padding area. */
    - padding_start = phys_to_virt(phys);
    - padding_size = aligned_size;
    -
    - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
    - (dir == DMA_TO_DEVICE ||
    - dir == DMA_BIDIRECTIONAL)) {
    - padding_start += org_size;
    - padding_size -= org_size;
    - }
    -
    - memset(padding_start, 0, padding_size);
    - }
    -
    - if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
    - arch_sync_dma_for_device(phys, org_size, dir);
    -
    - iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
    - if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
    - swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
    - return iova;
    -}
    -
    static void __iommu_dma_free_pages(struct page **pages, int count)
    {
    while (count--)
    @@ -848,15 +779,69 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
    {
    phys_addr_t phys = page_to_phys(page) + offset;
    bool coherent = dev_is_dma_coherent(dev);
    + int prot = dma_info_to_prot(dir, coherent, attrs);
    + struct iommu_domain *domain = iommu_get_dma_domain(dev);
    + struct iommu_dma_cookie *cookie = domain->iova_cookie;
    + struct iova_domain *iovad = &cookie->iovad;
    + size_t aligned_size = size;
    + dma_addr_t iova, dma_mask = dma_get_mask(dev);
    +
    + /*
    + * If both the physical buffer start address and size are
    + * page aligned, we don't need to use a bounce page.
    + */
    + if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
    + iova_offset(iovad, phys | size)) {
    + void *padding_start;
    + size_t padding_size;

    - return __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
    - coherent, dir, attrs);
    + aligned_size = iova_align(iovad, size);
    + phys = swiotlb_tbl_map_single(dev, phys, size,
    + aligned_size, dir, attrs);
    +
    + if (phys == DMA_MAPPING_ERROR)
    + return DMA_MAPPING_ERROR;
    +
    + /* Cleanup the padding area. */
    + padding_start = phys_to_virt(phys);
    + padding_size = aligned_size;
    +
    + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
    + (dir == DMA_TO_DEVICE ||
    + dir == DMA_BIDIRECTIONAL)) {
    + padding_start += size;
    + padding_size -= size;
    + }
    +
    + memset(padding_start, 0, padding_size);
    + }
    +
    + if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
    + arch_sync_dma_for_device(phys, size, dir);
    +
    + iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
    + if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
    + swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
    + return iova;
    }

    static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
    size_t size, enum dma_data_direction dir, unsigned long attrs)
    {
    - __iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
    + struct iommu_domain *domain = iommu_get_dma_domain(dev);
    + phys_addr_t phys;
    +
    + phys = iommu_iova_to_phys(domain, dma_handle);
    + if (WARN_ON(!phys))
    + return;
    +
    + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
    + arch_sync_dma_for_cpu(phys, size, dir);
    +
    + __iommu_dma_unmap(dev, dma_handle, size);
    +
    + if (unlikely(is_swiotlb_buffer(phys)))
    + swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
    }

    /*
    @@ -941,7 +926,7 @@ static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *s
    int i;

    for_each_sg(sg, s, nents, i)
    - __iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
    + iommu_dma_unmap_page(dev, sg_dma_address(s),
    sg_dma_len(s), dir, attrs);
    }

    @@ -952,9 +937,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
    int i;

    for_each_sg(sg, s, nents, i) {
    - sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
    - s->length, dma_get_mask(dev),
    - dev_is_dma_coherent(dev), dir, attrs);
    + sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
    + s->offset, s->length, dir, attrs);
    if (sg_dma_address(s) == DMA_MAPPING_ERROR)
    goto out_unmap;
    sg_dma_len(s) = s->length;
    --
    2.33.0.rc1.237.g0d66db33f3-goog