Subject: Re: [PATCH 10/10] xen/arm: call dma_to_phys on the dma_addr_t parameter of dma_cache_maint
From: Julien Grall
Date: 2020-05-21


On 21/05/2020 00:45, Stefano Stabellini wrote:
> From: Stefano Stabellini <stefano.stabellini@xilinx.com>
>
> Add a struct device* parameter to dma_cache_maint.
>
> Translate the dma_addr_t parameter of dma_cache_maint by calling
> dma_to_phys. Do it for the first page and all the following pages, in
> case of multipage handling.

The term 'page' is confusing here. Are we referring to a Xen page or a
Linux page?
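
The distinction matters because XEN_PAGE_SIZE is fixed at 4K by the Xen
ABI, while a Linux page on arm64 can be 4K, 16K or 64K. A minimal sketch
of the relationship, assuming a hypothetical 64K-page configuration (the
constants are spelled out here for illustration, not pulled from the
kernel headers):

#include <stdio.h>

#define XEN_PAGE_SHIFT	12	/* fixed: the Xen ABI always uses 4K pages */
#define PAGE_SHIFT	16	/* assumption: arm64 with CONFIG_ARM64_64K_PAGES */

int main(void)
{
	/* one 64K Linux page covers sixteen 4K Xen pages */
	printf("Xen pages per Linux page: %d\n",
	       1 << (PAGE_SHIFT - XEN_PAGE_SHIFT));
	return 0;
}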

Also, the same comments as on patches #8 and #9 apply to this commit
message.

>
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
> ---
>  arch/arm/xen/mm.c | 15 +++++++++------
>  1 file changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
> index 7639251bcc79..6ddf3b3c1ab5 100644
> --- a/arch/arm/xen/mm.c
> +++ b/arch/arm/xen/mm.c
> @@ -43,15 +43,18 @@ unsigned long xen_get_swiotlb_free_pages(unsigned int order)
>  static bool hypercall_cflush = false;
> 
>  /* buffers in highmem or foreign pages cannot cross page boundaries */
> -static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
> +static void dma_cache_maint(struct device *dev, dma_addr_t handle,
> +			    size_t size, u32 op)
>  {
>  	struct gnttab_cache_flush cflush;
> 
> -	cflush.a.dev_bus_addr = handle & XEN_PAGE_MASK;
>  	cflush.offset = xen_offset_in_page(handle);
>  	cflush.op = op;
> +	handle &= XEN_PAGE_MASK;
> 
>  	do {
> +		cflush.a.dev_bus_addr = dma_to_phys(dev, handle);
> +
>  		if (size + cflush.offset > XEN_PAGE_SIZE)
>  			cflush.length = XEN_PAGE_SIZE - cflush.offset;
>  		else
> @@ -60,7 +63,7 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
>  		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
> 
>  		cflush.offset = 0;
> -		cflush.a.dev_bus_addr += cflush.length;
> +		handle += cflush.length;
>  		size -= cflush.length;
>  	} while (size);
>  }
> @@ -79,7 +82,7 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
>  	if (pfn_valid(PFN_DOWN(dma_to_phys(dev, handle))))
>  		arch_sync_dma_for_cpu(paddr, size, dir);
>  	else if (dir != DMA_TO_DEVICE)
> -		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
> +		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
>  }
> 
>  void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
> @@ -89,9 +92,9 @@ void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
>  	if (pfn_valid(PFN_DOWN(dma_to_phys(dev, handle))))
>  		arch_sync_dma_for_device(paddr, size, dir);
>  	else if (dir == DMA_FROM_DEVICE)
> -		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
> +		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
>  	else
> -		dma_cache_maint(handle, size, GNTTAB_CACHE_CLEAN);
> +		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_CLEAN);
>  }
> 
>  bool xen_arch_need_swiotlb(struct device *dev,
> 
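
To make the per-page translation concrete, here is a small userspace
model of the loop above, with dma_to_phys() stubbed out as a fixed
bus-to-physical offset such as a dma-ranges translation might introduce.
The addresses, sizes and the offset are all made-up example values:

#include <stdint.h>
#include <stdio.h>

#define XEN_PAGE_SHIFT	12
#define XEN_PAGE_SIZE	(1ULL << XEN_PAGE_SHIFT)
#define XEN_PAGE_MASK	(~(XEN_PAGE_SIZE - 1))

typedef uint64_t dma_addr_t;
typedef uint64_t phys_addr_t;

/* stand-in for the kernel's dma_to_phys(); the 0x80000000 offset is a
 * made-up example of a bus-to-physical translation */
static phys_addr_t fake_dma_to_phys(dma_addr_t handle)
{
	return handle + 0x80000000ULL;
}

int main(void)
{
	dma_addr_t handle = 0x10000000;	/* example bus address, page-aligned */
	size_t size = 0x2400;		/* spans three Xen pages */
	size_t offset = handle & ~XEN_PAGE_MASK;
	size_t length;

	handle &= XEN_PAGE_MASK;
	do {
		/* translate each Xen page separately, as the patch does */
		phys_addr_t paddr = fake_dma_to_phys(handle);

		if (size + offset > XEN_PAGE_SIZE)
			length = XEN_PAGE_SIZE - offset;
		else
			length = size;

		printf("flush phys %#llx offset %#zx length %#zx\n",
		       (unsigned long long)paddr, offset, length);

		offset = 0;
		handle += length;
		size -= length;
	} while (size);

	return 0;
}

Translating handle on every iteration, rather than translating it once
up front and then incrementing the physical address, keeps the loop
correct even if the bus-to-physical mapping were not a single linear
offset.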

Cheers,

--
Julien Grall
