    Subject: Re: [PATCH 5/7] dma-direct: handle DMA_ATTR_NON_CONSISTENT in common code
    From: Helge Deller <deller@gmx.de>
    Date: 2019-06-25

    On 14.06.19 16:44, Christoph Hellwig wrote:
    > Only call into arch_dma_alloc if we require an uncached mapping,
    > and remove the parisc code manually doing normal cached
    > DMA_ATTR_NON_CONSISTENT allocations.
    >
    > Signed-off-by: Christoph Hellwig <hch@lst.de>

    Acked-by: Helge Deller <deller@gmx.de> # parisc

    Boot-tested a 32-bit kernel on PCX-L and PCX-W2 machines (although
    the patches no longer apply cleanly against git head).

    Helge
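
    For context, a cached DMA_ATTR_NON_CONSISTENT buffer is still requested
    through the regular DMA API after this change; only the backing moves
    from the removed parisc helpers into the common dma-direct code. A
    minimal, hypothetical caller sketch (the device pointer, BUF_SIZE and
    example_alloc() are illustrative and not part of the patch):

        #include <linux/dma-mapping.h>

        #define BUF_SIZE 65536  /* illustrative size only */

        static void *buf;
        static dma_addr_t buf_dma;

        static int example_alloc(struct device *dev)
        {
                /*
                 * Ask for a cached, non-consistent buffer.  With this patch
                 * the request is served by dma_direct_alloc_pages() instead
                 * of arch code such as the removed pcx_dma_alloc().
                 */
                buf = dma_alloc_attrs(dev, BUF_SIZE, &buf_dma, GFP_KERNEL,
                                      DMA_ATTR_NON_CONSISTENT);
                if (!buf)
                        return -ENOMEM;

                /*
                 * The driver then does its own cache maintenance, e.g.
                 * dma_cache_sync(dev, buf, BUF_SIZE, DMA_TO_DEVICE),
                 * before handing the buffer to the device.
                 */
                return 0;
        }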

    > ---
    >  arch/parisc/kernel/pci-dma.c | 48 ++++++++++--------------------------
    >  kernel/dma/direct.c          |  4 +--
    >  2 files changed, 15 insertions(+), 37 deletions(-)
    >
    > diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
    > index 239162355b58..ca35d9a76e50 100644
    > --- a/arch/parisc/kernel/pci-dma.c
    > +++ b/arch/parisc/kernel/pci-dma.c
    > @@ -394,17 +394,20 @@ pcxl_dma_init(void)
    >
    >  __initcall(pcxl_dma_init);
    >
    > -static void *pcxl_dma_alloc(struct device *dev, size_t size,
    > -                dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
    > +void *arch_dma_alloc(struct device *dev, size_t size,
    > +                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
    >  {
    >          unsigned long vaddr;
    >          unsigned long paddr;
    >          int order;
    >
    > +        if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl)
    > +                return NULL;
    > +
    >          order = get_order(size);
    >          size = 1 << (order + PAGE_SHIFT);
    >          vaddr = pcxl_alloc_range(size);
    > -        paddr = __get_free_pages(flag | __GFP_ZERO, order);
    > +        paddr = __get_free_pages(gfp | __GFP_ZERO, order);
    >          flush_kernel_dcache_range(paddr, size);
    >          paddr = __pa(paddr);
    >          map_uncached_pages(vaddr, size, paddr);
    > @@ -421,44 +424,19 @@ static void *pcxl_dma_alloc(struct device *dev, size_t size,
    >          return (void *)vaddr;
    >  }
    >
    > -static void *pcx_dma_alloc(struct device *dev, size_t size,
    > -                dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
    > -{
    > -        void *addr;
    > -
    > -        if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
    > -                return NULL;
    > -
    > -        addr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
    > -        if (addr)
    > -                *dma_handle = (dma_addr_t)virt_to_phys(addr);
    > -
    > -        return addr;
    > -}
    > -
    > -void *arch_dma_alloc(struct device *dev, size_t size,
    > -                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
    > -{
    > -
    > -        if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
    > -                return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
    > -        else
    > -                return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
    > -}
    > -
    >  void arch_dma_free(struct device *dev, size_t size, void *vaddr,
    >                  dma_addr_t dma_handle, unsigned long attrs)
    >  {
    >          int order = get_order(size);
    >
    > -        if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
    > -                size = 1 << (order + PAGE_SHIFT);
    > -                unmap_uncached_pages((unsigned long)vaddr, size);
    > -                pcxl_free_range((unsigned long)vaddr, size);
    > +        WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 &&
    > +                     boot_cpu_data.cpu_type != pcxl);
    >
    > -                vaddr = __va(dma_handle);
    > -        }
    > -        free_pages((unsigned long)vaddr, get_order(size));
    > +        size = 1 << (order + PAGE_SHIFT);
    > +        unmap_uncached_pages((unsigned long)vaddr, size);
    > +        pcxl_free_range((unsigned long)vaddr, size);
    > +
    > +        free_pages((unsigned long)__va(dma_handle), order);
    >  }
    >
    >  void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
    > diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
    > index c2893713bf80..fc354f4f490b 100644
    > --- a/kernel/dma/direct.c
    > +++ b/kernel/dma/direct.c
    > @@ -191,7 +191,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
    >                  dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
    >  {
    >          if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
    > -            !dev_is_dma_coherent(dev))
    > +            dma_alloc_need_uncached(dev, attrs))
    >                  return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
    >          return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
    >  }
    > @@ -200,7 +200,7 @@ void dma_direct_free(struct device *dev, size_t size,
    >                  void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
    >  {
    >          if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
    > -            !dev_is_dma_coherent(dev))
    > +            dma_alloc_need_uncached(dev, attrs))
    >                  arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
    >          else
    >                  dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
    >
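
    As a side note on the kernel/dma/direct.c hunks: dma_alloc_need_uncached()
    is introduced earlier in this series and reports whether an allocation must
    be remapped uncached. It returns false both for coherent devices and for
    DMA_ATTR_NON_CONSISTENT requests, which is why dma_direct_alloc() can now
    satisfy the latter from dma_direct_alloc_pages() without calling into
    arch_dma_alloc(). A simplified sketch of that check (not the verbatim
    kernel helper, which also considers further attributes and config options):

        /*
         * Simplified: does this allocation need an uncached mapping to be
         * usable as a coherent buffer?
         */
        static inline bool dma_alloc_need_uncached(struct device *dev,
                        unsigned long attrs)
        {
                /* Coherent devices never need remapping. */
                if (dev_is_dma_coherent(dev))
                        return false;
                /*
                 * DMA_ATTR_NON_CONSISTENT callers accept a cached mapping
                 * and do their own cache maintenance, so the common cached
                 * allocation path can serve them.
                 */
                if (attrs & DMA_ATTR_NON_CONSISTENT)
                        return false;
                return true;
        }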
