    From: Hsin-Yi Wang <hsinyi@chromium.org>
    Subject: [PATCH 1/3] dma: swiotlb: Allow restricted-dma-pool to customize IO_TLB_SEGSIZE
    Date: Tue, 23 Nov 2021
    The default IO_TLB_SEGSIZE is 128 slabs per segment, but some use cases
    require more slabs per mapping; with a fixed segment size,
    swiotlb_find_slots() will fail for any mapping larger than
    IO_TLB_SIZE * IO_TLB_SEGSIZE.

    This patch allows each restricted-dma-pool to choose its own segment
    size through a new "io-tlb-segsize" device-tree property. The value
    must be a power of two; anything else falls back to the default
    IO_TLB_SEGSIZE.
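
    For example, a pool with io-tlb-segsize = 256 doubles
    swiotlb_max_mapping_size() for its devices, from 128 * 2 KiB = 256 KiB
    to 256 * 2 KiB = 512 KiB. A minimal sketch of such a node follows;
    the node name, addresses and sizes are illustrative placeholders, not
    taken from real hardware:

    	reserved-memory {
    		#address-cells = <2>;
    		#size-cells = <2>;
    		ranges;

    		restricted_dma: restricted-dma@c0000000 {
    			compatible = "restricted-dma-pool";
    			reg = <0x0 0xc0000000 0x0 0x400000>;
    			io-tlb-segsize = <256>;
    		};
    	};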

    Signed-off-by: Hsin-Yi Wang <hsinyi@chromium.org>
    ---
     include/linux/swiotlb.h |  1 +
     kernel/dma/swiotlb.c    | 34 ++++++++++++++++++++++++++--------
     2 files changed, 27 insertions(+), 8 deletions(-)

    diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
    index 569272871375c4..73b3312f23e65b 100644
    --- a/include/linux/swiotlb.h
    +++ b/include/linux/swiotlb.h
    @@ -95,6 +95,7 @@ struct io_tlb_mem {
     	unsigned long nslabs;
     	unsigned long used;
     	unsigned int index;
    +	unsigned int io_tlb_segsize;
     	spinlock_t lock;
     	struct dentry *debugfs;
     	bool late_alloc;
    diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
    index 8e840fbbed7c7a..021eef1844ca4c 100644
    --- a/kernel/dma/swiotlb.c
    +++ b/kernel/dma/swiotlb.c
    @@ -145,9 +145,10 @@ void swiotlb_print_info(void)
     		 (mem->nslabs << IO_TLB_SHIFT) >> 20);
     }
     
    -static inline unsigned long io_tlb_offset(unsigned long val)
    +static inline unsigned long io_tlb_offset(unsigned long val,
    +					   unsigned long io_tlb_segsize)
     {
    -	return val & (IO_TLB_SEGSIZE - 1);
    +	return val & (io_tlb_segsize - 1);
     }
     
     static inline unsigned long nr_slots(u64 val)
    @@ -186,13 +187,16 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
     	mem->end = mem->start + bytes;
     	mem->index = 0;
     	mem->late_alloc = late_alloc;
    +	if (!mem->io_tlb_segsize)
    +		mem->io_tlb_segsize = IO_TLB_SEGSIZE;
     
     	if (swiotlb_force == SWIOTLB_FORCE)
     		mem->force_bounce = true;
     
     	spin_lock_init(&mem->lock);
     	for (i = 0; i < mem->nslabs; i++) {
    -		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
    +		mem->slots[i].list = mem->io_tlb_segsize -
    +				     io_tlb_offset(i, mem->io_tlb_segsize);
     		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
     		mem->slots[i].alloc_size = 0;
     	}
    @@ -523,7 +527,7 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
     			alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));
     	}
     	for (i = index - 1;
    -	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
    +	     io_tlb_offset(i, mem->io_tlb_segsize) != mem->io_tlb_segsize - 1 &&
     	     mem->slots[i].list; i--)
     		mem->slots[i].list = ++count;
     
    @@ -603,7 +607,7 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
     	 * with slots below and above the pool being returned.
     	 */
     	spin_lock_irqsave(&mem->lock, flags);
    -	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
    +	if (index + nslots < ALIGN(index + 1, mem->io_tlb_segsize))
     		count = mem->slots[index + nslots].list;
     	else
     		count = 0;
    @@ -623,8 +627,8 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
     	 * available (non zero)
     	 */
     	for (i = index - 1;
    -	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
    -	     i--)
    +	     io_tlb_offset(i, mem->io_tlb_segsize) != mem->io_tlb_segsize - 1 &&
    +	     mem->slots[i].list; i--)
     		mem->slots[i].list = ++count;
     	mem->used -= nslots;
     	spin_unlock_irqrestore(&mem->lock, flags);
    @@ -701,7 +705,9 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,

     size_t swiotlb_max_mapping_size(struct device *dev)
     {
    -	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
    +	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
    +
    +	return ((size_t)IO_TLB_SIZE) * mem->io_tlb_segsize;
     }
     
     bool is_swiotlb_active(struct device *dev)
    @@ -788,6 +794,7 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
     {
     	struct io_tlb_mem *mem = rmem->priv;
     	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
    +	struct device_node *np;
     
     	/*
     	 * Since multiple devices can share the same pool, the private data,
    @@ -808,6 +815,17 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,

     		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
     				     rmem->size >> PAGE_SHIFT);
    +
    +		np = of_find_node_by_phandle(rmem->phandle);
    +		if (np) {
    +			if (!of_property_read_u32(np, "io-tlb-segsize",
    +						  &mem->io_tlb_segsize)) {
    +				if (hweight32(mem->io_tlb_segsize) != 1)
    +					mem->io_tlb_segsize = IO_TLB_SEGSIZE;
    +			}
    +			of_node_put(np);
    +		}
    +
     		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
     		mem->force_bounce = true;
     		mem->for_alloc = true;
    --
    2.34.0.rc2.393.gf8c9666880-goog