    From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
    Subject: [PATCH 13/21] dpaa_eth: fix iova handling for contiguous frames
    Date: 19 Sep 2018

    The driver relies on the no longer valid assumption that dma addresses
    (iovas) are identical to physical addresses and uses phys_to_virt() to
    make iova -> vaddr conversions. Fix this by adding a function that does
    proper iova -> phys conversions using the iommu api, and update the code
    to use it.
    Also, a dma_unmap_single() call had to be moved further down the code
    because iova -> vaddr conversions were required before the unmap.
    For now only the contiguous frame case is handled; the SG case is
    split out into a following patch.
    While at it, clean up a redundant dpaa_bpid2pool() call and pass the bp
    as a parameter.

    Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
    ---
    .../net/ethernet/freescale/dpaa/dpaa_eth.c | 44 ++++++++++---------
    1 file changed, 24 insertions(+), 20 deletions(-)
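
    As a quick illustration of the conversion chain this patch introduces
    (names taken from the patch, error handling elided; the sketch assumes
    the buffers sit in the kernel linear mapping, so phys_to_virt() on the
    resulting physical address stays valid):

        /* iova -> phys: ask the device's iommu domain for the mapping;
         * without an iommu, the dma address already is the physical one.
         */
        static phys_addr_t dpaa_iova_to_phys(struct device *dev, dma_addr_t addr)
        {
                struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

                return domain ? iommu_iova_to_phys(domain, addr) : addr;
        }

        /* iova -> vaddr, done on the Rx path before the buffer is unmapped */
        void *vaddr = phys_to_virt(dpaa_iova_to_phys(dpaa_bp->dev, addr));

    Only the iova -> phys step changes; the phys -> virt step keeps the
    pre-existing assumption that the buffers are in lowmem.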

    diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
    index ac9e50c8a556..e9e081c3f8cc 100644
    --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
    +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
    @@ -50,6 +50,7 @@
    #include <linux/highmem.h>
    #include <linux/percpu.h>
    #include <linux/dma-mapping.h>
    +#include <linux/iommu.h>
    #include <linux/sort.h>
    #include <soc/fsl/bman.h>
    #include <soc/fsl/qman.h>
    @@ -1595,6 +1596,17 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
    return 0;
    }

    +static phys_addr_t dpaa_iova_to_phys(struct device *dev, dma_addr_t addr)
    +{
    + struct iommu_domain *domain;
    +
    + domain = iommu_get_domain_for_dev(dev);
    + if (domain)
    + return iommu_iova_to_phys(domain, addr);
    + else
    + return addr;
    +}
    +
    /* Cleanup function for outgoing frame descriptors that were built on Tx path,
    * either contiguous frames or scatter/gather ones.
    * Skb freeing is not handled here.
    @@ -1617,7 +1629,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
    int nr_frags, i;
    u64 ns;

    - skbh = (struct sk_buff **)phys_to_virt(addr);
    + skbh = (struct sk_buff **)phys_to_virt(dpaa_iova_to_phys(dev, addr));
    skb = *skbh;

    if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
    @@ -1687,25 +1699,21 @@ static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
    * accommodate the shared info area of the skb.
    */
    static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
    - const struct qm_fd *fd)
    + const struct qm_fd *fd,
    + struct dpaa_bp *dpaa_bp,
    + void *vaddr)
    {
    ssize_t fd_off = qm_fd_get_offset(fd);
    - dma_addr_t addr = qm_fd_addr(fd);
    - struct dpaa_bp *dpaa_bp;
    struct sk_buff *skb;
    - void *vaddr;

    - vaddr = phys_to_virt(addr);
    WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

    - dpaa_bp = dpaa_bpid2pool(fd->bpid);
    - if (!dpaa_bp)
    - goto free_buffer;
    -
    skb = build_skb(vaddr, dpaa_bp->size +
    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
    - if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
    - goto free_buffer;
    + if (WARN_ONCE(!skb, "Build skb failure on Rx\n")) {
    + skb_free_frag(vaddr);
    + return NULL;
    + }
    WARN_ON(fd_off != priv->rx_headroom);
    skb_reserve(skb, fd_off);
    skb_put(skb, qm_fd_get_length(fd));
    @@ -1713,10 +1721,6 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
    skb->ip_summed = rx_csum_offload(priv, fd);

    return skb;
    -
    -free_buffer:
    - skb_free_frag(vaddr);
    - return NULL;
    }

    /* Build an skb with the data of the first S/G entry in the linear portion and
    @@ -2302,12 +2306,12 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
    if (!dpaa_bp)
    return qman_cb_dqrr_consume;

    - dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
    -
    /* prefetch the first 64 bytes of the frame or the SGT start */
    - vaddr = phys_to_virt(addr);
    + vaddr = phys_to_virt(dpaa_iova_to_phys(dpaa_bp->dev, addr));
    prefetch(vaddr + qm_fd_get_offset(fd));

    + dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
    +
    /* The only FD types that we may receive are contig and S/G */
    WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));

    @@ -2318,7 +2322,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
    (*count_ptr)--;

    if (likely(fd_format == qm_fd_contig))
    - skb = contig_fd_to_skb(priv, fd);
    + skb = contig_fd_to_skb(priv, fd, dpaa_bp, vaddr);
    else
    skb = sg_fd_to_skb(priv, fd);
    if (!skb)
    --
    2.17.1