Subject: Re: [PATCH v2 2/2] net: ti: icssg-prueth: Add ICSSG ethernet driver
On Tue, 2022-05-31 at 15:21 +0530, Puranjay Mohan wrote:
[...]
> +static int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
> +				    int budget)
> +{
> +	struct net_device *ndev = emac->ndev;
> +	struct cppi5_host_desc_t *desc_tx;
> +	struct netdev_queue *netif_txq;
> +	struct prueth_tx_chn *tx_chn;
> +	unsigned int total_bytes = 0;
> +	struct sk_buff *skb;
> +	dma_addr_t desc_dma;
> +	int res, num_tx = 0;
> +	void **swdata;
> +
> +	tx_chn = &emac->tx_chns[chn];
> +
> +	while (budget--) {
> +		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
> +		if (res == -ENODATA)
> +			break;
> +
> +		/* teardown completion */
> +		if (cppi5_desc_is_tdcm(desc_dma)) {
> +			if (atomic_dec_and_test(&emac->tdown_cnt))
> +				complete(&emac->tdown_complete);
> +			break;
> +		}
> +
> +		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
> +						     desc_dma);
> +		swdata = cppi5_hdesc_get_swdata(desc_tx);
> +
> +		skb = *(swdata);
> +		prueth_xmit_free(tx_chn, desc_tx);
> +
> +		ndev = skb->dev;
> +		ndev->stats.tx_packets++;
> +		ndev->stats.tx_bytes += skb->len;
> +		total_bytes += skb->len;
> +		napi_consume_skb(skb, budget);

The above is incorrect. In this loop's last iteration 'budget' will be
0, so napi_consume_skb() will wrongly assume the caller is not in NAPI
context and free the skb via the slower non-NAPI path.
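
Something along these lines (completely untested, just to sketch the
idea) would keep 'budget' untouched, and thus non-zero, on every
iteration; 'num_tx' already exists in this function:

	/* count completed packets instead of decrementing 'budget',
	 * so napi_consume_skb() always sees a non-zero budget and
	 * stays on the NAPI free path
	 */
	while (num_tx < budget) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		/* ... teardown check and skb accounting as above ... */

		napi_consume_skb(skb, budget);
		num_tx++;
	}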

> +static int prueth_dma_rx_push(struct prueth_emac *emac,
> +			      struct sk_buff *skb,
> +			      struct prueth_rx_chn *rx_chn)
> +{
> +	struct cppi5_host_desc_t *desc_rx;
> +	struct net_device *ndev = emac->ndev;
> +	dma_addr_t desc_dma;
> +	dma_addr_t buf_dma;
> +	u32 pkt_len = skb_tailroom(skb);
> +	void **swdata;
> +
> +	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
> +	if (!desc_rx) {
> +		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
> +		return -ENOMEM;
> +	}
> +	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
> +
> +	buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
> +	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
> +		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
> +		netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
> +		return -EINVAL;
> +	}
> +
> +	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
> +			 PRUETH_NAV_PS_DATA_SIZE);
> +	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
> +	cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
> +
> +	swdata = cppi5_hdesc_get_swdata(desc_rx);
> +	*swdata = skb;
> +
> +	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
> +					desc_rx, desc_dma);
> +}
> +
> +
> +static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
> +{
> +	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
> +	struct net_device *ndev = emac->ndev;
> +	struct cppi5_host_desc_t *desc_rx;
> +	dma_addr_t desc_dma, buf_dma;
> +	u32 buf_dma_len, pkt_len, port_id = 0;
> +	int ret;
> +	void **swdata;
> +	struct sk_buff *skb, *new_skb;
> +
> +	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
> +	if (ret) {
> +		if (ret != -ENODATA)
> +			netdev_err(ndev, "rx pop: failed: %d\n", ret);
> +		return ret;
> +	}
> +
> +	if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
> +		return 0;
> +
> +	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
> +
> +	swdata = cppi5_hdesc_get_swdata(desc_rx);
> +	skb = *swdata;
> +
> +	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
> +	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
> +	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
> +	/* firmware adds 4 CRC bytes, strip them */
> +	pkt_len -= 4;
> +	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
> +
> +	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
> +	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
> +
> +	skb->dev = ndev;
> +	if (!netif_running(skb->dev)) {
> +		dev_kfree_skb_any(skb);
> +		return 0;
> +	}
> +
> +	new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
> +	/* if allocation fails we drop the packet but push the
> +	 * descriptor back to the ring with old skb to prevent a stall
> +	 */
> +	if (!new_skb) {
> +		ndev->stats.rx_dropped++;
> +		new_skb = skb;
> +	} else {
> +		/* send the filled skb up the n/w stack */
> +		skb_put(skb, pkt_len);
> +		skb->protocol = eth_type_trans(skb, ndev);
> +		netif_receive_skb(skb);

This is (apparently) running in NAPI context, so you should use
napi_gro_receive() or napi_gro_frags() instead of netif_receive_skb().
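
e.g. something like this (untested; I'm assuming the rx NAPI instance
is reachable as emac->napi_rx, adjust to the real field name):

	/* we are in NAPI context: hand the skb to GRO */
	skb_put(skb, pkt_len);
	skb->protocol = eth_type_trans(skb, ndev);
	napi_gro_receive(&emac->napi_rx, skb);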


Cheers!

Paolo
