From: Zhang Changzhong <zhangchangzhong@huawei.com>
Subject: [PATCH net v2 3/3] net: nixge: fix tx queue handling
Date: 15 Nov 2022
Currently the driver checks for available space at the beginning of
nixge_start_xmit(), and when there is not enough space for the packet,
it returns NETDEV_TX_OK, which causes packet loss and a memory leak.

Instead, the queue should be stopped after the packet has been added to
the BD ring, when there may not be enough space for the next packet. In
addition, the queue should only be woken up when there is enough space
for a packet with the maximum number of frags.

Fixes: 492caffa8a1a ("net: ethernet: nixge: Add support for National Instruments XGE netdev")
Signed-off-by: Zhang Changzhong <zhangchangzhong@huawei.com>
---
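Note (not part of the commit message): the stop/wake sequence below
follows the usual netdev pattern. A rough sketch of the idea, with a
hypothetical ring_full() helper standing in for
nixge_check_tx_bd_space(priv, MAX_SKB_FRAGS + 1):

	/* in ndo_start_xmit(), after the packet has been queued */
	if (ring_full(priv)) {
		netif_stop_queue(ndev);
		smp_mb();		/* pairs with barrier in completion path */
		if (!ring_full(priv))	/* completion may just have freed space */
			netif_wake_queue(ndev);
	}

	/* in the TX completion handler, after descriptors are reclaimed */
	smp_mb();			/* pairs with barrier in ndo_start_xmit() */
	if (!ring_full(priv))
		netif_wake_queue(ndev);

Without the paired barriers, the completion path could reclaim space and
perform its wake check before the xmit path stops the queue, and the xmit
path's re-check could then miss the freed space, leaving the queue stopped
with nothing left to wake it.
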
drivers/net/ethernet/ni/nixge.c | 54 +++++++++++++++++++++++++++++------------
1 file changed, 38 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 91b7ebc..3776a03 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -457,6 +457,17 @@ static void nixge_tx_skb_unmap(struct nixge_priv *priv,
}
}

+static int nixge_check_tx_bd_space(struct nixge_priv *priv,
+ int num_frag)
+{
+ struct nixge_hw_dma_bd *cur_p;
+
+ cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
+ if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
+ return NETDEV_TX_BUSY;
+ return 0;
+}
+
static void nixge_start_xmit_done(struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
@@ -488,19 +499,13 @@ static void nixge_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;

- if (packets)
- netif_wake_queue(ndev);
-}
-
-static int nixge_check_tx_bd_space(struct nixge_priv *priv,
- int num_frag)
-{
- struct nixge_hw_dma_bd *cur_p;
+ if (packets) {
+ /* Matches barrier in nixge_start_xmit */
+ smp_mb();

- cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
- if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
- return NETDEV_TX_BUSY;
- return 0;
+ if (!nixge_check_tx_bd_space(priv, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
}

static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
@@ -518,10 +523,15 @@ static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
tx_skb = &priv->tx_skb[priv->tx_bd_tail];

- if (nixge_check_tx_bd_space(priv, num_frag + 1)) {
- if (!netif_queue_stopped(ndev))
- netif_stop_queue(ndev);
- return NETDEV_TX_OK;
+ if (unlikely(nixge_check_tx_bd_space(priv, num_frag + 1))) {
+ /* Should not happen as last start_xmit call should have
+ * checked for sufficient space and queue should only be
+ * woken when sufficient space is available.
+ */
+ netif_stop_queue(ndev);
+ if (net_ratelimit())
+ netdev_err(ndev, "BUG! TX Ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
}

cur_phys = dma_map_single(ndev->dev.parent, skb->data,
@@ -572,6 +582,18 @@ static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
++priv->tx_bd_tail;
priv->tx_bd_tail %= TX_BD_NUM;

+ /* Stop queue if next transmit may not have space */
+ if (nixge_check_tx_bd_space(priv, MAX_SKB_FRAGS + 1)) {
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in nixge_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (!nixge_check_tx_bd_space(priv, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
+
return NETDEV_TX_OK;
frag_err:
for (; ii > 0; ii--) {
--
2.9.5