Subject: [PATCH v2] net: stmmac: only enable DMA interrupts when ready

This driver's ->ndo_open() callback enables DMA interrupts and starts
the DMA channels, then requests interrupts with request_irq(), and only
then enables napi.
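
For reference, the ordering in stmmac_open() before this patch is
roughly as follows (heavily simplified sketch of the call order only;
error handling and unrelated setup omitted):

	stmmac_hw_setup(dev, true);		/* DMA started, DMA irqs enabled */
	stmmac_request_irq(dev);		/* request_irq() */
	stmmac_enable_all_queues(priv);		/* napi is enabled only here */
	netif_tx_start_all_queues(priv->dev);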

If RX DMA interrupts are received before napi is enabled, no processing
is done because napi_schedule_prep() will return false. If the network
has a lot of broadcast/multicast traffic, then the RX ring could fill up
completely before napi is enabled. When this happens, no further RX
interrupts will be delivered, and the driver will fail to receive any
packets.
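
For reference, the RX side of the DMA interrupt path follows the usual
NAPI pattern, roughly like this (simplified sketch of what
stmmac_napi_check() does; channel locking omitted, not the literal
code):

	/* illustrative sketch, not verbatim driver code */
	if (status & handle_rx) {
		/* returns false until napi_enable() has run, so an early
		 * interrupt is taken but no poll is scheduled and the RX
		 * ring is never drained */
		if (napi_schedule_prep(&ch->rx_napi)) {
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
			__napi_schedule(&ch->rx_napi);
		}
	}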

Fix this by only enabling DMA interrupts after all other initialization
is complete.

Fixes: 523f11b5d4fd72efb ("net: stmmac: move hardware setup for stmmac_open to new function")
Reported-by: Lars Persson <larper@axis.com>
Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
---
.../net/ethernet/stmicro/stmmac/stmmac_main.c | 28 +++++++++++++++++--
1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6708ca2aa4f7..43978558d6c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2260,6 +2260,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
 	stmmac_stop_tx(priv, priv->ioaddr, chan);
 }
 
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+	u32 chan;
+
+	for (chan = 0; chan < dma_csr_ch; chan++) {
+		struct stmmac_channel *ch = &priv->channel[chan];
+		unsigned long flags;
+
+		spin_lock_irqsave(&ch->lock, flags);
+		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+}
+
 /**
  * stmmac_start_all_dma - start all RX and TX DMA channels
  * @priv: driver private structure
@@ -2902,8 +2919,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 	stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
 
 	/* DMA CSR Channel configuration */
-	for (chan = 0; chan < dma_csr_ch; chan++)
+	for (chan = 0; chan < dma_csr_ch; chan++) {
 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+	}
 
 	/* DMA RX Channel Configuration */
 	for (chan = 0; chan < rx_channels_count; chan++) {
@@ -3759,6 +3778,7 @@ static int stmmac_open(struct net_device *dev)
 
 	stmmac_enable_all_queues(priv);
 	netif_tx_start_all_queues(priv->dev);
+	stmmac_enable_all_dma_irq(priv);
 
 	return 0;
 
@@ -6508,8 +6528,10 @@ int stmmac_xdp_open(struct net_device *dev)
 	}
 
 	/* DMA CSR Channel configuration */
-	for (chan = 0; chan < dma_csr_ch; chan++)
+	for (chan = 0; chan < dma_csr_ch; chan++) {
 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+	}
 
 	/* Adjust Split header */
 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
@@ -6570,6 +6592,7 @@ int stmmac_xdp_open(struct net_device *dev)
 	stmmac_enable_all_queues(priv);
 	netif_carrier_on(dev);
 	netif_tx_start_all_queues(dev);
+	stmmac_enable_all_dma_irq(priv);
 
 	return 0;
 
@@ -7447,6 +7470,7 @@ int stmmac_resume(struct device *dev)
 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
 
 	stmmac_enable_all_queues(priv);
+	stmmac_enable_all_dma_irq(priv);
 
 	mutex_unlock(&priv->lock);
 	rtnl_unlock();
--
2.34.1