From:	Shenwei Wang <>
Subject:	[PATCH v5 2/3] net: fec: add xdp statistics
Date:	Tue, 15 Nov 2022 14:49:50 -0600
Add XDP statistics to the ethtool stats, using u64 counters to record the XDP events.
Signed-off-by: Shenwei Wang <shenwei.wang@nxp.com>
Reported-by: kernel test robot <lkp@intel.com>
---
 drivers/net/ethernet/freescale/fec.h      | 15 +++++
 drivers/net/ethernet/freescale/fec_main.c | 74 +++++++++++++++++++++--
 2 files changed, 83 insertions(+), 6 deletions(-)
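As a usage note (not part of the patch itself): with this change applied, the
new counters are appended after the existing MIB statistics in the ethtool
stats output. A hypothetical excerpt, with illustrative values only:

    # ethtool -S eth0 | grep xdp
         rx_xdp_redirect: 0
         rx_xdp_pass: 12034
         rx_xdp_drop: 5
         rx_xdp_tx: 0
         rx_xdp_tx_errors: 0
         tx_xdp_xmit: 0
         tx_xdp_xmit_errors: 0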
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 61e847b18343..adbe661552be 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -526,6 +526,19 @@ struct fec_enet_priv_txrx_info {
 	struct sk_buff *skb;
 };
 
+enum {
+	RX_XDP_REDIRECT = 0,
+	RX_XDP_PASS,
+	RX_XDP_DROP,
+	RX_XDP_TX,
+	RX_XDP_TX_ERRORS,
+	TX_XDP_XMIT,
+	TX_XDP_XMIT_ERRORS,
+
+	/* The following must be the last one */
+	XDP_STATS_TOTAL,
+};
+
 struct fec_enet_priv_tx_q {
 	struct bufdesc_prop bd;
 	unsigned char *tx_bounce[TX_RING_SIZE];
@@ -546,6 +559,8 @@ struct fec_enet_priv_rx_q {
 	/* page_pool */
 	struct page_pool *page_pool;
 	struct xdp_rxq_info xdp_rxq;
+	struct u64_stats_sync syncp;
+	u64 stats[XDP_STATS_TOTAL];
 
 	/* rx queue number, in the range 0-7 */
 	u8 id;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 616eea712ca8..bb2157022022 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1507,7 +1507,8 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
 
 static u32
 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
-		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq,
+		 u32 stats[], int index)
 {
 	unsigned int sync, len = xdp->data_end - xdp->data;
 	u32 ret = FEC_ENET_XDP_PASS;
@@ -1523,10 +1524,12 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
 
 	switch (act) {
 	case XDP_PASS:
+		stats[RX_XDP_PASS]++;
 		ret = FEC_ENET_XDP_PASS;
 		break;
 
 	case XDP_REDIRECT:
+		stats[RX_XDP_REDIRECT]++;
 		err = xdp_do_redirect(fep->netdev, xdp, prog);
 		if (!err) {
 			ret = FEC_ENET_XDP_REDIR;
@@ -1549,6 +1552,7 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
 		fallthrough;    /* handle aborts by dropping packet */
 
 	case XDP_DROP:
+		stats[RX_XDP_DROP]++;
 		ret = FEC_ENET_XDP_CONSUMED;
 		page = virt_to_head_page(xdp->data);
 		page_pool_put_page(rxq->page_pool, page, sync, true);
@@ -1582,6 +1586,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
 	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
 	u32 data_start = FEC_ENET_XDP_HEADROOM;
+	u32 xdp_stats[XDP_STATS_TOTAL] = { 0 };
 	struct xdp_buff xdp;
 	struct page *page;
 	u32 sub_len = 4;
@@ -1660,7 +1665,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 			/* subtract 16bit shift and FCS */
 			xdp_prepare_buff(&xdp, page_address(page), data_start,
 					 pkt_len - sub_len, false);
-			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq,
+					       xdp_stats, index);
 			xdp_result |= ret;
 			if (ret != FEC_ENET_XDP_PASS)
 				goto rx_processing_done;
@@ -1762,6 +1768,15 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	if (xdp_result & FEC_ENET_XDP_REDIR)
 		xdp_do_flush_map();
 
+	if (xdp_prog) {
+		int i;
+
+		u64_stats_update_begin(&rxq->syncp);
+		for (i = 0; i < XDP_STATS_TOTAL; i++)
+			rxq->stats[i] += xdp_stats[i];
+		u64_stats_update_end(&rxq->syncp);
+	}
+
 	return pkt_received;
 }
 
@@ -2701,6 +2716,16 @@ static const struct fec_stat {
 
 #define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))
 
+static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
+	"rx_xdp_redirect",           /* RX_XDP_REDIRECT = 0, */
+	"rx_xdp_pass",               /* RX_XDP_PASS, */
+	"rx_xdp_drop",               /* RX_XDP_DROP, */
+	"rx_xdp_tx",                 /* RX_XDP_TX, */
+	"rx_xdp_tx_errors",          /* RX_XDP_TX_ERRORS, */
+	"tx_xdp_xmit",               /* TX_XDP_XMIT, */
+	"tx_xdp_xmit_errors",        /* TX_XDP_XMIT_ERRORS, */
+};
+
 static void fec_enet_update_ethtool_stats(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
@@ -2710,6 +2735,26 @@ static void fec_enet_update_ethtool_stats(struct net_device *dev)
 		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
 }
 
+static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
+{
+	u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
+	struct fec_enet_priv_rx_q *rxq;
+	unsigned int start;
+	int i, j;
+
+	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+		rxq = fep->rx_queue[i];
+
+		do {
+			start = u64_stats_fetch_begin(&rxq->syncp);
+			for (j = 0; j < XDP_STATS_TOTAL; j++)
+				xdp_stats[j] += rxq->stats[j];
+		} while (u64_stats_fetch_retry(&rxq->syncp, start));
+	}
+
+	memcpy(data, xdp_stats, sizeof(xdp_stats));
+}
+
 static void fec_enet_get_ethtool_stats(struct net_device *dev,
 				       struct ethtool_stats *stats, u64 *data)
 {
@@ -2719,6 +2764,10 @@ static void fec_enet_get_ethtool_stats(struct net_device *dev,
 	fec_enet_update_ethtool_stats(dev);
 
 	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
+	data += FEC_STATS_SIZE / sizeof(u64);
+
+	fec_enet_get_xdp_stats(fep, data);
+	data += XDP_STATS_TOTAL;
 }
 
 static void fec_enet_get_strings(struct net_device *netdev,
@@ -2727,9 +2776,14 @@ static void fec_enet_get_strings(struct net_device *netdev,
 	int i;
 	switch (stringset) {
 	case ETH_SS_STATS:
-		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
-			memcpy(data + i * ETH_GSTRING_LEN,
-			       fec_stats[i].name, ETH_GSTRING_LEN);
+		for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
+			memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
+			strscpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
 		break;
 	case ETH_SS_TEST:
 		net_selftest_get_strings(data);
@@ -2741,7 +2795,7 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
 {
 	switch (sset) {
 	case ETH_SS_STATS:
-		return ARRAY_SIZE(fec_stats);
+		return (ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL);
 	case ETH_SS_TEST:
 		return net_selftest_get_count();
 	default:
@@ -2752,6 +2806,7 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
 static void fec_enet_clear_ethtool_stats(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_priv_rx_q *rxq;
 	int i;
 
 	/* Disable MIB statistics counters */
@@ -2760,6 +2815,11 @@ static void fec_enet_clear_ethtool_stats(struct net_device *dev)
 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
 		writel(0, fep->hwp + fec_stats[i].offset);
 
+	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+		rxq = fep->rx_queue[i];
+		memset(rxq->stats, 0, sizeof(rxq->stats));
+	}
+
 	/* Don't disable MIB statistics counters */
 	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
 }
@@ -3126,6 +3186,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 		for (i = 0; i < rxq->bd.ring_size; i++)
 			page_pool_release_page(rxq->page_pool,
 					       rxq->rx_skb_info[i].page);
 
+		memset(rxq->stats, 0, sizeof(rxq->stats));
+
 		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
 			xdp_rxq_info_unreg(&rxq->xdp_rxq);
 		page_pool_destroy(rxq->page_pool);
-- 
2.34.1
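For context on the synchronization pattern the patch relies on: u64_stats_sync
exists because a 64-bit counter cannot be read atomically on 32-bit machines,
so writers bracket their updates and readers retry the copy whenever they raced
with a writer. The patch keeps the RX hot path cheap by accumulating into a
local u32 array during each NAPI poll and folding the batch into the per-queue
u64 counters once, inside a single update section. Below is a minimal sketch of
that pattern; the demo_* names are hypothetical, only the fec_* identifiers in
the patch are real:

    /* Sketch of the u64_stats_sync writer/reader pattern (illustrative,
     * not part of this patch). The syncp must be initialized once with
     * u64_stats_init() at setup time.
     */
    #include <linux/u64_stats_sync.h>

    #define DEMO_NSTATS 4

    struct demo_rxq {
            struct u64_stats_sync syncp;    /* seqcount on 32-bit, empty on 64-bit */
            u64 stats[DEMO_NSTATS];
    };

    /* Writer (NAPI poll context): fold a locally accumulated batch inside
     * one update section so a reader never sees a half-updated set.
     */
    static void demo_fold_stats(struct demo_rxq *q, const u32 batch[DEMO_NSTATS])
    {
            int i;

            u64_stats_update_begin(&q->syncp);
            for (i = 0; i < DEMO_NSTATS; i++)
                    q->stats[i] += batch[i];
            u64_stats_update_end(&q->syncp);
    }

    /* Reader (ethtool context): retry the whole copy if a writer ran
     * concurrently; on 64-bit this loop executes exactly once.
     */
    static void demo_read_stats(struct demo_rxq *q, u64 out[DEMO_NSTATS])
    {
            unsigned int start;
            int i;

            do {
                    start = u64_stats_fetch_begin(&q->syncp);
                    for (i = 0; i < DEMO_NSTATS; i++)
                            out[i] = q->stats[i];
            } while (u64_stats_fetch_retry(&q->syncp, start));
    }

On 64-bit kernels the begin/end helpers compile away to nothing, so the cost of
the scheme is only paid on configurations where the tearing race actually
exists.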