Blame SOURCES/0001-net-qede-fix-MTU-set-and-max-Rx-length.patch

c7ffa4
From 9e334305178fd3715c17088632544bf58e5836a9 Mon Sep 17 00:00:00 2001
c7ffa4
From: Rasesh Mody <rasesh.mody@cavium.com>
c7ffa4
Date: Sat, 27 Jan 2018 13:15:30 -0800
c7ffa4
Subject: [PATCH] net/qede: fix MTU set and max Rx length
c7ffa4
c7ffa4
This patch fixes issues related to MTU set and max_rx_pkt_len usage.
c7ffa4
 - Adjust MTU during device configuration when jumbo is enabled
c7ffa4
c7ffa4
 - In qede_set_mtu():
c7ffa4
   Return not supported for VF as currently we do not support it.
c7ffa4
c7ffa4
   Cache new MTU value in new_mtu for proper update.
c7ffa4
c7ffa4
   Add check for RXQ allocation before calculating RX buffer size
c7ffa4
   if not allocated, defer RX buffer size calculation till RXQ setup.
c7ffa4
c7ffa4
   Add check for device start state (dev_started) before performing
   device start/stop.
c7ffa4
c7ffa4
 - Use max_rx_pkt_len appropriately
c7ffa4
c7ffa4
 - Change QEDE_ETH_OVERHEAD macro to adjust driver specifics
c7ffa4
c7ffa4
Fixes: 4c4bdadfa9e7 ("net/qede: refactoring multi-queue implementation")
c7ffa4
Fixes: 9a6d30ae6d46 ("net/qede: refactoring vport handling code")
c7ffa4
Fixes: 1ef4c3a5c1f7 ("net/qede: prevent crash while changing MTU dynamically")
c7ffa4
Cc: stable@dpdk.org
c7ffa4
c7ffa4
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
c7ffa4
---
c7ffa4
 drivers/net/qede/qede_ethdev.c | 63 ++++++++++++++++++++++++++++--------------
c7ffa4
 drivers/net/qede/qede_rxtx.c   |  6 ++--
c7ffa4
 drivers/net/qede/qede_rxtx.h   |  2 +-
c7ffa4
 3 files changed, 47 insertions(+), 24 deletions(-)
c7ffa4
c7ffa4
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
c7ffa4
index 323e8ed3b..895a0da61 100644
c7ffa4
--- a/drivers/net/qede/qede_ethdev.c
c7ffa4
+++ b/drivers/net/qede/qede_ethdev.c
c7ffa4
@@ -1414,18 +1414,24 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
c7ffa4
 			return -ENOMEM;
c7ffa4
 	}
c7ffa4
 
c7ffa4
+	/* If jumbo enabled adjust MTU */
c7ffa4
+	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
c7ffa4
+		eth_dev->data->mtu =
c7ffa4
+				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
c7ffa4
+				ETHER_HDR_LEN - ETHER_CRC_LEN;
c7ffa4
+
c7ffa4
 	/* VF's MTU has to be set using vport-start where as
c7ffa4
 	 * PF's MTU can be updated via vport-update.
c7ffa4
 	 */
c7ffa4
 	if (IS_VF(edev)) {
c7ffa4
-		if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
c7ffa4
+		if (qede_start_vport(qdev, eth_dev->data->mtu))
c7ffa4
 			return -1;
c7ffa4
 	} else {
c7ffa4
-		if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
c7ffa4
+		if (qede_update_mtu(eth_dev, eth_dev->data->mtu))
c7ffa4
 			return -1;
c7ffa4
 	}
c7ffa4
 
c7ffa4
-	qdev->mtu = rxmode->max_rx_pkt_len;
c7ffa4
+	qdev->mtu = eth_dev->data->mtu;
c7ffa4
 	qdev->new_mtu = qdev->mtu;
c7ffa4
 
c7ffa4
 	/* Enable VLAN offloads by default */
c7ffa4
@@ -2306,16 +2312,23 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
c7ffa4
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
c7ffa4
 	struct rte_eth_dev_info dev_info = {0};
c7ffa4
 	struct qede_fastpath *fp;
c7ffa4
+	uint32_t max_rx_pkt_len;
c7ffa4
 	uint32_t frame_size;
c7ffa4
 	uint16_t rx_buf_size;
c7ffa4
 	uint16_t bufsz;
c7ffa4
+	bool restart = false;
c7ffa4
 	int i;
c7ffa4
 
c7ffa4
 	PMD_INIT_FUNC_TRACE(edev);
c7ffa4
+	if (IS_VF(edev))
c7ffa4
+		return -ENOTSUP;
c7ffa4
 	qede_dev_info_get(dev, &dev_info);
c7ffa4
-	frame_size = mtu + QEDE_ETH_OVERHEAD;
c7ffa4
+	max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
c7ffa4
+	frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
c7ffa4
 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
c7ffa4
-		DP_ERR(edev, "MTU %u out of range\n", mtu);
c7ffa4
+		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
c7ffa4
+		       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
c7ffa4
+			ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
c7ffa4
 		return -EINVAL;
c7ffa4
 	}
c7ffa4
 	if (!dev->data->scattered_rx &&
c7ffa4
@@ -2329,29 +2342,39 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
c7ffa4
 	 */
c7ffa4
 	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
c7ffa4
 	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
c7ffa4
-	qede_dev_stop(dev);
c7ffa4
+	if (dev->data->dev_started) {
c7ffa4
+		dev->data->dev_started = 0;
c7ffa4
+		qede_dev_stop(dev);
c7ffa4
+		restart = true;
c7ffa4
+	}
c7ffa4
 	rte_delay_ms(1000);
c7ffa4
-	qdev->mtu = mtu;
c7ffa4
+	qdev->new_mtu = mtu;
c7ffa4
 	/* Fix up RX buf size for all queues of the port */
c7ffa4
 	for_each_rss(i) {
c7ffa4
 		fp = &qdev->fp_array[i];
c7ffa4
-		bufsz = (uint16_t)rte_pktmbuf_data_room_size(
c7ffa4
-			fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
c7ffa4
-		if (dev->data->scattered_rx)
c7ffa4
-			rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
c7ffa4
-		else
c7ffa4
-			rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
c7ffa4
-		rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
c7ffa4
-		fp->rxq->rx_buf_size = rx_buf_size;
c7ffa4
-		DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
c7ffa4
-	}
c7ffa4
-	qede_dev_start(dev);
c7ffa4
-	if (frame_size > ETHER_MAX_LEN)
c7ffa4
+		if (fp->rxq != NULL) {
c7ffa4
+			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
c7ffa4
+				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
c7ffa4
+			if (dev->data->scattered_rx)
c7ffa4
+				rx_buf_size = bufsz + ETHER_HDR_LEN +
c7ffa4
+					      ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
c7ffa4
+			else
c7ffa4
+				rx_buf_size = frame_size;
c7ffa4
+			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
c7ffa4
+			fp->rxq->rx_buf_size = rx_buf_size;
c7ffa4
+			DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
c7ffa4
+		}
c7ffa4
+	}
c7ffa4
+	if (max_rx_pkt_len > ETHER_MAX_LEN)
c7ffa4
 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
c7ffa4
 	else
c7ffa4
 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
c7ffa4
+	if (!dev->data->dev_started && restart) {
c7ffa4
+		qede_dev_start(dev);
c7ffa4
+		dev->data->dev_started = 1;
c7ffa4
+	}
c7ffa4
 	/* update max frame size */
c7ffa4
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
c7ffa4
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
c7ffa4
 	/* Reassign back */
c7ffa4
 	dev->rx_pkt_burst = qede_recv_pkts;
c7ffa4
 	dev->tx_pkt_burst = qede_xmit_pkts;
c7ffa4
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
c7ffa4
index df248cf7e..810f0f394 100644
c7ffa4
--- a/drivers/net/qede/qede_rxtx.c
c7ffa4
+++ b/drivers/net/qede/qede_rxtx.c
c7ffa4
@@ -84,7 +84,6 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
c7ffa4
 	rxq->port_id = dev->data->port_id;
c7ffa4
 
c7ffa4
 	max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
c7ffa4
-	qdev->mtu = max_rx_pkt_len;
c7ffa4
 
c7ffa4
 	/* Fix up RX buffer size */
c7ffa4
 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
c7ffa4
@@ -97,9 +96,10 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
c7ffa4
 	}
c7ffa4
 
c7ffa4
 	if (dev->data->scattered_rx)
c7ffa4
-		rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
c7ffa4
+		rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
c7ffa4
+				   ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
c7ffa4
 	else
c7ffa4
-		rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
c7ffa4
+		rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
c7ffa4
 	/* Align to cache-line size if needed */
c7ffa4
 	rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
c7ffa4
 
c7ffa4
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
c7ffa4
index 6214c97f3..f1d366613 100644
c7ffa4
--- a/drivers/net/qede/qede_rxtx.h
c7ffa4
+++ b/drivers/net/qede/qede_rxtx.h
c7ffa4
@@ -64,7 +64,7 @@
c7ffa4
 #define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
c7ffa4
 					~(QEDE_FW_RX_ALIGN_END - 1))
c7ffa4
 /* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
c7ffa4
-#define QEDE_ETH_OVERHEAD	((ETHER_HDR_LEN) + ((2 * QEDE_VLAN_TAG_SIZE)) \
c7ffa4
+#define QEDE_ETH_OVERHEAD	(((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
c7ffa4
 				+ (QEDE_LLC_SNAP_HDR_LEN))
c7ffa4
 
c7ffa4
 #define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4			|\
c7ffa4
-- 
c7ffa4
2.14.3
c7ffa4