dpdk/0095-net-hns3-fix-queue-state-when-concurrent-with-reset.patch
From 94765554f123475e03c5900686b19c2227ce05ad Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Sat, 10 Apr 2021 09:11:19 +0800
Subject: [PATCH 095/189] net/hns3: fix queue state when concurrent with reset

At the end of a reset, the state of each queue has to be restored
according to the state saved in the driver. If queue start or stop
operations run concurrently with this restoration, the final queue
state becomes indeterminate.

This patch makes the queue start and stop operations acquire the hw
lock first. If the device is being restored from a reset at that
moment, they block until the restore completes.

Fixes: fa29fe45a7b4 ("net/hns3: support queue start and stop")
Cc: stable@dpdk.org

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
 drivers/net/hns3/hns3_rxtx.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index b864350..cbcfb0b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4287,10 +4287,12 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	if (!hns3_dev_indep_txrx_supported(hw))
 		return -ENOTSUP;
 
+	rte_spinlock_lock(&hw->lock);
 	ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
 	if (ret) {
 		hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
 			 rx_queue_id, ret);
+		rte_spinlock_unlock(&hw->lock);
 		return ret;
 	}
 
@@ -4298,11 +4300,13 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	if (ret) {
 		hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
 			 rx_queue_id, ret);
+		rte_spinlock_unlock(&hw->lock);
 		return ret;
 	}
 
 	hns3_enable_rxq(rxq, true);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	rte_spinlock_unlock(&hw->lock);
 
 	return ret;
 }
@@ -4329,12 +4333,14 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	if (!hns3_dev_indep_txrx_supported(hw))
 		return -ENOTSUP;
 
+	rte_spinlock_lock(&hw->lock);
 	hns3_enable_rxq(rxq, false);
 
 	hns3_rx_queue_release_mbufs(rxq);
 
 	hns3_reset_sw_rxq(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	rte_spinlock_unlock(&hw->lock);
 
 	return 0;
 }
@@ -4349,16 +4355,19 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	if (!hns3_dev_indep_txrx_supported(hw))
 		return -ENOTSUP;
 
+	rte_spinlock_lock(&hw->lock);
 	ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
 	if (ret) {
 		hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
 			 tx_queue_id, ret);
+		rte_spinlock_unlock(&hw->lock);
 		return ret;
 	}
 
 	hns3_init_txq(txq);
 	hns3_enable_txq(txq, true);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	rte_spinlock_unlock(&hw->lock);
 
 	return ret;
 }
@@ -4372,6 +4381,7 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	if (!hns3_dev_indep_txrx_supported(hw))
 		return -ENOTSUP;
 
+	rte_spinlock_lock(&hw->lock);
 	hns3_enable_txq(txq, false);
 	hns3_tx_queue_release_mbufs(txq);
 	/*
@@ -4383,6 +4393,7 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	 */
 	hns3_init_txq(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	rte_spinlock_unlock(&hw->lock);
 
 	return 0;
 }
--
2.7.4
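
For readers outside the hns3 code base, here is a standalone sketch of
the serialization pattern the patch establishes: queue start/stop and
the post-reset restore contend for one lock, so a start or stop issued
while a reset is in progress blocks until the restore finishes. The
sketch uses plain pthreads in place of rte_spinlock, and every name in
it (hw_lock, queue_state, queue_start, reset_restore) is invented for
illustration; it is not hns3 driver code.

/* Standalone illustration only -- not hns3 code. Build: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_state;                 /* 0 = stopped, 1 = started */

static int queue_start(void)
{
	pthread_mutex_lock(&hw_lock);   /* blocks while a reset restore runs */
	queue_state = 1;                /* queue reset/init would happen here */
	pthread_mutex_unlock(&hw_lock);
	return 0;
}

static void *reset_restore(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&hw_lock);
	/*
	 * Restore the queues from the state saved in the driver. Holding
	 * the lock here is what keeps a concurrent start/stop from racing
	 * with the restore and leaving the final state indeterminate.
	 */
	queue_state = 0;
	pthread_mutex_unlock(&hw_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reset_restore, NULL);
	queue_start();                  /* serialized against the restore */
	pthread_join(t, NULL);
	printf("final queue state: %d\n", queue_state);
	return 0;
}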