Update DPDK version from 19.11 to 20.11 and add support for the hns3 PMD on Kunpeng 920 and Kunpeng 930.

Signed-off-by: speech_white <humin29@huawei.com>
From 081a3335ccdc6cce70ab9cde9e5df87e2dcd591f Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Tue, 23 Mar 2021 21:45:56 +0800
Subject: [PATCH 072/189] net/hns3: fix long task queue pairs reset time

Currently, the queue reset process is performed one queue at a time,
which is inefficient. However, the queues belonging to the same
function are reset at almost the same stage. To optimize the queue
reset process, a new capability has been added to the firmware command
HNS3_OPC_CFG_RST_TRIGGER so that all queues of the same function can
be reset at a time, and the related queue reset MBX message is
adjusted in the same way.

Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations")
Cc: stable@dpdk.org

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
 drivers/net/hns3/hns3_cmd.h  |   8 ++-
 drivers/net/hns3/hns3_rxtx.c | 125 ++++++++++++++++++++++++++++++++++++-------
 2 files changed, 114 insertions(+), 19 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index e704d0c..30aca82 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -933,10 +933,16 @@ struct hns3_reset_tqp_queue_cmd {
 
 #define HNS3_CFG_RESET_MAC_B 3
 #define HNS3_CFG_RESET_FUNC_B 7
+#define HNS3_CFG_RESET_RCB_B 1
 struct hns3_reset_cmd {
 	uint8_t mac_func_reset;
 	uint8_t fun_reset_vfid;
-	uint8_t rsv[22];
+	uint8_t fun_reset_rcb;
+	uint8_t rsv1;
+	uint16_t fun_reset_rcb_vqid_start;
+	uint16_t fun_reset_rcb_vqid_num;
+	uint8_t fun_reset_rcb_return_status;
+	uint8_t rsv2[15];
 };
 
 #define HNS3_QUERY_DEV_SPECS_BD_NUM 4
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 626f91f..0596c9c 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -629,10 +629,6 @@ hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
 	uint64_t end;
 	int ret;
 
-	ret = hns3_tqp_enable(hw, queue_id, false);
-	if (ret)
-		return ret;
-
 	/*
 	 * In current version VF is not supported when PF is driven by DPDK
 	 * driver, all task queue pairs are mapped to PF function, so PF's queue
@@ -679,11 +675,6 @@ hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
 	uint8_t msg_data[2];
 	int ret;
 
-	/* Disable VF's queue before send queue reset msg to PF */
-	ret = hns3_tqp_enable(hw, queue_id, false);
-	if (ret)
-		return ret;
-
 	memcpy(msg_data, &queue_id, sizeof(uint16_t));
 
 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
@@ -695,14 +686,105 @@ hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
 }
 
 static int
-hns3_reset_tqp(struct hns3_adapter *hns, uint16_t queue_id)
+hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status)
 {
-	struct hns3_hw *hw = &hns->hw;
+	struct hns3_reset_cmd *req;
+	struct hns3_cmd_desc desc;
+	int ret;
 
-	if (hns->is_vf)
-		return hns3vf_reset_tqp(hw, queue_id);
-	else
-		return hns3pf_reset_tqp(hw, queue_id);
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
+	req = (struct hns3_reset_cmd *)desc.data;
+	hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_RCB_B, 1);
+
+	/*
+	 * The start qid should be the global qid of the first tqp of the
+	 * function which should be reset in this port. Since our PF does
+	 * not support taking over VFs, we only need to reset function 0,
+	 * whose start qid is always 0.
+	 */
+	req->fun_reset_rcb_vqid_start = rte_cpu_to_le_16(0);
+	req->fun_reset_rcb_vqid_num = rte_cpu_to_le_16(hw->cfg_max_queues);
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "fail to send rcb reset cmd, ret = %d.", ret);
+		return ret;
+	}
+
+	*reset_status = req->fun_reset_rcb_return_status;
+	return 0;
+}
+
+static int
+hns3pf_reset_all_tqps(struct hns3_hw *hw)
+{
+#define HNS3_RESET_RCB_NOT_SUPPORT	0U
+#define HNS3_RESET_ALL_TQP_SUCCESS	1U
+	uint8_t reset_status;
+	int ret;
+	int i;
+
+	ret = hns3_reset_rcb_cmd(hw, &reset_status);
+	if (ret)
+		return ret;
+
+	/*
+	 * If the firmware version is low, it may not support the rcb reset
+	 * command that resets all the tqps at a time. In this case, the tqps
+	 * are reset one by one.
+	 */
+	if (reset_status == HNS3_RESET_RCB_NOT_SUPPORT) {
+		for (i = 0; i < hw->cfg_max_queues; i++) {
+			ret = hns3pf_reset_tqp(hw, i);
+			if (ret) {
+				hns3_err(hw,
+					 "fail to reset tqp, queue_id = %d, ret = %d.",
+					 i, ret);
+				return ret;
+			}
+		}
+	} else if (reset_status != HNS3_RESET_ALL_TQP_SUCCESS) {
+		hns3_err(hw, "fail to reset all tqps, reset_status = %u.",
+			 reset_status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int
+hns3vf_reset_all_tqps(struct hns3_hw *hw)
+{
+#define HNS3VF_RESET_ALL_TQP_DONE	1U
+	uint8_t reset_status;
+	uint8_t msg_data[2];
+	int ret;
+	int i;
+
+	memset(msg_data, 0, sizeof(uint16_t));
+	ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
+				sizeof(msg_data), true, &reset_status,
+				sizeof(reset_status));
+	if (ret) {
+		hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret);
+		return ret;
+	}
+
+	if (reset_status == HNS3VF_RESET_ALL_TQP_DONE)
+		return 0;
+
+	/*
+	 * If the firmware version or kernel PF version is low, it may not
+	 * support the rcb reset command that resets all the tqps at a time.
+	 * In this case, the tqps are reset one by one.
+	 */
+	for (i = 1; i < hw->cfg_max_queues; i++) {
+		ret = hns3vf_reset_tqp(hw, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 int
@@ -711,14 +793,21 @@ hns3_reset_all_tqps(struct hns3_adapter *hns)
 	struct hns3_hw *hw = &hns->hw;
 	int ret, i;
 
+	/* Disable all queues before resetting all queues */
 	for (i = 0; i < hw->cfg_max_queues; i++) {
-		ret = hns3_reset_tqp(hns, i);
+		ret = hns3_tqp_enable(hw, i, false);
 		if (ret) {
-			hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
+			hns3_err(hw,
+				 "fail to disable tqps before tqps reset, ret = %d.",
+				 ret);
 			return ret;
 		}
 	}
-	return 0;
+
+	if (hns->is_vf)
+		return hns3vf_reset_all_tqps(hw);
+	else
+		return hns3pf_reset_all_tqps(hw);
 }
 
 static int
--
2.7.4
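
Note (not part of the patch): the batched tqp reset added above is internal to the hns3 PMD and is never called by applications directly. As a rough illustration of where such a path is typically exercised, the sketch below restarts a port through the generic ethdev API. The helper name restart_port is hypothetical, and the assumption that the PMD tears down and re-initializes its task queue pairs on the stop/start path is ours, not a statement from the patch.

#include <rte_ethdev.h>

/*
 * Minimal sketch: stop and restart an ethdev port. As of DPDK 20.11,
 * rte_eth_dev_stop() returns an error code. Any per-queue teardown and
 * reset performed by the PMD happens behind these two calls.
 */
static int
restart_port(uint16_t port_id)
{
	int ret;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port_id);
}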