!98 Update DPDK baseline version

From: @speech_white
Reviewed-by: @wu-changsheng,@MrRlu
Signed-off-by: @MrRlu
This commit is contained in:
openeuler-ci-bot 2021-06-29 12:26:32 +00:00 committed by Gitee
commit ff135bf193
229 changed files with 29255 additions and 4537 deletions

View File

@ -1,53 +0,0 @@
From 62729b425f3b3a9ccb53b7a57f3dcc0db76d039e Mon Sep 17 00:00:00 2001
From: zhuhengbo <zhuhengbo1@huawei.com>
Date: Thu, 19 Mar 2020 17:10:51 +0800
Subject: [PATCH] dpdk:
add-secure-compile-option-and-compile-with-fPIC-for-static-lib
Signed-off-by: zhuhengbo <zhuhengbo1@huawei.com>
---
lib/librte_eal/common/include/rte_log.h | 1 +
mk/rte.lib.mk | 1 +
mk/target/generic/rte.vars.mk | 2 ++
3 files changed, 4 insertions(+)
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 1bb0e66..6426ea2 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -311,6 +311,7 @@ int rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
* - Negative on error.
*/
int rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap)
+ __attribute__((weak))
__attribute__((format(printf,3,0)));
/**
diff --git a/mk/rte.lib.mk b/mk/rte.lib.mk
index 655a1b1..4516d1c 100644
--- a/mk/rte.lib.mk
+++ b/mk/rte.lib.mk
@@ -6,6 +6,7 @@ include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
+CFLAGS += -fPIC
EXTLIB_BUILD ?= n
# VPATH contains at least SRCDIR
diff --git a/mk/target/generic/rte.vars.mk b/mk/target/generic/rte.vars.mk
index 3747221..bf3f4ff 100644
--- a/mk/target/generic/rte.vars.mk
+++ b/mk/target/generic/rte.vars.mk
@@ -75,6 +75,8 @@ ifeq ($(KERNELRELEASE),)
include $(RTE_SDK)/mk/rte.cpuflags.mk
# merge all CFLAGS
+CPU_CFLAGS += -fPIE -pie -fPIC -fstack-protector-strong -D_FORTIFY_SOURCE=2 -O2 -Wall
+CPU_CFLAGS += -Wl,-z,relro,-z,now,-z,noexecstack -Wtrampolines
CFLAGS := $(CPU_CFLAGS) $(EXECENV_CFLAGS) $(TOOLCHAIN_CFLAGS) $(MACHINE_CFLAGS)
CFLAGS += $(TARGET_CFLAGS)
--
2.19.1

View File

@ -0,0 +1,396 @@
From 8124e9841e2563dc916d4c8b0fce83d1ae470b85 Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Thu, 10 Dec 2020 20:48:42 +0800
Subject: [PATCH 001/189] net/hns3: adjust MAC address logging
Here the printing of MAC addresses is adjusted. After the
modification, only some bytes of the MAC address are
displayed.
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 53 +++++++++++++++++++++++----------------
drivers/net/hns3/hns3_ethdev.h | 2 ++
drivers/net/hns3/hns3_ethdev_vf.c | 32 +++++++++++------------
3 files changed, 49 insertions(+), 38 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 2011378..d6d3f03 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -102,6 +102,15 @@ static int hns3_remove_mc_addr(struct hns3_hw *hw,
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct rte_eth_dev *dev);
+void hns3_ether_format_addr(char *buf, uint16_t size,
+ const struct rte_ether_addr *ether_addr)
+{
+ snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
+ ether_addr->addr_bytes[0],
+ ether_addr->addr_bytes[4],
+ ether_addr->addr_bytes[5]);
+}
+
static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
@@ -1449,7 +1458,7 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
/* check if mac addr is valid */
if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
mac_str);
@@ -1489,7 +1498,7 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
return -ENOSPC;
}
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
/* check if we just hit the duplicate */
if (ret == 0) {
@@ -1515,7 +1524,7 @@ hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
addr = &hw->mc_addrs[i];
/* Check if there are duplicate addresses */
if (rte_is_same_ether_addr(addr, mac_addr)) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_err(hw, "failed to add mc mac addr, same addrs"
"(%s) is added by the set_mc_mac_addr_list "
@@ -1526,7 +1535,7 @@ hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
ret = hns3_add_mc_addr(hw, mac_addr);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
mac_str, ret);
@@ -1542,7 +1551,7 @@ hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
ret = hns3_remove_mc_addr(hw, mac_addr);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d",
mac_str, ret);
@@ -1576,7 +1585,7 @@ hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
if (ret) {
rte_spinlock_unlock(&hw->lock);
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
ret);
@@ -1599,7 +1608,7 @@ hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
/* check if mac addr is valid */
if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
mac_str);
@@ -1635,7 +1644,7 @@ hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
ret = hns3_remove_uc_addr_common(hw, mac_addr);
rte_spinlock_unlock(&hw->lock);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
ret);
@@ -1666,7 +1675,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev,
if (default_addr_setted) {
ret = hns3_remove_uc_addr_common(hw, oaddr);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
oaddr);
hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
mac_str, ret);
@@ -1677,7 +1686,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev,
ret = hns3_add_uc_addr_common(hw, mac_addr);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
goto err_add_uc_addr;
@@ -1699,7 +1708,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev,
err_pause_addr_cfg:
ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
if (ret_val) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_warn(hw,
"Failed to roll back to del setted mac addr(%s): %d",
@@ -1710,7 +1719,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev,
if (rm_succes) {
ret_val = hns3_add_uc_addr_common(hw, oaddr);
if (ret_val) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
oaddr);
hns3_warn(hw,
"Failed to restore old uc mac addr(%s): %d",
@@ -1746,7 +1755,7 @@ hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
if (ret) {
err = ret;
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_err(hw, "failed to %s mac addr(%s) index:%d "
"ret = %d.", del ? "remove" : "restore",
@@ -1795,7 +1804,7 @@ hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
/* Check if mac addr is valid */
if (!rte_is_multicast_ether_addr(mac_addr)) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
mac_str);
@@ -1823,7 +1832,7 @@ hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
if (ret) {
if (ret == -ENOSPC)
hns3_err(hw, "mc mac vlan table is full");
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
}
@@ -1842,7 +1851,7 @@ hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
/* Check if mac addr is valid */
if (!rte_is_multicast_ether_addr(mac_addr)) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
mac_str);
@@ -1870,7 +1879,7 @@ hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
}
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
}
@@ -1899,7 +1908,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
for (i = 0; i < nb_mc_addr; i++) {
addr = &mc_addr_set[i];
if (!rte_is_multicast_ether_addr(addr)) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_err(hw,
"failed to set mc mac addr, addr(%s) invalid.",
@@ -1910,7 +1919,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
/* Check if there are duplicate addresses */
for (j = i + 1; j < nb_mc_addr; j++) {
if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
- rte_ether_format_addr(mac_str,
+ hns3_ether_format_addr(mac_str,
RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_err(hw, "failed to set mc mac addr, "
@@ -1927,7 +1936,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) {
if (rte_is_same_ether_addr(addr,
&hw->data->mac_addrs[j])) {
- rte_ether_format_addr(mac_str,
+ hns3_ether_format_addr(mac_str,
RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_err(hw, "failed to set mc mac addr, "
@@ -2101,7 +2110,7 @@ hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
ret = hns3_add_mc_addr(hw, addr);
if (ret) {
err = ret;
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d",
del ? "Remove" : "Restore", mac_str, ret);
@@ -6160,7 +6169,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
rte_eth_random_addr(hw->mac.mac_addr);
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
(struct rte_ether_addr *)hw->mac.mac_addr);
hns3_warn(hw, "default mac_addr from firmware is an invalid "
"unicast address, using random MAC address %s",
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 4c40df1..31f78a1 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -935,6 +935,8 @@ int hns3_dev_filter_ctrl(struct rte_eth_dev *dev,
bool hns3_is_reset_pending(struct hns3_adapter *hns);
bool hns3vf_is_reset_pending(struct hns3_adapter *hns);
void hns3_update_link_status(struct hns3_hw *hw);
+void hns3_ether_format_addr(char *buf, uint16_t size,
+ const struct rte_ether_addr *ether_addr);
static inline bool
is_reset_pending(struct hns3_adapter *hns)
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 0366b9d..f09cabc 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -170,7 +170,7 @@ hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
RTE_ETHER_ADDR_LEN, false, NULL, 0);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
mac_str, ret);
@@ -190,7 +190,7 @@ hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
false, NULL, 0);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
mac_str, ret);
@@ -210,7 +210,7 @@ hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
addr = &hw->mc_addrs[i];
/* Check if there are duplicate addresses */
if (rte_is_same_ether_addr(addr, mac_addr)) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_err(hw, "failed to add mc mac addr, same addrs"
"(%s) is added by the set_mc_mac_addr_list "
@@ -221,7 +221,7 @@ hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
ret = hns3vf_add_mc_mac_addr(hw, mac_addr);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
mac_str, ret);
@@ -256,7 +256,7 @@ hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
rte_spinlock_unlock(&hw->lock);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
ret);
@@ -283,7 +283,7 @@ hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
rte_spinlock_unlock(&hw->lock);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "failed to remove mac addr(%s), ret = %d",
mac_str, ret);
@@ -324,12 +324,12 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
* -EPREM to VF driver through mailbox.
*/
if (ret == -EPERM) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
old_addr);
hns3_warn(hw, "Has permanet mac addr(%s) for vf",
mac_str);
} else {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
mac_str, ret);
@@ -366,7 +366,7 @@ hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
if (ret) {
err = ret;
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_err(hw, "failed to %s mac addr(%s) index:%d "
"ret = %d.", del ? "remove" : "restore",
@@ -388,7 +388,7 @@ hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
NULL, 0);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
mac_str, ret);
@@ -409,7 +409,7 @@ hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
NULL, 0);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
mac_addr);
hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
mac_str, ret);
@@ -439,7 +439,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw,
for (i = 0; i < nb_mc_addr; i++) {
addr = &mc_addr_set[i];
if (!rte_is_multicast_ether_addr(addr)) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_err(hw,
"failed to set mc mac addr, addr(%s) invalid.",
@@ -450,7 +450,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw,
/* Check if there are duplicate addresses */
for (j = i + 1; j < nb_mc_addr; j++) {
if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
- rte_ether_format_addr(mac_str,
+ hns3_ether_format_addr(mac_str,
RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_err(hw, "failed to set mc mac addr, "
@@ -467,7 +467,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw,
for (j = 0; j < HNS3_VF_UC_MACADDR_NUM; j++) {
if (rte_is_same_ether_addr(addr,
&hw->data->mac_addrs[j])) {
- rte_ether_format_addr(mac_str,
+ hns3_ether_format_addr(mac_str,
RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_err(hw, "failed to set mc mac addr, "
@@ -550,7 +550,7 @@ hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
ret = hns3vf_add_mc_mac_addr(hw, addr);
if (ret) {
err = ret;
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
addr);
hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
del ? "Remove" : "Restore", mac_str, ret);
@@ -2468,7 +2468,7 @@ hns3vf_check_default_mac_change(struct hns3_hw *hw)
ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
if (!ret) {
rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
&hw->data->mac_addrs[0]);
hns3_warn(hw, "Default MAC address has been changed to:"
" %s by the host PF kernel ethdev driver",
--
2.7.4

View File

@ -1,35 +0,0 @@
From 94cc085f2890fefd1f91c38b245262c4da232e02 Mon Sep 17 00:00:00 2001
From: zhuhengbo <zhuhengbo1@huawei.com>
Date: Thu, 19 Mar 2020 17:31:31 +0800
Subject: [PATCH] dpdk: add secure option in makefile.
reason: add secure option in makefile.
Signed-off-by: zhuhengbo <zhuhengbo1@huawei.com>
---
mk/exec-env/linux/rte.vars.mk | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/mk/exec-env/linux/rte.vars.mk b/mk/exec-env/linux/rte.vars.mk
index bea3f76..6844281 100644
--- a/mk/exec-env/linux/rte.vars.mk
+++ b/mk/exec-env/linux/rte.vars.mk
@@ -11,10 +11,13 @@
#
# examples for RTE_EXEC_ENV: linux, freebsd
#
+
+SEC_FLAGS = -fstack-protector-all -Wall -Wl,-z,relro,-z,now -Wl,-z,noexecstack -Wtrampolines -fPIC
+
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
-EXECENV_CFLAGS = -pthread -fPIC
+EXECENV_CFLAGS = -pthread -fPIC $(SEC_FLAGS)
else
-EXECENV_CFLAGS = -pthread
+EXECENV_CFLAGS = -pthread $(SEC_FLAGS)
endif
# include in every library to build
--
2.19.1

View File

@ -0,0 +1,128 @@
From e435d9efcb10bc24f528aacee20a25061a7fb70f Mon Sep 17 00:00:00 2001
From: "Min Hu (Connor)" <humin29@huawei.com>
Date: Thu, 10 Dec 2020 20:48:43 +0800
Subject: [PATCH 002/189] net/hns3: fix FEC state query
As FEC is not supported below 10 Gbps, reading the
CMD(HNS3_OPC_CONFIG_FEC_MODE) offered by the firmware
will fail on 10 Gbps devices.
This patch prevents reading this CMD when the link speed
is below 10 Gbps, as it is meaningless there.
Fixes: 9bf2ea8dbc65 ("net/hns3: support FEC")
Cc: stable@dpdk.org
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 40 ++++++++++++++++++++++++++--------------
drivers/net/hns3/hns3_ethdev.h | 2 ++
2 files changed, 28 insertions(+), 14 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index d6d3f03..7c34e38 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -100,7 +100,7 @@ static int hns3_add_mc_addr(struct hns3_hw *hw,
static int hns3_remove_mc_addr(struct hns3_hw *hw,
struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
-static int hns3_query_dev_fec_info(struct rte_eth_dev *dev);
+static int hns3_query_dev_fec_info(struct hns3_hw *hw);
void hns3_ether_format_addr(char *buf, uint16_t size,
const struct rte_ether_addr *ether_addr)
@@ -3010,13 +3010,6 @@ hns3_get_capability(struct hns3_hw *hw)
device_id == HNS3_DEV_ID_200G_RDMA)
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
- ret = hns3_query_dev_fec_info(eth_dev);
- if (ret) {
- PMD_INIT_LOG(ERR,
- "failed to query FEC information, ret = %d", ret);
- return ret;
- }
-
/* Get PCI revision id */
ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
HNS3_PCI_REVISION_ID);
@@ -3148,8 +3141,15 @@ hns3_get_configuration(struct hns3_hw *hw)
}
ret = hns3_get_board_configuration(hw);
- if (ret)
+ if (ret) {
PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret);
+ return ret;
+ }
+
+ ret = hns3_query_dev_fec_info(hw);
+ if (ret)
+ PMD_INIT_LOG(ERR,
+ "failed to query FEC information, ret = %d", ret);
return ret;
}
@@ -5797,6 +5797,16 @@ get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
struct hns3_cmd_desc desc;
int ret;
+ /*
+ * CMD(HNS3_OPC_CONFIG_FEC_MODE) read is not supported
+ * in device of link speed
+ * below 10 Gbps.
+ */
+ if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
+ *state = 0;
+ return 0;
+ }
+
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
req = (struct hns3_config_fec_cmd *)desc.data;
ret = hns3_cmd_send(hw, &desc, 1);
@@ -6003,14 +6013,14 @@ hns3_restore_fec(struct hns3_hw *hw)
}
static int
-hns3_query_dev_fec_info(struct rte_eth_dev *dev)
+hns3_query_dev_fec_info(struct hns3_hw *hw)
{
- struct hns3_adapter *hns = dev->data->dev_private;
- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
- struct hns3_pf *pf = &hns->pf;
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
+ struct rte_eth_dev *eth_dev = hns->eth_dev;
int ret;
- ret = hns3_fec_get(dev, &pf->fec_mode);
+ ret = hns3_fec_get(eth_dev, &pf->fec_mode);
if (ret)
hns3_err(hw, "query device FEC info failed, ret = %d", ret);
@@ -6096,6 +6106,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
+ hns->eth_dev = eth_dev;
+
eth_dev->process_private = (struct hns3_process_private *)
rte_zmalloc_socket("hns3_filter_list",
sizeof(struct hns3_process_private),
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 31f78a1..8d6b8cd 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -743,6 +743,8 @@ struct hns3_adapter {
struct hns3_vf vf;
};
+ struct rte_eth_dev *eth_dev;
+
bool rx_simple_allowed;
bool rx_vec_allowed;
bool tx_simple_allowed;
--
2.7.4

View File

@ -1,72 +0,0 @@
From dee3ff16473b956d8cfca15baa419e5dfdf47130 Mon Sep 17 00:00:00 2001
From: zhuhengbo <zhuhengbo1@huawei.com>
Date: Thu, 19 Mar 2020 17:14:25 +0800
Subject: [PATCH] dpdk: fix the deadlock in rte_eal_init when this
function is executed concurrently
Signed-off-by: zhuhengbo <zhuhengbo1@huawei.com>
---
lib/librte_eal/linux/eal/eal.c | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/lib/librte_eal/linux/eal/eal.c b/lib/librte_eal/linux/eal/eal.c
index c4233ec..a3bb9c6 100644
--- a/lib/librte_eal/linux/eal/eal.c
+++ b/lib/librte_eal/linux/eal/eal.c
@@ -1128,7 +1128,7 @@ rte_eal_init(int argc, char **argv)
rte_eal_init_alert("Cannot get hugepage information.");
rte_errno = EACCES;
rte_atomic32_clear(&run_once);
- return -1;
+ goto out;
}
}
@@ -1152,7 +1152,7 @@ rte_eal_init(int argc, char **argv)
rte_eal_init_alert("Cannot init logging.");
rte_errno = ENOMEM;
rte_atomic32_clear(&run_once);
- return -1;
+ goto out;
}
#ifdef VFIO_PRESENT
@@ -1160,7 +1160,7 @@ rte_eal_init(int argc, char **argv)
rte_eal_init_alert("Cannot init VFIO");
rte_errno = EAGAIN;
rte_atomic32_clear(&run_once);
- return -1;
+ goto out;
}
#endif
/* in secondary processes, memory init may allocate additional fbarrays
@@ -1170,13 +1170,13 @@ rte_eal_init(int argc, char **argv)
if (rte_eal_memzone_init() < 0) {
rte_eal_init_alert("Cannot init memzone");
rte_errno = ENODEV;
- return -1;
+ goto out;
}
if (rte_eal_memory_init() < 0) {
rte_eal_init_alert("Cannot init memory");
rte_errno = ENOMEM;
- return -1;
+ goto out;
}
/* the directories are locked during eal_hugepage_info_init */
@@ -1297,6 +1297,10 @@ rte_eal_init(int argc, char **argv)
rte_option_init();
return fctret;
+
+out:
+ eal_hugedirs_unlock();
+ return -1;
}
static int
--
2.19.1

View File

@ -0,0 +1,64 @@
From 0114915c86b2cadefe4c0323f28868c4f11be2f2 Mon Sep 17 00:00:00 2001
From: Ruifeng Wang <ruifeng.wang@arm.com>
Date: Tue, 12 Jan 2021 02:57:05 +0000
Subject: [PATCH 003/189] net/hns3: fix build with SVE
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Building with SVE extension enabled stopped with error:
error: ACLE function svwhilelt_b64_s32 requires ISA extension sve
18 | #define PG64_256BIT svwhilelt_b64(0, 4)
This is caused by unintentional cflags reset.
Fixed the issue by not touching cflags, and using flags defined by
compiler.
Fixes: 952ebacce4f2 ("net/hns3: support SVE Rx")
Cc: stable@dpdk.org
Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
---
drivers/net/hns3/hns3_rxtx.c | 4 ++--
drivers/net/hns3/meson.build | 1 -
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 88d3bab..5ac36b3 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -10,7 +10,7 @@
#include <rte_io.h>
#include <rte_net.h>
#include <rte_malloc.h>
-#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
#include <rte_cpuflags.h>
#endif
@@ -2467,7 +2467,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
static bool
hns3_check_sve_support(void)
{
-#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
return true;
#endif
diff --git a/drivers/net/hns3/meson.build b/drivers/net/hns3/meson.build
index 45cee34..5674d98 100644
--- a/drivers/net/hns3/meson.build
+++ b/drivers/net/hns3/meson.build
@@ -32,7 +32,6 @@ deps += ['hash']
if arch_subdir == 'arm' and dpdk_conf.get('RTE_ARCH_64')
sources += files('hns3_rxtx_vec.c')
if cc.get_define('__ARM_FEATURE_SVE', args: machine_args) != ''
- cflags = ['-DCC_SVE_SUPPORT']
sources += files('hns3_rxtx_vec_sve.c')
endif
endif
--
2.7.4

View File

@ -1,73 +0,0 @@
From c2d29472c3ddd1b2d66f34ae4025c9e074913eaa Mon Sep 17 00:00:00 2001
From: zhuhengbo <zhuhengbo1@huawei.com>
Date: Thu, 19 Mar 2020 17:38:13 +0800
Subject: [PATCH] dpdk: master core does not set affinity in libstorage
Signed-off-by: zhuhengbo <zhuhengbo1@huawei.com>
---
lib/librte_eal/common/eal_private.h | 6 ++++++
lib/librte_eal/linux/eal/eal.c | 12 ++++++++++++
lib/librte_eal/linux/eal/eal_thread.c | 2 +-
3 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index 8a9d493..597fd02 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -444,4 +444,10 @@ rte_option_usage(void);
uint64_t
eal_get_baseaddr(void);
+/**
+ * Determine whether the master core needs to set affinity.
+ * The master thread in the LibStorage application cannot set affinity.
+ **/
+bool
+eal_is_master_set_affinity(void);
#endif /* _EAL_PRIVATE_H_ */
diff --git a/lib/librte_eal/linux/eal/eal.c b/lib/librte_eal/linux/eal/eal.c
index a3bb9c6..8bb1842 100644
--- a/lib/librte_eal/linux/eal/eal.c
+++ b/lib/librte_eal/linux/eal/eal.c
@@ -103,6 +103,13 @@ static char runtime_dir[PATH_MAX];
static const char *default_runtime_dir = "/var/run";
+static bool master_set_affinity = true;
+bool
+eal_is_master_set_affinity(void)
+{
+ return master_set_affinity;
+}
+
int
eal_create_runtime_dir(void)
{
@@ -985,6 +992,11 @@ rte_eal_init(int argc, char **argv)
strlcpy(logid, p ? p + 1 : argv[0], sizeof(logid));
thread_id = pthread_self();
+ /* Master thread don't set affinity in LibStorage application */
+ if (strstr(logid, "LibStorage") != NULL) {
+ master_set_affinity = false;
+ }
+
eal_reset_internal_config(&internal_config);
/* set log level as early as possible */
diff --git a/lib/librte_eal/linux/eal/eal_thread.c b/lib/librte_eal/linux/eal/eal_thread.c
index 379773b..5b06108 100644
--- a/lib/librte_eal/linux/eal/eal_thread.c
+++ b/lib/librte_eal/linux/eal/eal_thread.c
@@ -84,7 +84,7 @@ void eal_thread_init_master(unsigned lcore_id)
RTE_PER_LCORE(_lcore_id) = lcore_id;
/* set CPU affinity */
- if (eal_thread_set_affinity() < 0)
+ if (eal_is_master_set_affinity() && eal_thread_set_affinity() < 0)
rte_panic("cannot set affinity\n");
}
--
2.19.1

View File

@ -0,0 +1,37 @@
From 7a6944f354740866bc35cf716ce3979999b7396e Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Wed, 6 Jan 2021 11:46:27 +0800
Subject: [PATCH 004/189] net/hns3: fix interception with flow director
The rte_fdir_conf structure has been deprecated and users
need to use the specified rule parameters of the rte_flow
structure when configuring a flow rule. As a result, it is
incorrectly used in the rte_flow API.
Fixes: fcba820d9b9e ("net/hns3: support flow director")
Cc: stable@dpdk.org
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_flow.c | 5 -----
1 file changed, 5 deletions(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index ee6ec15..f303df4 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1208,11 +1208,6 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Fdir not supported in VF");
- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "fdir_conf.mode isn't perfect");
-
step_mngr.items = first_items;
step_mngr.count = ARRAY_SIZE(first_items);
for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
--
2.7.4

View File

@ -1,28 +0,0 @@
From e970ca944126de31844a323b8e9e014ee2a9e128 Mon Sep 17 00:00:00 2001
From: zhuhengbo <zhuhengbo1@huawei.com>
Date: Thu, 19 Mar 2020 17:44:24 +0800
Subject: [PATCH] dpdk: change the log level in prepare_numa
reason: prevent log flooding
Signed-off-by: zhuhengbo <zhuhengbo1@huawei.com>
---
lib/librte_eal/linux/eal/eal_memalloc.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/librte_eal/linux/eal/eal_memalloc.c b/lib/librte_eal/linux/eal/eal_memalloc.c
index af6d0d0..cad4934 100644
--- a/lib/librte_eal/linux/eal/eal_memalloc.c
+++ b/lib/librte_eal/linux/eal/eal_memalloc.c
@@ -167,7 +167,7 @@ prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
if (get_mempolicy(oldpolicy, oldmask->maskp,
oldmask->size + 1, 0, 0) < 0) {
- RTE_LOG(ERR, EAL,
+ RTE_LOG(DEBUG, EAL,
"Failed to get current mempolicy: %s. "
"Assuming MPOL_DEFAULT.\n", strerror(errno));
*oldpolicy = MPOL_DEFAULT;
--
2.19.1

View File

@ -0,0 +1,85 @@
From 094b7303b76507e93d4b8e1eaa7e0c819362b67a Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Wed, 6 Jan 2021 11:46:28 +0800
Subject: [PATCH 005/189] net/hns3: fix xstats with id and names
Currently, the validity check for ids and values in the
hns3_dev_xstats_get_by_id API is incorrect, which will
cause a problem. Namely, if the ID range of the xstats
stats item does not include the basic stats item, the
app cannot obtain the corresponding xstats statistics
in hns3_dev_xstats_get_by_id.
Similarly, the hns3_dev_xstats_get_names_by_id interface
also has a problem.
Although the input parameter verification code cannot be
executed due to the implementation of the ethdev framework
interface, the driver needs to ensure the correctness of
the input parameters.
Fixes: 8839c5e202f3 ("net/hns3: support device stats")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_stats.c | 24 ++++++++++++++++++++++--
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index 91168ac..1597af3 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -933,9 +933,13 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
uint32_t i;
int ret;
- if (ids == NULL || size < cnt_stats)
+ if (ids == NULL && values == NULL)
return cnt_stats;
+ if (ids == NULL)
+ if (size < cnt_stats)
+ return cnt_stats;
+
/* Update tqp stats by read register */
ret = hns3_update_tqp_stats(hw);
if (ret) {
@@ -957,6 +961,15 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
return -EINVAL;
}
+ if (ids == NULL && values != NULL) {
+ for (i = 0; i < cnt_stats; i++)
+ memcpy(&values[i], &values_copy[i].value,
+ sizeof(values[i]));
+
+ rte_free(values_copy);
+ return cnt_stats;
+ }
+
for (i = 0; i < size; i++) {
if (ids[i] >= cnt_stats) {
hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
@@ -1005,9 +1018,16 @@ hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
uint64_t len;
uint32_t i;
- if (ids == NULL || xstats_names == NULL)
+ if (xstats_names == NULL)
return cnt_stats;
+ if (ids == NULL) {
+ if (size < cnt_stats)
+ return cnt_stats;
+
+ return hns3_dev_xstats_get_names(dev, xstats_names, cnt_stats);
+ }
+
len = cnt_stats * sizeof(struct rte_eth_xstat_name);
names_copy = rte_zmalloc("hns3_xstats_names", len, 0);
if (names_copy == NULL) {
--
2.7.4

View File

@ -1,35 +0,0 @@
From a78efd329d52e1adf813eb1b76352c2680b75961 Mon Sep 17 00:00:00 2001
From: zhuhengbo <zhuhengbo1@huawei.com>
Date: Thu, 19 Mar 2020 17:49:53 +0800
Subject: [PATCH] dpdk: modification summary
Signed-off-by: zhuhengbo <zhuhengbo1@huawei.com>
---
lib/librte_eal/linux/eal/eal_interrupts.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/librte_eal/linux/eal/eal_interrupts.c b/lib/librte_eal/linux/eal/eal_interrupts.c
index 1955324..3d73cce 100644
--- a/lib/librte_eal/linux/eal/eal_interrupts.c
+++ b/lib/librte_eal/linux/eal/eal_interrupts.c
@@ -1070,7 +1070,7 @@ eal_intr_thread_main(__rte_unused void *arg)
*/
if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
&pipe_event) < 0) {
- rte_panic("Error adding fd to %d epoll_ctl, %s\n",
+ RTE_LOG(ERR, EAL, "Error adding fd to %d epoll_ctl, %s\n",
intr_pipe.readfd, strerror(errno));
}
numfds++;
@@ -1089,7 +1089,7 @@ eal_intr_thread_main(__rte_unused void *arg)
*/
if (epoll_ctl(pfd, EPOLL_CTL_ADD,
src->intr_handle.fd, &ev) < 0){
- rte_panic("Error adding fd %d epoll_ctl, %s\n",
+ RTE_LOG(ERR, EAL, "Error adding fd %d epoll_ctl, %s\n",
src->intr_handle.fd, strerror(errno));
}
else
--
2.19.1

View File

@ -0,0 +1,38 @@
From 8cbd6fc2895cdbb0a64c2d2a31e53ff4b0608752 Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Wed, 6 Jan 2021 11:46:29 +0800
Subject: [PATCH 006/189] net/hns3: fix error code in xstats
The ethdev API has processed the failure to obtain
xstats statistics. Therefore, driver should return
an error code instead of 0 in 'hns3_dev_xstats_get'
API.
Fixes: 8839c5e202f3 ("net/hns3: support device stats")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_stats.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index 1597af3..42ec9b8 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -739,9 +739,9 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
if (!hns->is_vf) {
/* Update Mac stats */
ret = hns3_query_update_mac_stats(dev);
- if (ret) {
+ if (ret < 0) {
hns3_err(hw, "Update Mac stats fail : %d", ret);
- return 0;
+ return ret;
}
/* Get MAC stats from hw->hw_xstats.mac_stats struct */
--
2.7.4

View File

@ -1,26 +0,0 @@
From e7c97339d38f9d2655ca7834a99cc95b7427dd5c Mon Sep 17 00:00:00 2001
From: zhuhengbo <zhuhengbo1@huawei.com>
Date: Thu, 19 Mar 2020 17:53:22 +0800
Subject: [PATCH] dpdk: add secure compile option in pmdinfogen Makefile
Signed-off-by: zhuhengbo <zhuhengbo1@huawei.com>
---
buildtools/pmdinfogen/Makefile | 2 ++
1 file changed, 2 insertions(+)
diff --git a/buildtools/pmdinfogen/Makefile b/buildtools/pmdinfogen/Makefile
index a97a764..af41c74 100644
--- a/buildtools/pmdinfogen/Makefile
+++ b/buildtools/pmdinfogen/Makefile
@@ -15,6 +15,8 @@ HOSTAPP = dpdk-pmdinfogen
SRCS-y += pmdinfogen.c
HOST_CFLAGS += $(HOST_WERROR_FLAGS) -g
+HOST_CFLAGS += -fPIE -fPIC -fstack-protector-strong -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror
HOST_CFLAGS += -I$(RTE_OUTPUT)/include
+HOST_LDFLAGS += -Wl,-z,relro,-z,now -pie
include $(RTE_SDK)/mk/rte.hostapp.mk
--
2.19.1

View File

@ -0,0 +1,119 @@
From d799cf475d2d9b22264cef9c4447e48671e8d76a Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Wed, 6 Jan 2021 11:46:30 +0800
Subject: [PATCH 007/189] net/hns3: fix Rx/Tx errors stats
Abnormal errors stats in Rx/Tx datapath are statistics
items in driver, and displayed in xstats. They should
be cleared by the rte_eth_xstats_reset api, instead of
the rte_eth_stats_reset.
Fixes: c4b7d6761d01 ("net/hns3: get Tx abnormal errors in xstats")
Fixes: 521ab3e93361 ("net/hns3: add simple Rx path")
Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_stats.c | 59 ++++++++++++++++++++++++++++---------------
1 file changed, 39 insertions(+), 20 deletions(-)
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index 42ec9b8..62a712b 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -551,7 +551,6 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
struct hns3_hw *hw = &hns->hw;
struct hns3_cmd_desc desc_reset;
struct hns3_rx_queue *rxq;
- struct hns3_tx_queue *txq;
uint16_t i;
int ret;
@@ -581,29 +580,15 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
}
}
- /* Clear the Rx BD errors stats */
- for (i = 0; i != eth_dev->data->nb_rx_queues; ++i) {
+ /*
+ * Clear soft stats of rx error packet which will be dropped
+ * in driver.
+ */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; ++i) {
rxq = eth_dev->data->rx_queues[i];
if (rxq) {
rxq->pkt_len_errors = 0;
rxq->l2_errors = 0;
- rxq->l3_csum_errors = 0;
- rxq->l4_csum_errors = 0;
- rxq->ol3_csum_errors = 0;
- rxq->ol4_csum_errors = 0;
- }
- }
-
- /* Clear the Tx errors stats */
- for (i = 0; i != eth_dev->data->nb_tx_queues; ++i) {
- txq = eth_dev->data->tx_queues[i];
- if (txq) {
- txq->over_length_pkt_cnt = 0;
- txq->exceed_limit_bd_pkt_cnt = 0;
- txq->exceed_limit_bd_reassem_fail = 0;
- txq->unsupported_tunnel_pkt_cnt = 0;
- txq->queue_full_cnt = 0;
- txq->pkt_padding_fail_cnt = 0;
}
}
@@ -1053,6 +1038,38 @@ hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
return size;
}
+static void
+hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev)
+{
+ struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
+ int i;
+
+ /* Clear Rx dfx stats */
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq) {
+ rxq->l3_csum_errors = 0;
+ rxq->l4_csum_errors = 0;
+ rxq->ol3_csum_errors = 0;
+ rxq->ol4_csum_errors = 0;
+ }
+ }
+
+ /* Clear Tx dfx stats */
+ for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+ txq = dev->data->tx_queues[i];
+ if (txq) {
+ txq->over_length_pkt_cnt = 0;
+ txq->exceed_limit_bd_pkt_cnt = 0;
+ txq->exceed_limit_bd_reassem_fail = 0;
+ txq->unsupported_tunnel_pkt_cnt = 0;
+ txq->queue_full_cnt = 0;
+ txq->pkt_padding_fail_cnt = 0;
+ }
+ }
+}
+
int
hns3_dev_xstats_reset(struct rte_eth_dev *dev)
{
@@ -1068,6 +1085,8 @@ hns3_dev_xstats_reset(struct rte_eth_dev *dev)
/* Clear reset stats */
memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
+ hns3_tqp_dfx_stats_clear(dev);
+
if (hns->is_vf)
return 0;
--
2.7.4

View File

@ -1,92 +0,0 @@
From 145e9a29777cc660bd031670a7aeb8a4d3cb88a8 Mon Sep 17 00:00:00 2001
From: zhuhengbo <zhuhengbo1@huawei.com>
Date: Thu, 30 Apr 2020 02:53:08 -0400
Subject: [PATCH] dpdk: fix cpu flag error in Intel(R) Xeon(R) CPU E5-2620 v3 @
2.40GHz
Signed-off-by: zhuhengbo <zhuhengbo1@huawei.com>
---
config/defconfig_x86_64-cpu_v2-linux-gcc | 1 +
config/defconfig_x86_64-cpu_v2-linuxapp-gcc | 14 ++++++++
mk/machine/cpu_v2/rte.vars.mk | 39 +++++++++++++++++++++
3 files changed, 54 insertions(+)
create mode 120000 config/defconfig_x86_64-cpu_v2-linux-gcc
create mode 100644 config/defconfig_x86_64-cpu_v2-linuxapp-gcc
create mode 100644 mk/machine/cpu_v2/rte.vars.mk
diff --git a/config/defconfig_x86_64-cpu_v2-linux-gcc b/config/defconfig_x86_64-cpu_v2-linux-gcc
new file mode 120000
index 0000000..64f21b6
--- /dev/null
+++ b/config/defconfig_x86_64-cpu_v2-linux-gcc
@@ -0,0 +1 @@
+defconfig_x86_64-cpu_v2-linuxapp-gcc
\ No newline at end of file
diff --git a/config/defconfig_x86_64-cpu_v2-linuxapp-gcc b/config/defconfig_x86_64-cpu_v2-linuxapp-gcc
new file mode 100644
index 0000000..2748e30
--- /dev/null
+++ b/config/defconfig_x86_64-cpu_v2-linuxapp-gcc
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
+
+#include "common_linux"
+
+CONFIG_RTE_MACHINE="cpu_v2"
+
+CONFIG_RTE_ARCH="x86_64"
+CONFIG_RTE_ARCH_X86_64=y
+CONFIG_RTE_ARCH_X86=y
+CONFIG_RTE_ARCH_64=y
+
+CONFIG_RTE_TOOLCHAIN="gcc"
+CONFIG_RTE_TOOLCHAIN_GCC=y
diff --git a/mk/machine/cpu_v2/rte.vars.mk b/mk/machine/cpu_v2/rte.vars.mk
new file mode 100644
index 0000000..ffa7d3f
--- /dev/null
+++ b/mk/machine/cpu_v2/rte.vars.mk
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
+
+#
+# machine:
+#
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
+# overrides the one defined in arch.
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
+# overrides the one defined in arch.
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
+# overrides the one defined in arch.
+# - may override any previously defined variable
+#
+
+# ARCH =
+# CROSS =
+# MACHINE_CFLAGS =
+# MACHINE_LDFLAGS =
+# MACHINE_ASFLAGS =
+# CPU_CFLAGS =
+# CPU_LDFLAGS =
+# CPU_ASFLAGS =
+
+MACHINE_CFLAGS = -march=core-avx-i
+
+# On FreeBSD systems, sometimes the correct CPU type is not picked up.
+# To get everything to compile, we need SSE4.2 support, so check if that is
+# reported by compiler. If not, check if the CPU actually supports it, and if
+# so, set the compilation target to be a corei7, minimum target with SSE4.2.
+SSE42_SUPPORT=$(shell $(CC) -march=native -dM -E - </dev/null | grep SSE4_2)
+ifeq ($(SSE42_SUPPORT),)
+ MACHINE_CFLAGS = -march=corei7
+endif
--
2.19.1

View File

@ -0,0 +1,102 @@
From 8e6c3a65eed79da35189319577235f43a7423612 Mon Sep 17 00:00:00 2001
From: "Min Hu (Connor)" <humin29@huawei.com>
Date: Wed, 6 Jan 2021 11:46:31 +0800
Subject: [PATCH 008/189] net/hns3: fix crash with multi-process
In current version, procedure of saving eth_dev in
hns3 PMD init will be called more than twice, one
for primary, the other for secondary. That will cause
segmentation fault in Multi-process as eth_dev will
be changed in secondary process, which is different
from one in primary process.
The initial problem was access to 'rte_eth_devices'
global variable, which is wrong. But current approach
can cause problem for the secondaries, moving 'eth_dev'
to process private can work but before making things
more complex.
This patch deserted the procedure of saving eth_dev in
hns3 PMD init. Instead, it creates an internal function
that gets "struct hns3_hw" as parameter and it can be
called internally without knowing 'eth_dev' and the
.dev_ops can be wrapper to this.
Fixes: 2390bf217f4d ("net/hns3: fix FEC state query")
Cc: stable@dpdk.org
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 16 ++++++++++------
drivers/net/hns3/hns3_ethdev.h | 2 --
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 7c34e38..90544fe 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -5821,10 +5821,9 @@ get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
}
static int
-hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
+hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
{
#define QUERY_ACTIVE_SPEED 1
- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct hns3_sfp_speed_cmd *resp;
uint32_t tmp_fec_capa;
uint8_t auto_state;
@@ -5885,6 +5884,14 @@ hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
}
static int
+hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ return hns3_fec_get_internal(hw, fec_capa);
+}
+
+static int
hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
{
struct hns3_config_fec_cmd *req;
@@ -6017,10 +6024,9 @@ hns3_query_dev_fec_info(struct hns3_hw *hw)
{
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
- struct rte_eth_dev *eth_dev = hns->eth_dev;
int ret;
- ret = hns3_fec_get(eth_dev, &pf->fec_mode);
+ ret = hns3_fec_get_internal(hw, &pf->fec_mode);
if (ret)
hns3_err(hw, "query device FEC info failed, ret = %d", ret);
@@ -6106,8 +6112,6 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
- hns->eth_dev = eth_dev;
-
eth_dev->process_private = (struct hns3_process_private *)
rte_zmalloc_socket("hns3_filter_list",
sizeof(struct hns3_process_private),
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 8d6b8cd..31f78a1 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -743,8 +743,6 @@ struct hns3_adapter {
struct hns3_vf vf;
};
- struct rte_eth_dev *eth_dev;
-
bool rx_simple_allowed;
bool rx_vec_allowed;
bool tx_simple_allowed;
--
2.7.4

View File

@ -1,194 +0,0 @@
From bca7c742a8f956212c5ad9661b602676c71b7028 Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Tue, 30 Mar 2021 16:24:55 +0800
Subject: [PATCH] dpdk-support-gazelle-01-include
---
lib/librte_eal/common/include/rte_eal.h | 10 ++-
lib/librte_eal/common/include/rte_fbarray.h | 7 ++
lib/librte_eal/common/include/rte_memory.h | 20 +++++-
lib/librte_ring/rte_ring.h | 75 +++++++++++++++++++++
4 files changed, 108 insertions(+), 4 deletions(-)
diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index 2f9ed29..ac1dc1d 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -485,9 +485,17 @@ rte_eal_mbuf_user_pool_ops(void);
* @return
* The runtime directory path of DPDK
*/
-const char *
+char *
rte_eal_get_runtime_dir(void);
+/****** APIs for libnet ******/
+char *rte_eal_sec_get_runtime_dir(const int sec_idx);
+struct rte_config *rte_eal_sec_get_configuration(const int sec_idx);
+struct internal_config *rte_eal_sec_get_internal_config(const int sec_idx);
+
+int rte_eal_sec_attach(int argc, char **argv);
+int rte_eal_sec_detach(const char *file_prefix, int length);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/common/include/rte_fbarray.h b/lib/librte_eal/common/include/rte_fbarray.h
index 6dccdbe..dffee1e 100644
--- a/lib/librte_eal/common/include/rte_fbarray.h
+++ b/lib/librte_eal/common/include/rte_fbarray.h
@@ -101,6 +101,10 @@ __rte_experimental
int
rte_fbarray_attach(struct rte_fbarray *arr);
+int
+rte_sec_fbarray_attach(struct rte_fbarray *arr,
+ const int switch_pri_and_sec, const int sec_idx);
+
/**
* Deallocate resources for an already allocated and correctly set up
@@ -123,6 +127,9 @@ __rte_experimental
int
rte_fbarray_destroy(struct rte_fbarray *arr);
+int
+rte_sec_fbarray_destroy(struct rte_fbarray *arr,
+ const int sec_idx);
/**
* Deallocate resources for an already allocated and correctly set up
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 3d8d0bd..4dd6daa 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -152,7 +152,12 @@ rte_mem_iova2virt(rte_iova_t iova);
__rte_experimental
struct rte_memseg *
rte_mem_virt2memseg(const void *virt, const struct rte_memseg_list *msl);
-
+/*
+__rte_experimental
+struct rte_memseg *
+rte_sec_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg);
+*/
/**
* Get memseg list corresponding to virtual memory address.
*
@@ -164,7 +169,11 @@ rte_mem_virt2memseg(const void *virt, const struct rte_memseg_list *msl);
__rte_experimental
struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *virt);
-
+/*
+__rte_experimental
+struct rte_memseg_list *
+rte_sec_mem_virt2memseg_list(const void *addr, const struct rte_config *rte_cfg);
+*/
/**
* Memseg walk function prototype.
*
@@ -282,7 +291,12 @@ rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg);
__rte_experimental
int
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg);
-
+/*
+__rte_experimental
+int
+rte_sec_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg,
+ struct rte_config *rte_cfg);
+*/
/**
* Walk each VA-contiguous area without performing any locking.
*
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 2a9f768..0eb3a48 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -953,6 +953,81 @@ rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
r->cons.single, available);
}
+/****** APIs for libnet ******/
+static __rte_always_inline unsigned
+rte_ring_cn_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n)
+{
+ const uint32_t old_head = r->prod.tail;
+ rte_smp_rmb();
+
+ const uint32_t entries = r->cons.head - old_head;
+ if (n > entries) {
+ n = entries;
+ }
+ if (unlikely(n == 0)) {
+ return 0;
+ }
+
+ r->prod.head = old_head + n;
+ rte_smp_rmb();
+
+ DEQUEUE_PTRS(r, &r[1], old_head, obj_table, n, void *);
+ return n;
+}
+
+static __rte_always_inline void
+rte_ring_cn_enqueue(struct rte_ring *r)
+{
+ rte_smp_wmb();
+ r->prod.tail = r->prod.head;
+}
+
+static __rte_always_inline unsigned
+rte_ring_en_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n)
+{
+ const uint32_t old_tail = r->cons.tail;
+ rte_smp_rmb();
+
+ const uint32_t entries = r->prod.tail - old_tail;
+ if (n > entries) {
+ n = entries;
+ }
+ if (unlikely(n == 0)) {
+ return 0;
+ }
+
+ const uint32_t new_tail = old_tail + n;
+ rte_smp_rmb();
+
+ DEQUEUE_PTRS(r, &r[1], old_tail, obj_table, n, void *);
+ rte_smp_rmb();
+
+ r->cons.tail = new_tail;
+ return n;
+}
+
+static __rte_always_inline unsigned
+rte_ring_en_enqueue_bulk(struct rte_ring *r, void **obj_table, unsigned int n)
+{
+ const uint32_t capacity = r->capacity;
+ const uint32_t old_head = r->cons.head;
+ rte_smp_rmb();
+
+ const uint32_t entries = capacity + r->cons.tail - old_head;
+ if (n > entries) {
+ return 0;
+ }
+
+ const uint32_t new_head = old_head + n;
+ rte_smp_rmb();
+
+ ENQUEUE_PTRS(r, &r[1], old_head, obj_table, n, void *);
+ rte_smp_wmb();
+
+ r->cons.head = new_head;
+ return n;
+}
+
#ifdef __cplusplus
}
#endif
--
2.23.0

View File

@ -1,189 +0,0 @@
From 8b468249a77d0dc7af6732bcbc4881dba388135d Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Tue, 30 Mar 2021 16:25:39 +0800
Subject: [PATCH] dpdk-support-gazelle-02-include-base
---
config/common_base | 3 +-
lib/librte_eal/common/eal_filesystem.h | 56 ++++++++++++++++++++++----
lib/librte_eal/common/eal_private.h | 25 +++++++++++-
3 files changed, 74 insertions(+), 10 deletions(-)
diff --git a/config/common_base b/config/common_base
index 7dec7ed..57b1349 100644
--- a/config/common_base
+++ b/config/common_base
@@ -95,7 +95,8 @@ CONFIG_RTE_MAX_MEMSEG_PER_TYPE=32768
CONFIG_RTE_MAX_MEM_MB_PER_TYPE=131072
# global maximum usable amount of VA, in megabytes
CONFIG_RTE_MAX_MEM_MB=524288
-CONFIG_RTE_MAX_MEMZONE=2560
+CONFIG_RTE_MAX_MEMZONE=65535
+CONFIG_RTE_MAX_SECONDARY=256
CONFIG_RTE_MAX_TAILQ=32
CONFIG_RTE_ENABLE_ASSERT=n
CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO
diff --git a/lib/librte_eal/common/eal_filesystem.h b/lib/librte_eal/common/eal_filesystem.h
index 5d21f07..e65a183 100644
--- a/lib/librte_eal/common/eal_filesystem.h
+++ b/lib/librte_eal/common/eal_filesystem.h
@@ -23,7 +23,8 @@
/* sets up platform-specific runtime data dir */
int
-eal_create_runtime_dir(void);
+eal_create_runtime_dir(char *runtime_dir, const int buflen,
+ const struct internal_config *conf);
int
eal_clean_runtime_dir(void);
@@ -34,15 +35,27 @@ eal_get_hugefile_prefix(void);
#define RUNTIME_CONFIG_FNAME "config"
static inline const char *
-eal_runtime_config_path(void)
+__eal_runtime_config_path(const char *runtime_dir)
{
static char buffer[PATH_MAX]; /* static so auto-zeroed */
- snprintf(buffer, sizeof(buffer), "%s/%s", rte_eal_get_runtime_dir(),
+ snprintf(buffer, sizeof(buffer), "%s/%s", runtime_dir,
RUNTIME_CONFIG_FNAME);
return buffer;
}
+static inline const char *
+eal_runtime_config_path(void)
+{
+ return __eal_runtime_config_path(rte_eal_get_runtime_dir());
+}
+
+static inline const char *
+eal_sec_runtime_config_path(const char *runtime_dir)
+{
+ return __eal_runtime_config_path(runtime_dir);
+}
+
/** Path of primary/secondary communication unix socket file. */
#define MP_SOCKET_FNAME "mp_socket"
static inline const char *
@@ -57,12 +70,29 @@ eal_mp_socket_path(void)
#define FBARRAY_NAME_FMT "%s/fbarray_%s"
static inline const char *
-eal_get_fbarray_path(char *buffer, size_t buflen, const char *name) {
- snprintf(buffer, buflen, FBARRAY_NAME_FMT, rte_eal_get_runtime_dir(),
+__eal_get_fbarray_path(char *buffer, size_t buflen, const char *name,
+ const char *runtime_dir)
+{
+ snprintf(buffer, buflen, FBARRAY_NAME_FMT, runtime_dir,
name);
return buffer;
}
+static inline const char *
+eal_get_fbarray_path(char *buffer, size_t buflen, const char *name)
+{
+ return __eal_get_fbarray_path(buffer, buflen, name,
+ rte_eal_get_runtime_dir());
+}
+
+static inline const char *
+eal_sec_get_fbarray_path(char *buffer, size_t buflen,
+ const char *name, const char *runtime_dir)
+{
+ return __eal_get_fbarray_path(buffer, buflen, name,
+ runtime_dir);
+}
+
/** Path of hugepage info file. */
#define HUGEPAGE_INFO_FNAME "hugepage_info"
static inline const char *
@@ -78,15 +108,27 @@ eal_hugepage_info_path(void)
/** Path of hugepage data file. */
#define HUGEPAGE_DATA_FNAME "hugepage_data"
static inline const char *
-eal_hugepage_data_path(void)
+__eal_hugepage_data_path(const char *runtime_dir)
{
static char buffer[PATH_MAX]; /* static so auto-zeroed */
- snprintf(buffer, sizeof(buffer), "%s/%s", rte_eal_get_runtime_dir(),
+ snprintf(buffer, sizeof(buffer), "%s/%s", runtime_dir,
HUGEPAGE_DATA_FNAME);
return buffer;
}
+static inline const char *
+eal_hugepage_data_path(void)
+{
+ return __eal_hugepage_data_path(rte_eal_get_runtime_dir());
+}
+
+static inline const char *
+eal_sec_hugepage_data_path(const char *runtime_dir)
+{
+ return __eal_hugepage_data_path(runtime_dir);
+}
+
/** String format for hugepage map files. */
#define HUGEFILE_FMT "%s/%smap_%d"
static inline const char *
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index 597fd02..1fd32a9 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -113,7 +113,8 @@ int rte_eal_cpu_init(void);
* @return
* 0 on success, negative on error
*/
-int rte_eal_memseg_init(void);
+//int rte_eal_memseg_init(void);
+int rte_eal_memseg_init(const int switch_pri_and_sec, const int sec_idx);
/**
* Map memory
@@ -127,6 +128,9 @@ int rte_eal_memseg_init(void);
*/
int rte_eal_memory_init(void);
+int rte_eal_sec_memory_init(const int sec_idx);
+int rte_eal_sec_memory_cleanup(const int sec_idx);
+
/**
* Configure timers
*
@@ -291,7 +295,8 @@ int rte_eal_hugepage_init(void);
*
* This function is private to the EAL.
*/
-int rte_eal_hugepage_attach(void);
+//int rte_eal_hugepage_attach(void);
+int rte_eal_hugepage_attach(const int switch_pri_and_sec, const int sec_idx);
/**
* Find a bus capable of identifying a device.
@@ -450,4 +455,20 @@ eal_get_baseaddr(void);
**/
bool
eal_is_master_set_affinity(void);
+
+
+/****** APIs for libnet ******/
+#include <rte_memory.h>
+
+struct rte_memseg *
+rte_sec_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg);
+
+struct rte_memseg_list *
+rte_sec_mem_virt2memseg_list(const void *addr, const struct rte_config *rte_cfg);
+
+int
+rte_sec_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg,
+ struct rte_config *rte_cfg);
+
#endif /* _EAL_PRIVATE_H_ */
--
2.23.0

View File

@ -1,179 +0,0 @@
From 482dc8035cfae1c792ca1fa1a2d79e7ea938ee14 Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Tue, 30 Mar 2021 16:26:22 +0800
Subject: [PATCH] dpdk-support-gazelle-03-memory
---
lib/librte_eal/common/eal_common_memory.c | 88 ++++++++++++++++++-----
lib/librte_eal/common/eal_memalloc.h | 7 ++
2 files changed, 79 insertions(+), 16 deletions(-)
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 4a9cc1f..842fc9b 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -206,9 +206,9 @@ virt2memseg(const void *addr, const struct rte_memseg_list *msl)
}
static struct rte_memseg_list *
-virt2memseg_list(const void *addr)
+virt2memseg_list(const void *addr, const struct rte_config *rte_cfg)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_mem_config *mcfg = rte_cfg->mem_config;
struct rte_memseg_list *msl;
int msl_idx;
@@ -230,7 +230,13 @@ virt2memseg_list(const void *addr)
struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
- return virt2memseg_list(addr);
+ return virt2memseg_list(addr, rte_eal_get_configuration());
+}
+
+struct rte_memseg_list *
+rte_sec_mem_virt2memseg_list(const void *addr, const struct rte_config *rte_cfg)
+{
+ return virt2memseg_list(addr, rte_cfg);
}
struct virtiova {
@@ -283,11 +289,25 @@ rte_mem_iova2virt(rte_iova_t iova)
return vi.virt;
}
+static struct rte_memseg *
+__rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg)
+{
+ return virt2memseg(addr, msl != NULL ? msl :
+ rte_sec_mem_virt2memseg_list(addr, rte_cfg));
+}
+
struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
- return virt2memseg(addr, msl != NULL ? msl :
- rte_mem_virt2memseg_list(addr));
+ return __rte_mem_virt2memseg(addr, msl, rte_eal_get_configuration());
+}
+
+struct rte_memseg *
+rte_sec_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg)
+{
+ return __rte_mem_virt2memseg(addr, msl, rte_cfg);
}
static int
@@ -889,10 +909,14 @@ rte_extmem_detach(void *va_addr, size_t len)
}
/* init memory subsystem */
-int
-rte_eal_memory_init(void)
+static int
+__rte_eal_memory_init(__attribute__((__unused__)) const char *runtime_dir,
+ const struct internal_config *internal_cfg,
+ struct rte_config *rte_cfg,
+ const int switch_pri_and_sec,
+ const int sec_idx)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_mem_config *mcfg = rte_cfg->mem_config;
int retval;
RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");
@@ -900,25 +924,57 @@ rte_eal_memory_init(void)
return -1;
/* lock mem hotplug here, to prevent races while we init */
- rte_mcfg_mem_read_lock();
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
- if (rte_eal_memseg_init() < 0)
+ if (rte_eal_memseg_init(switch_pri_and_sec, sec_idx) < 0)
goto fail;
- if (eal_memalloc_init() < 0)
- goto fail;
+ if (!internal_cfg->pri_and_sec)
+ if (eal_memalloc_init() < 0)
+ goto fail;
- retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
+ retval = rte_cfg->process_type == RTE_PROC_PRIMARY ?
rte_eal_hugepage_init() :
- rte_eal_hugepage_attach();
+ rte_eal_hugepage_attach(switch_pri_and_sec, sec_idx);
if (retval < 0)
goto fail;
- if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
+ if (internal_cfg->no_shconf == 0 && rte_eal_memdevice_init() < 0)
goto fail;
return 0;
fail:
- rte_mcfg_mem_read_unlock();
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
return -1;
}
+
+int
+rte_eal_memory_init(void)
+{
+ const int unused_idx = -1;
+
+ return __rte_eal_memory_init(rte_eal_get_runtime_dir(),
+ &internal_config, rte_eal_get_configuration(),
+ false, unused_idx);
+}
+
+int
+rte_eal_sec_memory_init(const int sec_idx)
+{
+ int ret;
+ struct rte_config *rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+
+ ret = __rte_eal_memory_init(rte_eal_sec_get_runtime_dir(sec_idx),
+ rte_eal_sec_get_internal_config(sec_idx), rte_cfg,
+ true, sec_idx);
+
+ rte_rwlock_read_unlock(&rte_cfg->mem_config->memory_hotplug_lock);
+
+ return ret;
+}
+
+int
+rte_eal_sec_memory_cleanup(const int sec_idx)
+{
+ return eal_memalloc_destroy(sec_idx);
+}
diff --git a/lib/librte_eal/common/eal_memalloc.h b/lib/librte_eal/common/eal_memalloc.h
index e953cd8..d5ea6e1 100644
--- a/lib/librte_eal/common/eal_memalloc.h
+++ b/lib/librte_eal/common/eal_memalloc.h
@@ -83,6 +83,10 @@ eal_memalloc_get_seg_fd(int list_idx, int seg_idx);
int
eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd);
+int
+eal_sec_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd,
+ const int switch_pri_and_sec, const int sec_idx);
+
/* returns 0 or -errno */
int
eal_memalloc_set_seg_list_fd(int list_idx, int fd);
@@ -93,4 +97,7 @@ eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset);
int
eal_memalloc_init(void);
+int
+eal_memalloc_destroy(const int sec_idx);
+
#endif /* EAL_MEMALLOC_H */
--
2.23.0

View File

@ -1,176 +0,0 @@
From 8eea6474f37eff51eb6f9a178ec6790cf5bed53a Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Tue, 30 Mar 2021 16:27:35 +0800
Subject: [PATCH] dpdk-support-gazelle-03-cfg-options
---
config/rte_config.h | 3 +-
lib/librte_eal/common/eal_common_options.c | 46 ++++++++++++++++++----
lib/librte_eal/common/eal_internal_cfg.h | 2 +
lib/librte_eal/common/eal_options.h | 7 +++-
4 files changed, 49 insertions(+), 9 deletions(-)
diff --git a/config/rte_config.h b/config/rte_config.h
index d30786b..b848b1c 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -39,7 +39,8 @@
#define RTE_MAX_MEMSEG_PER_TYPE 32768
#define RTE_MAX_MEM_MB_PER_TYPE 65536
#define RTE_MAX_MEM_MB 524288
-#define RTE_MAX_MEMZONE 2560
+#define RTE_MAX_MEMZONE 65535
+#define RTE_MAX_SECONDARY 256
#define RTE_MAX_TAILQ 32
#define RTE_LOG_DP_LEVEL RTE_LOG_INFO
#define RTE_BACKTRACE 1
diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index a7f9c5f..34f4199 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -82,6 +82,7 @@ eal_long_options[] = {
{OPT_LEGACY_MEM, 0, NULL, OPT_LEGACY_MEM_NUM },
{OPT_SINGLE_FILE_SEGMENTS, 0, NULL, OPT_SINGLE_FILE_SEGMENTS_NUM},
{OPT_MATCH_ALLOCATIONS, 0, NULL, OPT_MATCH_ALLOCATIONS_NUM},
+ {OPT_MAP_PERFECT, 0, NULL, OPT_MAP_PERFECT_NUM },
{0, 0, NULL, 0 }
};
@@ -221,6 +222,7 @@ eal_reset_internal_config(struct internal_config *internal_cfg)
internal_cfg->user_mbuf_pool_ops_name = NULL;
CPU_ZERO(&internal_cfg->ctrl_cpuset);
internal_cfg->init_complete = 0;
+ internal_cfg->map_perfect = 0;
}
static int
@@ -1097,7 +1099,7 @@ eal_parse_iova_mode(const char *name)
}
static int
-eal_parse_base_virtaddr(const char *arg)
+eal_parse_base_virtaddr(const char *arg, struct internal_config *conf)
{
char *end;
uint64_t addr;
@@ -1120,7 +1122,7 @@ eal_parse_base_virtaddr(const char *arg)
* it can align to 2MB for x86. So this alignment can also be used
* on x86 and other architectures.
*/
- internal_config.base_virtaddr =
+ conf->base_virtaddr =
RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M);
return 0;
@@ -1440,7 +1442,7 @@ eal_parse_common_option(int opt, const char *optarg,
}
break;
case OPT_BASE_VIRTADDR_NUM:
- if (eal_parse_base_virtaddr(optarg) < 0) {
+ if (eal_parse_base_virtaddr(optarg, conf) < 0) {
RTE_LOG(ERR, EAL, "invalid parameter for --"
OPT_BASE_VIRTADDR "\n");
return -1;
@@ -1553,11 +1555,33 @@ eal_adjust_config(struct internal_config *internal_cfg)
}
int
-eal_check_common_options(struct internal_config *internal_cfg)
+eal_sec_adjust_config(struct internal_config *internal_cfg)
{
- struct rte_config *cfg = rte_eal_get_configuration();
+ struct internal_config *internal_cfg_head;
+ internal_cfg->process_type = RTE_PROC_SECONDARY;
+
+ internal_cfg_head = rte_eal_sec_get_internal_config(0);
+ for (int i = 0; i < RTE_MAX_SECONDARY; ++i) {
+ if (!internal_cfg_head[i].pri_and_sec)
+ continue;
+ if (internal_cfg == &internal_cfg_head[i])
+ continue;
+ if (!strcmp(internal_cfg_head[i].hugefile_prefix, internal_cfg->hugefile_prefix))
+ return -EALREADY;
+ }
+
+ for (int i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ internal_cfg->memory += internal_cfg->socket_mem[i];
+
+ return 0;
+}
- if (cfg->lcore_role[cfg->master_lcore] != ROLE_RTE) {
+int
+eal_check_common_options(struct internal_config *internal_cfg,
+ struct rte_config *cfg)
+{
+ if (!internal_cfg->pri_and_sec &&
+ cfg->lcore_role[cfg->master_lcore] != ROLE_RTE) {
RTE_LOG(ERR, EAL, "Master lcore is not enabled for DPDK\n");
return -1;
}
@@ -1602,7 +1626,7 @@ eal_check_common_options(struct internal_config *internal_cfg)
"be specified together with --"OPT_NO_HUGE"\n");
return -1;
}
- if (internal_config.force_socket_limits && internal_config.legacy_mem) {
+ if (internal_cfg->force_socket_limits && internal_config.legacy_mem) {
RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_LIMIT
" is only supported in non-legacy memory mode\n");
}
@@ -1635,6 +1659,14 @@ eal_check_common_options(struct internal_config *internal_cfg)
"-m or --"OPT_SOCKET_MEM"\n");
}
+ if (internal_cfg->map_perfect || internal_cfg->pri_and_sec) {
+ if (!internal_cfg->legacy_mem || internal_cfg->in_memory || internal_cfg->no_hugetlbfs) {
+ RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" or "OPT_IN_MEMORY" or "OPT_NO_HUGE" "
+ "is not compatible with --"OPT_MAP_PERFECT" and "OPT_PRI_AND_SEC"\n");
+ return -1;
+ }
+ }
+
return 0;
}
diff --git a/lib/librte_eal/common/eal_internal_cfg.h b/lib/librte_eal/common/eal_internal_cfg.h
index a42f349..50d5da1 100644
--- a/lib/librte_eal/common/eal_internal_cfg.h
+++ b/lib/librte_eal/common/eal_internal_cfg.h
@@ -82,6 +82,8 @@ struct internal_config {
rte_cpuset_t ctrl_cpuset; /**< cpuset for ctrl threads */
volatile unsigned int init_complete;
/**< indicates whether EAL has completed initialization */
+ volatile unsigned pri_and_sec;
+ volatile unsigned map_perfect;
};
extern struct internal_config internal_config; /**< Global EAL configuration. */
diff --git a/lib/librte_eal/common/eal_options.h b/lib/librte_eal/common/eal_options.h
index 9855429..b42d41d 100644
--- a/lib/librte_eal/common/eal_options.h
+++ b/lib/librte_eal/common/eal_options.h
@@ -69,6 +69,10 @@ enum {
OPT_IOVA_MODE_NUM,
#define OPT_MATCH_ALLOCATIONS "match-allocations"
OPT_MATCH_ALLOCATIONS_NUM,
+#define OPT_PRI_AND_SEC "pri-and-sec"
+ OPT_PRI_AND_SEC_NUM,
+#define OPT_MAP_PERFECT "map-perfect"
+ OPT_MAP_PERFECT_NUM,
OPT_LONG_MAX_NUM
};
@@ -79,8 +83,9 @@ int eal_parse_common_option(int opt, const char *argv,
struct internal_config *conf);
int eal_option_device_parse(void);
int eal_adjust_config(struct internal_config *internal_cfg);
+int eal_sec_adjust_config(struct internal_config *internal_cfg);
int eal_cleanup_config(struct internal_config *internal_cfg);
-int eal_check_common_options(struct internal_config *internal_cfg);
+int eal_check_common_options(struct internal_config *internal_cfg, struct rte_config *cfg);
void eal_common_usage(void);
enum rte_proc_type_t eal_proc_type_detect(void);
int eal_plugins_init(void);
--
2.23.0

View File

@ -1,186 +0,0 @@
From ab23196a30701353f626b099fc9c957bcd5bf2a0 Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Tue, 30 Mar 2021 16:29:00 +0800
Subject: [PATCH] dpdk-support-gazelle-05-fbarray-hugepageinfo
---
lib/librte_eal/common/eal_common_fbarray.c | 106 ++++++++++++++++---
lib/librte_eal/linux/eal/eal_hugepage_info.c | 2 +-
2 files changed, 95 insertions(+), 13 deletions(-)
diff --git a/lib/librte_eal/common/eal_common_fbarray.c b/lib/librte_eal/common/eal_common_fbarray.c
index 1312f93..b611ffa 100644
--- a/lib/librte_eal/common/eal_common_fbarray.c
+++ b/lib/librte_eal/common/eal_common_fbarray.c
@@ -833,8 +833,9 @@ fail:
return -1;
}
-int
-rte_fbarray_attach(struct rte_fbarray *arr)
+static int
+__rte_fbarray_attach(struct rte_fbarray *arr, const char *runtime_dir,
+ const struct internal_config *internal_cfg)
{
struct mem_area *ma = NULL, *tmp = NULL;
size_t page_sz, mmap_len;
@@ -870,13 +871,15 @@ rte_fbarray_attach(struct rte_fbarray *arr)
mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
- /* check the tailq - maybe user has already mapped this address space */
- rte_spinlock_lock(&mem_area_lock);
+ if (!internal_cfg->pri_and_sec) {
+ /* check the tailq - maybe user has already mapped this address space */
+ rte_spinlock_lock(&mem_area_lock);
- TAILQ_FOREACH(tmp, &mem_area_tailq, next) {
- if (overlap(tmp, arr->data, mmap_len)) {
- rte_errno = EEXIST;
- goto fail;
+ TAILQ_FOREACH(tmp, &mem_area_tailq, next) {
+ if (overlap(tmp, arr->data, mmap_len)) {
+ rte_errno = EEXIST;
+ goto fail;
+ }
}
}
@@ -886,7 +889,7 @@ rte_fbarray_attach(struct rte_fbarray *arr)
if (data == NULL)
goto fail;
- eal_get_fbarray_path(path, sizeof(path), arr->name);
+ eal_sec_get_fbarray_path(path, sizeof(path), arr->name, runtime_dir);
fd = open(path, O_RDWR);
if (fd < 0) {
@@ -903,16 +906,27 @@ rte_fbarray_attach(struct rte_fbarray *arr)
if (resize_and_map(fd, data, mmap_len))
goto fail;
+ if (internal_cfg->pri_and_sec) {
+ if (flock(fd, LOCK_UN)) {
+ rte_errno = errno;
+ goto fail;
+ }
+ close(fd);
+ fd = -1;
+ }
+
/* store our new memory area */
ma->addr = data;
ma->fd = fd; /* keep fd until detach/destroy */
ma->len = mmap_len;
- TAILQ_INSERT_TAIL(&mem_area_tailq, ma, next);
+ if (!internal_cfg->pri_and_sec) {
+ TAILQ_INSERT_TAIL(&mem_area_tailq, ma, next);
- /* we're done */
+ /* we're done */
- rte_spinlock_unlock(&mem_area_lock);
+ rte_spinlock_unlock(&mem_area_lock);
+ }
return 0;
fail:
if (data)
@@ -924,6 +938,30 @@ fail:
return -1;
}
+int
+rte_fbarray_attach(struct rte_fbarray *arr)
+{
+ return __rte_fbarray_attach(arr, rte_eal_get_runtime_dir(), &internal_config);
+}
+
+int
+rte_sec_fbarray_attach(struct rte_fbarray *arr,
+ const int switch_pri_and_sec, const int sec_idx)
+{
+ struct internal_config *internal_cfg = NULL;
+ char *runtime_dir = NULL;
+
+ if (!switch_pri_and_sec) {
+ runtime_dir = rte_eal_get_runtime_dir();
+ internal_cfg = &internal_config;
+ } else {
+ runtime_dir = rte_eal_sec_get_runtime_dir(sec_idx);
+ internal_cfg = rte_eal_sec_get_internal_config(sec_idx);
+ }
+
+ return __rte_fbarray_attach(arr, runtime_dir, internal_cfg);
+}
+
int
rte_fbarray_detach(struct rte_fbarray *arr)
{
@@ -1063,6 +1101,50 @@ out:
return ret;
}
+int
+rte_sec_fbarray_destroy(struct rte_fbarray *arr,
+ const int sec_idx)
+{
+ int fd, ret;
+ size_t mmap_len;
+ char path[PATH_MAX];
+
+ if (arr == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ size_t page_sz = sysconf(_SC_PAGESIZE);
+
+ if (page_sz == (size_t)-1)
+ return -1;
+
+ mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
+ munmap(arr->data, mmap_len);
+
+ /* try deleting the file */
+ eal_sec_get_fbarray_path(path, sizeof(path), arr->name, rte_eal_sec_get_runtime_dir(sec_idx));
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Could not open fbarray file: %s\n",
+ strerror(errno));
+ return -1;
+ }
+ if (flock(fd, LOCK_EX | LOCK_NB)) {
+ RTE_LOG(DEBUG, EAL, "Cannot destroy fbarray - another process is using it\n");
+ rte_errno = EBUSY;
+ ret = -1;
+ } else {
+ ret = 0;
+ unlink(path);
+ memset(arr, 0, sizeof(*arr));
+ }
+ close(fd);
+
+ return ret;
+}
+
void *
rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx)
{
diff --git a/lib/librte_eal/linux/eal/eal_hugepage_info.c b/lib/librte_eal/linux/eal/eal_hugepage_info.c
index 91a4fed..911acec 100644
--- a/lib/librte_eal/linux/eal/eal_hugepage_info.c
+++ b/lib/librte_eal/linux/eal/eal_hugepage_info.c
@@ -350,7 +350,7 @@ calc_num_pages(struct hugepage_info *hpi, struct dirent *dirent)
*/
total_pages = 0;
/* we also don't want to do this for legacy init */
- if (!internal_config.legacy_mem)
+ if (!internal_config.legacy_mem || internal_config.map_perfect)
for (i = 0; i < rte_socket_count(); i++) {
int socket = rte_socket_id_by_idx(i);
unsigned int num_pages =
--
2.23.0

View File

@ -1,205 +0,0 @@
From 5bcdc98f579f8fa2699318b3400f18ee9d629936 Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Tue, 30 Mar 2021 16:29:41 +0800
Subject: [PATCH] dpdk-support-gazelle-06-memalloc
---
lib/librte_eal/linux/eal/eal_memalloc.c | 127 ++++++++++++++++++++----
1 file changed, 110 insertions(+), 17 deletions(-)
diff --git a/lib/librte_eal/linux/eal/eal_memalloc.c b/lib/librte_eal/linux/eal/eal_memalloc.c
index cad4934..8e7f120 100644
--- a/lib/librte_eal/linux/eal/eal_memalloc.c
+++ b/lib/librte_eal/linux/eal/eal_memalloc.c
@@ -95,12 +95,14 @@ static int fallocate_supported = -1; /* unknown */
* they will be initialized at startup, and filled as we allocate/deallocate
* segments.
*/
-static struct {
+struct fd_list{
int *fds; /**< dynamically allocated array of segment lock fd's */
int memseg_list_fd; /**< memseg list fd */
int len; /**< total length of the array */
int count; /**< entries used in an array */
-} fd_list[RTE_MAX_MEMSEG_LISTS];
+};
+static struct fd_list fd_list[RTE_MAX_MEMSEG_LISTS];
+static struct fd_list sec_fd_list[RTE_MAX_SECONDARY][RTE_MAX_MEMSEG_LISTS];
/** local copy of a memory map, used to synchronize memory hotplug in MP */
static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
@@ -1391,13 +1393,13 @@ secondary_msl_create_walk(const struct rte_memseg_list *msl,
}
static int
-alloc_list(int list_idx, int len)
+__alloc_list(int list_idx, int len, struct fd_list *fd_ls)
{
int *data;
int i;
/* single-file segments mode does not need fd list */
- if (!internal_config.single_file_segments) {
+ if (!internal_config.single_file_segments) { // sec todo
/* ensure we have space to store fd per each possible segment */
data = malloc(sizeof(int) * len);
if (data == NULL) {
@@ -1407,19 +1409,31 @@ alloc_list(int list_idx, int len)
/* set all fd's as invalid */
for (i = 0; i < len; i++)
data[i] = -1;
- fd_list[list_idx].fds = data;
- fd_list[list_idx].len = len;
+ fd_ls[list_idx].fds = data;
+ fd_ls[list_idx].len = len;
} else {
- fd_list[list_idx].fds = NULL;
- fd_list[list_idx].len = 0;
+ fd_ls[list_idx].fds = NULL;
+ fd_ls[list_idx].len = 0;
}
- fd_list[list_idx].count = 0;
- fd_list[list_idx].memseg_list_fd = -1;
+ fd_ls[list_idx].count = 0;
+ fd_ls[list_idx].memseg_list_fd = -1;
return 0;
}
+static int
+alloc_list(int list_idx, int len)
+{
+ return __alloc_list(list_idx, len, fd_list);
+}
+
+static int
+sec_alloc_list(int list_idx, int len, struct fd_list *fd_ls)
+{
+ return __alloc_list(list_idx, len, fd_ls);
+}
+
static int
fd_list_create_walk(const struct rte_memseg_list *msl,
void *arg __rte_unused)
@@ -1437,27 +1451,71 @@ fd_list_create_walk(const struct rte_memseg_list *msl,
return alloc_list(msl_idx, len);
}
-int
-eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
+static int
+fd_list_destroy_walk(const struct rte_memseg_list *msl, const int sec_idx)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_mem_config *mcfg = rte_eal_sec_get_configuration(sec_idx)->mem_config;
+ struct fd_list *fd_ls = sec_fd_list[sec_idx];
+ int list_idx;
+
+ list_idx = msl - mcfg->memsegs;
+ if (fd_ls[list_idx].len != 0) {
+ free(fd_ls[list_idx].fds);
+ /* The fds have already been closed; see eal_legacy_hugepage_attach(). */
+ //close(fd_ls[list_idx].fds[seg_idx]);
+ }
+ memset(&fd_ls[list_idx], 0, sizeof(fd_ls[list_idx]));
+
+ return 0;
+}
+
+static int
+__eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd,
+ const struct rte_config *rte_cfg, struct fd_list *fd_ls)
+{
+ struct rte_mem_config *mcfg = rte_cfg->mem_config;
/* single file segments mode doesn't support individual segment fd's */
- if (internal_config.single_file_segments)
+ if (internal_config.single_file_segments) // sec todo
return -ENOTSUP;
/* if list is not allocated, allocate it */
- if (fd_list[list_idx].len == 0) {
+ if (fd_ls[list_idx].len == 0) {
int len = mcfg->memsegs[list_idx].memseg_arr.len;
- if (alloc_list(list_idx, len) < 0)
+ if (sec_alloc_list(list_idx, len, fd_ls) < 0)
return -ENOMEM;
}
- fd_list[list_idx].fds[seg_idx] = fd;
+ fd_ls[list_idx].fds[seg_idx] = fd;
return 0;
}
+int
+eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
+{
+ return __eal_memalloc_set_seg_fd(list_idx, seg_idx, fd,
+ rte_eal_get_configuration(), fd_list);
+}
+
+int
+eal_sec_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd,
+ const int switch_pri_and_sec, const int sec_idx)
+{
+ struct rte_config *rte_cfg = NULL;
+ struct fd_list *fd_ls = NULL;
+
+ if (!switch_pri_and_sec) {
+ rte_cfg = rte_eal_get_configuration();
+ fd_ls = &fd_list[0];
+ } else {
+ rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ fd_ls = &sec_fd_list[sec_idx][0];
+ }
+
+ return __eal_memalloc_set_seg_fd(list_idx, seg_idx, fd, rte_cfg, fd_ls);
+}
+
int
eal_memalloc_set_seg_list_fd(int list_idx, int fd)
{
@@ -1602,3 +1660,38 @@ eal_memalloc_init(void)
return -1;
return 0;
}
+
+int
+eal_memalloc_destroy(const int sec_idx)
+{
+ int msl_idx = 0;
+ struct rte_memseg_list *msl;
+ struct rte_mem_config *mcfg = rte_eal_sec_get_configuration(sec_idx)->mem_config;
+
+ for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
+
+ msl = &mcfg->memsegs[msl_idx];
+
+ /* skip empty memseg lists */
+ if (msl->memseg_arr.len == 0)
+ continue;
+
+ if (rte_sec_fbarray_destroy(&msl->memseg_arr, sec_idx)) {
+ RTE_LOG(ERR, EAL, "Cannot clear secondary process local memseg lists\n");
+ return -1;
+ }
+
+ if (munmap(msl->base_va, msl->len) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to unmap memseg lists\n");
+ return -1;
+ }
+ memset(msl, 0, sizeof(*msl));
+
+ if (fd_list_destroy_walk(msl, sec_idx)) {
+ RTE_LOG(ERR, EAL, "Failed to clear secondary fd_list.\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
--
2.23.0

View File

@ -1,152 +0,0 @@
From 036103c944eac5c6c50e68fc1dad9d72a00b5c2c Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Tue, 30 Mar 2021 17:02:45 +0800
Subject: [PATCH] dpdk-support-gazelle-07-eal-add-sec-attach
---
lib/librte_eal/linux/eal/eal.c | 66 ++++++++++++++++++++++++++--------
1 file changed, 52 insertions(+), 14 deletions(-)
diff --git a/lib/librte_eal/linux/eal/eal.c b/lib/librte_eal/linux/eal/eal.c
index 8bb1842..735afcd 100644
--- a/lib/librte_eal/linux/eal/eal.c
+++ b/lib/librte_eal/linux/eal/eal.c
@@ -103,6 +103,12 @@ static char runtime_dir[PATH_MAX];
static const char *default_runtime_dir = "/var/run";
+/****** APIs for libnet ******/
+static unsigned int sec_count = 0;
+static struct rte_config sec_rte_config[RTE_MAX_SECONDARY];
+static struct internal_config sec_internal_config[RTE_MAX_SECONDARY];
+static char sec_runtime_dir[RTE_MAX_SECONDARY][PATH_MAX];
+
static bool master_set_affinity = true;
bool
eal_is_master_set_affinity(void)
@@ -111,7 +117,8 @@ eal_is_master_set_affinity(void)
}
int
-eal_create_runtime_dir(void)
+eal_create_runtime_dir(char *runtime_dir, const int buflen,
+ const struct internal_config *conf)
{
const char *directory = default_runtime_dir;
const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
@@ -134,8 +141,8 @@ eal_create_runtime_dir(void)
}
/* create prefix-specific subdirectory under DPDK runtime dir */
- ret = snprintf(runtime_dir, sizeof(runtime_dir), "%s/%s",
- tmp, eal_get_hugefile_prefix());
+ ret = snprintf(runtime_dir, buflen, "%s/%s",
+ tmp, conf->hugefile_prefix);
if (ret < 0 || ret == sizeof(runtime_dir)) {
RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
return -1;
@@ -246,12 +253,18 @@ eal_clean_runtime_dir(void)
return -1;
}
-const char *
+char *
rte_eal_get_runtime_dir(void)
{
return runtime_dir;
}
+char *
+rte_eal_sec_get_runtime_dir(const int sec_idx)
+{
+ return sec_runtime_dir[sec_idx];
+}
+
/* Return user provided mbuf pool ops name */
const char *
rte_eal_mbuf_user_pool_ops(void)
@@ -266,6 +279,18 @@ rte_eal_get_configuration(void)
return &rte_config;
}
+struct rte_config *
+rte_eal_sec_get_configuration(const int sec_idx)
+{
+ return &sec_rte_config[sec_idx];
+}
+
+struct internal_config *
+rte_eal_sec_get_internal_config(const int sec_idx)
+{
+ return &sec_internal_config[sec_idx];
+}
+
enum rte_iova_mode
rte_eal_iova_mode(void)
{
@@ -395,18 +420,22 @@ rte_eal_config_create(void)
/* attach to an existing shared memory config */
static int
-rte_eal_config_attach(void)
+__rte_eal_config_attach(const int mmap_flags, int *mem_cfg_fd,
+ const char *runtime_dir,
+ const struct internal_config *internal_cfg,
+ struct rte_config *rte_cfg)
{
struct rte_mem_config *mem_config;
+ int mcfg_fd = *mem_cfg_fd;
- const char *pathname = eal_runtime_config_path();
+ const char *pathname = eal_sec_runtime_config_path(runtime_dir);
- if (internal_config.no_shconf)
+ if (internal_cfg->no_shconf)
return 0;
- if (mem_cfg_fd < 0){
- mem_cfg_fd = open(pathname, O_RDWR);
- if (mem_cfg_fd < 0) {
+ if (mcfg_fd < 0){
+ mcfg_fd = open(pathname, O_RDWR);
+ if (mcfg_fd < 0) {
RTE_LOG(ERR, EAL, "Cannot open '%s' for rte_mem_config\n",
pathname);
return -1;
@@ -415,20 +444,29 @@ rte_eal_config_attach(void)
/* map it as read-only first */
mem_config = (struct rte_mem_config *) mmap(NULL, sizeof(*mem_config),
- PROT_READ, MAP_SHARED, mem_cfg_fd, 0);
+ mmap_flags, MAP_SHARED, mcfg_fd, 0);
if (mem_config == MAP_FAILED) {
- close(mem_cfg_fd);
- mem_cfg_fd = -1;
+ close(mcfg_fd);
+ mcfg_fd = -1;
RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config! error %i (%s)\n",
errno, strerror(errno));
return -1;
}
- rte_config.mem_config = mem_config;
+ rte_cfg->mem_config = mem_config;
+ *mem_cfg_fd = mcfg_fd;
return 0;
}
+static int
+rte_eal_config_attach(void)
+{
+ return __rte_eal_config_attach(PROT_READ, &mem_cfg_fd,
+ rte_eal_get_runtime_dir(), &internal_config,
+ rte_eal_get_configuration());
+}
+
/* reattach the shared config at exact memory location primary process has it */
static int
rte_eal_config_reattach(void)
--
2.23.0

View File

@ -1,194 +0,0 @@
From d3021c3a436580dfcca2156f110c7d9125798021 Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Tue, 30 Mar 2021 17:16:05 +0800
Subject: [PATCH] dpdk-support-gazelle-08-eal-add-config
---
lib/librte_eal/linux/eal/eal.c | 92 ++++++++++++++++++++++++++++------
1 file changed, 76 insertions(+), 16 deletions(-)
diff --git a/lib/librte_eal/linux/eal/eal.c b/lib/librte_eal/linux/eal/eal.c
index 735afcd..2de9914 100644
--- a/lib/librte_eal/linux/eal/eal.c
+++ b/lib/librte_eal/linux/eal/eal.c
@@ -569,6 +569,45 @@ rte_config_init(void)
return 0;
}
+static void
+rte_sec_config_init(const int sec_idx)
+{
+ int mem_cfg_fd = -1;
+ int mmap_flags = PROT_READ | PROT_WRITE;
+
+ struct rte_config *rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ struct internal_config *internal_cfg = rte_eal_sec_get_internal_config(sec_idx);
+
+ rte_cfg->process_type = internal_cfg->process_type;
+
+ __rte_eal_config_attach(mmap_flags, &mem_cfg_fd,
+ rte_eal_sec_get_runtime_dir(sec_idx),
+ internal_cfg, rte_cfg);
+
+ close(mem_cfg_fd);
+}
+
+static int
+eal_sec_config_cleanup(const int sec_idx)
+{
+ int ret;
+ struct rte_config *lc_rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ struct internal_config *lc_internal_cfg = rte_eal_sec_get_internal_config(sec_idx);
+ char *lc_runtime_dir = rte_eal_sec_get_runtime_dir(sec_idx);
+
+ ret = munmap(lc_rte_cfg->mem_config, sizeof(*lc_rte_cfg->mem_config));
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Failed to unmap config memory!\n");
+ return -1;
+ }
+
+ memset(lc_rte_cfg, 0, sizeof(*lc_rte_cfg));
+ memset(lc_internal_cfg, 0, sizeof(*lc_internal_cfg));
+ memset(lc_runtime_dir, 0, PATH_MAX);
+
+ return 0;
+}
+
/* Unlocks hugepage directories that were locked by eal_hugepage_info_init */
static void
eal_hugedirs_unlock(void)
@@ -604,6 +643,7 @@ eal_usage(const char *prgname)
" --"OPT_LEGACY_MEM" Legacy memory mode (no dynamic allocation, contiguous segments)\n"
" --"OPT_SINGLE_FILE_SEGMENTS" Put all hugepage memory in single files\n"
" --"OPT_MATCH_ALLOCATIONS" Free hugepages exactly as allocated\n"
+ " --"OPT_MAP_PERFECT" Map virtual addresses according to configured hugepage size\n"
"\n");
/* Allow the application to print its usage message too if hook is set */
if ( rte_application_usage_hook ) {
@@ -731,7 +771,9 @@ eal_log_level_parse(int argc, char **argv)
/* Parse the argument given in the command line of the application */
static int
-eal_parse_args(int argc, char **argv)
+__eal_parse_args(int argc, char **argv, char *runtime_dir, const int buflen,
+ struct internal_config *internal_cfg,
+ struct rte_config *rte_cfg)
{
int opt, ret;
char **argvopt;
@@ -762,7 +804,7 @@ eal_parse_args(int argc, char **argv)
goto out;
}
- ret = eal_parse_common_option(opt, optarg, &internal_config);
+ ret = eal_parse_common_option(opt, optarg, internal_cfg);
/* common parser is not happy */
if (ret < 0) {
eal_usage(prgname);
@@ -785,9 +827,9 @@ eal_parse_args(int argc, char **argv)
RTE_LOG(ERR, EAL, "Could not store hugepage directory\n");
else {
/* free old hugepage dir */
- if (internal_config.hugepage_dir != NULL)
- free(internal_config.hugepage_dir);
- internal_config.hugepage_dir = hdir;
+ if (internal_cfg->hugepage_dir != NULL)
+ free(internal_cfg->hugepage_dir);
+ internal_cfg->hugepage_dir = hdir;
}
break;
}
@@ -798,34 +840,34 @@ eal_parse_args(int argc, char **argv)
RTE_LOG(ERR, EAL, "Could not store file prefix\n");
else {
/* free old prefix */
- if (internal_config.hugefile_prefix != NULL)
- free(internal_config.hugefile_prefix);
- internal_config.hugefile_prefix = prefix;
+ if (internal_cfg->hugefile_prefix != NULL)
+ free(internal_cfg->hugefile_prefix);
+ internal_cfg->hugefile_prefix = prefix;
}
break;
}
case OPT_SOCKET_MEM_NUM:
if (eal_parse_socket_arg(optarg,
- internal_config.socket_mem) < 0) {
+ internal_cfg->socket_mem) < 0) {
RTE_LOG(ERR, EAL, "invalid parameters for --"
OPT_SOCKET_MEM "\n");
eal_usage(prgname);
ret = -1;
goto out;
}
- internal_config.force_sockets = 1;
+ internal_cfg->force_sockets = 1;
break;
case OPT_SOCKET_LIMIT_NUM:
if (eal_parse_socket_arg(optarg,
- internal_config.socket_limit) < 0) {
+ internal_cfg->socket_limit) < 0) {
RTE_LOG(ERR, EAL, "invalid parameters for --"
OPT_SOCKET_LIMIT "\n");
eal_usage(prgname);
ret = -1;
goto out;
}
- internal_config.force_socket_limits = 1;
+ internal_cfg->force_socket_limits = 1;
break;
case OPT_VFIO_INTR_NUM:
@@ -839,7 +881,7 @@ eal_parse_args(int argc, char **argv)
break;
case OPT_CREATE_UIO_DEV_NUM:
- internal_config.create_uio_dev = 1;
+ internal_cfg->create_uio_dev = 1;
break;
case OPT_MBUF_POOL_OPS_NAME_NUM:
@@ -849,11 +891,11 @@ eal_parse_args(int argc, char **argv)
RTE_LOG(ERR, EAL, "Could not store mbuf pool ops name\n");
else {
/* free old ops name */
- if (internal_config.user_mbuf_pool_ops_name !=
+ if (internal_cfg->user_mbuf_pool_ops_name !=
NULL)
- free(internal_config.user_mbuf_pool_ops_name);
+ free(internal_cfg->user_mbuf_pool_ops_name);
- internal_config.user_mbuf_pool_ops_name =
+ internal_cfg->user_mbuf_pool_ops_name =
ops_name;
}
break;
@@ -914,6 +956,24 @@ eal_parse_args(int argc, char **argv)
return ret;
}
+static int
+eal_parse_args(int argc, char **argv)
+{
+ return __eal_parse_args(argc, argv,
+ rte_eal_get_runtime_dir(), PATH_MAX,
+ &internal_config,
+ rte_eal_get_configuration());
+}
+
+static int
+eal_sec_parse_args(int argc, char **argv, const int sec_idx)
+{
+ return __eal_parse_args(argc, argv,
+ rte_eal_sec_get_runtime_dir(sec_idx), PATH_MAX,
+ rte_eal_sec_get_internal_config(sec_idx),
+ rte_eal_sec_get_configuration(sec_idx));
+}
+
static int
check_socket(const struct rte_memseg_list *msl, void *arg)
{
--
2.23.0

View File

@ -1,161 +0,0 @@
From d81c5f9a3e78ae18f78caeb8791e8e3947151273 Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Tue, 30 Mar 2021 17:16:50 +0800
Subject: [PATCH] dpdk-support-gazelle-09-eal-add-libnetapi
---
lib/librte_eal/linux/eal/eal.c | 119 +++++++++++++++++++++++++++++++--
1 file changed, 112 insertions(+), 7 deletions(-)
diff --git a/lib/librte_eal/linux/eal/eal.c b/lib/librte_eal/linux/eal/eal.c
index 2de9914..a1f2b42 100644
--- a/lib/librte_eal/linux/eal/eal.c
+++ b/lib/librte_eal/linux/eal/eal.c
@@ -901,7 +901,11 @@ __eal_parse_args(int argc, char **argv, char *runtime_dir, const int buflen,
break;
}
case OPT_MATCH_ALLOCATIONS_NUM:
- internal_config.match_allocations = 1;
+ internal_cfg->match_allocations = 1;
+ break;
+
+ case OPT_MAP_PERFECT_NUM:
+ internal_cfg->map_perfect = 1;
break;
default:
@@ -924,20 +928,25 @@ __eal_parse_args(int argc, char **argv, char *runtime_dir, const int buflen,
}
/* create runtime data directory */
- if (internal_config.no_shconf == 0 &&
- eal_create_runtime_dir() < 0) {
+ if (internal_cfg->no_shconf == 0 &&
+ eal_create_runtime_dir(runtime_dir, buflen, internal_cfg) < 0) {
RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
ret = -1;
goto out;
}
- if (eal_adjust_config(&internal_config) != 0) {
- ret = -1;
- goto out;
+ if (!internal_cfg->pri_and_sec) {
+ ret = eal_adjust_config(internal_cfg);
+ if (ret != 0)
+ goto out;
+ } else {
+ ret = eal_sec_adjust_config(internal_cfg);
+ if (ret != 0)
+ goto out;
}
/* sanity checks */
- if (eal_check_common_options(&internal_config) != 0) {
+ if (eal_check_common_options(internal_cfg, rte_cfg) != 0) {
eal_usage(prgname);
ret = -1;
goto out;
@@ -1504,3 +1513,99 @@ rte_eal_check_module(const char *module_name)
/* Module has been found */
return 1;
}
+
+
+/****** APIs for libnet ******/
+int
+rte_eal_sec_attach(int argc, char **argv)
+{
+ int ret;
+ int sec_idx = -1;
+ struct internal_config *lc_internal_cfg = NULL;
+
+ if (sec_count >= RTE_MAX_SECONDARY) {
+ RTE_LOG(ERR, EAL, "Too many secondary processes: %d.\n", sec_count);
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ for (int i = 0; i < RTE_MAX_SECONDARY; ++i) {
+ if (sec_internal_config[i].pri_and_sec == 0) {
+ sec_internal_config[i].pri_and_sec = 1;
+ sec_idx = i;
+ break;
+ }
+ }
+ lc_internal_cfg = rte_eal_sec_get_internal_config(sec_idx);
+
+ eal_reset_internal_config(lc_internal_cfg);
+
+ ret = eal_sec_parse_args(argc, argv, sec_idx);
+ if (ret < 0) {
+ if (ret == -EALREADY) {
+ RTE_LOG(ERR, EAL, "file_prefix %s already called initialization.\n",
+ lc_internal_cfg->hugefile_prefix);
+ rte_errno = EALREADY;
+ } else {
+ RTE_LOG(ERR, EAL, "Invalid 'command line' arguments.\n");
+ rte_errno = EINVAL;
+ }
+ return -1;
+ }
+
+ rte_sec_config_init(sec_idx);
+
+ ret = rte_eal_sec_memory_init(sec_idx);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot init memory\n");
+ rte_errno = ENOMEM;
+ return -1;
+ }
+
+ sec_count++;
+ return 0;
+}
+
+int
+rte_eal_sec_detach(const char *file_prefix, int length)
+{
+ int ret;
+ int sec_idx = -1;
+
+ if (!file_prefix || length <= 0) {
+ RTE_LOG(ERR, EAL, "Invalid 'file_prefix or length' arguments.\n");
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ for (int i = 0; i < RTE_MAX_SECONDARY; ++i) {
+ if (sec_internal_config[i].pri_and_sec == 0)
+ continue;
+ if (!strncmp(sec_internal_config[i].hugefile_prefix, file_prefix, length)) {
+ sec_idx = i;
+ break;
+ }
+ }
+ if (sec_idx == -1) {
+ RTE_LOG(ERR, EAL, "Cannot find file_prefix %s.\n", file_prefix);
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ ret = rte_eal_sec_memory_cleanup(sec_idx);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot cleanup memory\n");
+ rte_errno = ENOMEM;
+ return -1;
+ }
+
+ ret = eal_sec_config_cleanup(sec_idx);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot cleanup hugepage sharefile.\n");
+ rte_errno = EACCES;
+ return -1;
+ }
+
+ sec_count--;
+ return 0;
+}
--
2.23.0

View File

@ -1,174 +0,0 @@
From a0305b4a007717f7d297c3c2a61d01f688f29847 Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Tue, 30 Mar 2021 19:44:26 +0800
Subject: [PATCH] dpdk-support-gazelle-10-eal-memory-inter-config
---
lib/librte_eal/linux/eal/eal_memory.c | 72 +++++++++++++++++++++++----
1 file changed, 61 insertions(+), 11 deletions(-)
diff --git a/lib/librte_eal/linux/eal/eal_memory.c b/lib/librte_eal/linux/eal/eal_memory.c
index 43e4ffc..db70ac8 100644
--- a/lib/librte_eal/linux/eal/eal_memory.c
+++ b/lib/librte_eal/linux/eal/eal_memory.c
@@ -1055,10 +1055,10 @@ remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
* address to lower address. Here, physical addresses are in
* descending order.
*/
- else if ((prev->physaddr - cur->physaddr) != cur->size)
+ else if (!internal_config.map_perfect && (prev->physaddr - cur->physaddr) != cur->size)
new_memseg = 1;
#else
- else if ((cur->physaddr - prev->physaddr) != cur->size)
+ else if (!internal_config.map_perfect && (cur->physaddr - prev->physaddr) != cur->size)
new_memseg = 1;
#endif
@@ -1457,6 +1457,24 @@ eal_legacy_hugepage_init(void)
/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;
+ if (internal_config.map_perfect) {
+ int sys_num_pages = 0;
+ int need_num_pages = 0;
+ struct rte_memseg_list *msl;
+
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
+ sys_num_pages += internal_config.hugepage_info[i].num_pages[j];
+ }
+
+ for (j = 0; j < RTE_MAX_MEMSEG_LISTS; j++) {
+ msl = &mcfg->memsegs[j];
+ if (internal_config.hugepage_info[i].hugepage_sz == msl->page_sz)
+ need_num_pages += msl->memseg_arr.len;
+ }
+
+ internal_config.hugepage_info[i].num_pages[0] = RTE_MIN(sys_num_pages, need_num_pages);
+ }
+
nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
}
@@ -1537,8 +1555,13 @@ eal_legacy_hugepage_init(void)
goto fail;
}
- qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
- sizeof(struct hugepage_file), cmp_physaddr);
+ /* continuous physical memory does not bring performance improvements,
+ * so no sorting is performed for quick startup.
+ */
+ if (!internal_config.map_perfect) {
+ qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
+ sizeof(struct hugepage_file), cmp_physaddr);
+ }
/* we have processed a num of hugepages of this size, so inc offset */
hp_offset += hpi->num_pages[0];
@@ -2228,11 +2251,20 @@ memseg_primary_init(void)
uint64_t max_mem, max_mem_per_type;
unsigned int max_seglists_per_type;
unsigned int n_memtypes, cur_type;
+ struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
/* no-huge does not need this at all */
if (internal_config.no_hugetlbfs)
return 0;
+ if (internal_config.map_perfect) {
+ memset(used_hp, 0, sizeof(used_hp));
+ ret = eal_sec_set_num_pages(&internal_config, used_hp);
+ if (ret == -1) {
+ RTE_LOG(ERR, EAL, "Cannot get num pages\n");
+ }
+ }
+
/*
* figuring out amount of memory we're going to have is a long and very
* involved process. the basic element we're operating with is a memory
@@ -2329,6 +2361,7 @@ memseg_primary_init(void)
struct memtype *type = &memtypes[cur_type];
uint64_t max_mem_per_list, pagesz;
int socket_id;
+ unsigned int need_n_segs, cur_n_segs;
pagesz = type->page_sz;
socket_id = type->socket_id;
@@ -2372,8 +2405,17 @@ memseg_primary_init(void)
"n_segs:%i socket_id:%i hugepage_sz:%" PRIu64 "\n",
n_seglists, n_segs, socket_id, pagesz);
+ if (internal_config.map_perfect)
+ need_n_segs = eal_sec_get_num_pages(used_hp, pagesz, socket_id);
+ else
+ need_n_segs = n_segs;
+
/* create all segment lists */
- for (cur_seglist = 0; cur_seglist < n_seglists; cur_seglist++) {
+ for (cur_seglist = 0; cur_seglist < n_seglists && need_n_segs > 0; cur_seglist++) {
+ cur_n_segs = RTE_MIN(need_n_segs, n_segs);
+ if (internal_config.map_perfect)
+ need_n_segs -= cur_n_segs;
+
if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
RTE_LOG(ERR, EAL,
"No more space in memseg lists, please increase %s\n",
@@ -2400,9 +2442,10 @@ memseg_primary_init(void)
}
static int
-memseg_secondary_init(void)
+memseg_secondary_init(struct rte_config *rte_cfg,
+ const int switch_pri_and_sec, const int sec_idx)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_mem_config *mcfg = rte_cfg->mem_config;
int msl_idx = 0;
struct rte_memseg_list *msl;
@@ -2414,7 +2457,7 @@ memseg_secondary_init(void)
if (msl->memseg_arr.len == 0)
continue;
- if (rte_fbarray_attach(&msl->memseg_arr)) {
+ if (rte_sec_fbarray_attach(&msl->memseg_arr, switch_pri_and_sec, sec_idx)) {
RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
return -1;
}
@@ -2430,11 +2473,18 @@ memseg_secondary_init(void)
}
int
-rte_eal_memseg_init(void)
+rte_eal_memseg_init(const int switch_pri_and_sec, const int sec_idx)
{
/* increase rlimit to maximum */
struct rlimit lim;
+ struct rte_config *rte_cfg = NULL;
+ if (!switch_pri_and_sec) {
+ rte_cfg = rte_eal_get_configuration();
+ } else {
+ rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ }
+
if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
/* set limit to maximum */
lim.rlim_cur = lim.rlim_max;
@@ -2458,11 +2508,11 @@ rte_eal_memseg_init(void)
}
#endif
- return rte_eal_process_type() == RTE_PROC_PRIMARY ?
+ return rte_cfg->process_type == RTE_PROC_PRIMARY ?
#ifndef RTE_ARCH_64
memseg_primary_init_32() :
#else
memseg_primary_init() :
#endif
- memseg_secondary_init();
+ memseg_secondary_init(rte_cfg, switch_pri_and_sec, sec_idx);
}
--
2.23.0

View File

@ -1,188 +0,0 @@
From 8b1f62be35c36c78793d3fd3935b9898cf957673 Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Tue, 30 Mar 2021 19:45:21 +0800
Subject: [PATCH] dpdk-support-gazelle-11-eal-memory-add-sec
---
lib/librte_eal/linux/eal/eal_memory.c | 99 +++++++++++++++++++++++----
1 file changed, 87 insertions(+), 12 deletions(-)
diff --git a/lib/librte_eal/linux/eal/eal_memory.c b/lib/librte_eal/linux/eal/eal_memory.c
index db70ac8..ac81f43 100644
--- a/lib/librte_eal/linux/eal/eal_memory.c
+++ b/lib/librte_eal/linux/eal/eal_memory.c
@@ -1880,9 +1880,9 @@ getFileSize(int fd)
* in order to form a contiguous block in the virtual memory space
*/
static int
-eal_legacy_hugepage_attach(void)
+eal_legacy_hugepage_attach(const int switch_pri_and_sec, const int sec_idx)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_mem_config *mcfg = NULL;
struct hugepage_file *hp = NULL;
unsigned int num_hp = 0;
unsigned int i = 0;
@@ -1890,6 +1890,22 @@ eal_legacy_hugepage_attach(void)
off_t size = 0;
int fd, fd_hugepage = -1;
+ struct rte_config *rte_cfg = NULL;
+ struct internal_config *internal_cfg = NULL;
+ char *runtime_dir = NULL;
+
+ if (!switch_pri_and_sec) {
+ runtime_dir = rte_eal_get_runtime_dir();
+ rte_cfg = rte_eal_get_configuration();
+ internal_cfg = &internal_config;
+ } else {
+ runtime_dir = rte_eal_sec_get_runtime_dir(sec_idx);
+ rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ internal_cfg = rte_eal_sec_get_internal_config(sec_idx);
+ }
+
+ mcfg = rte_cfg->mem_config;
+
if (aslr_enabled() > 0) {
RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
"(ASLR) is enabled in the kernel.\n");
@@ -1897,10 +1913,10 @@ eal_legacy_hugepage_attach(void)
"into secondary processes\n");
}
- fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
+ fd_hugepage = open(eal_sec_hugepage_data_path(runtime_dir), O_RDONLY);
if (fd_hugepage < 0) {
RTE_LOG(ERR, EAL, "Could not open %s\n",
- eal_hugepage_data_path());
+ eal_sec_hugepage_data_path(runtime_dir));
goto error;
}
@@ -1908,7 +1924,7 @@ eal_legacy_hugepage_attach(void)
hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
if (hp == MAP_FAILED) {
RTE_LOG(ERR, EAL, "Could not mmap %s\n",
- eal_hugepage_data_path());
+ eal_sec_hugepage_data_path(runtime_dir));
goto error;
}
@@ -1955,13 +1971,13 @@ eal_legacy_hugepage_attach(void)
}
/* find segment data */
- msl = rte_mem_virt2memseg_list(map_addr);
+ msl = rte_sec_mem_virt2memseg_list(map_addr, rte_cfg);
if (msl == NULL) {
RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg list\n",
__func__);
goto fd_error;
}
- ms = rte_mem_virt2memseg(map_addr, msl);
+ ms = rte_sec_mem_virt2memseg(map_addr, msl, rte_cfg);
if (ms == NULL) {
RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg\n",
__func__);
@@ -1976,8 +1992,16 @@ eal_legacy_hugepage_attach(void)
goto fd_error;
}
+ /* No hugefile lock is required in PRI_AND_SEC mode, close it
+ * to avoid opening too much fd.
+ */
+ if (internal_cfg->pri_and_sec) {
+ close(fd);
+ fd = -1;
+ }
+
/* store segment fd internally */
- if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
+ if (eal_sec_memalloc_set_seg_fd(msl_idx, ms_idx, fd, switch_pri_and_sec, sec_idx) < 0)
RTE_LOG(ERR, EAL, "Could not store segment fd: %s\n",
rte_strerror(rte_errno));
}
@@ -2026,10 +2050,17 @@ rte_eal_hugepage_init(void)
}
int
-rte_eal_hugepage_attach(void)
+rte_eal_hugepage_attach(const int switch_pri_and_sec, const int sec_idx)
{
- return internal_config.legacy_mem ?
- eal_legacy_hugepage_attach() :
+ struct internal_config *internal_cfg;
+
+ if (!switch_pri_and_sec)
+ internal_cfg = &internal_config;
+ else
+ internal_cfg = rte_eal_sec_get_internal_config(sec_idx);
+
+ return internal_cfg->legacy_mem ?
+ eal_legacy_hugepage_attach(switch_pri_and_sec, sec_idx) :
eal_hugepage_attach();
}
@@ -2238,6 +2269,50 @@ memseg_primary_init_32(void)
return 0;
}
+static int
+eal_sec_set_num_pages(struct internal_config *internal_cfg,
+ struct hugepage_info *used_hp)
+{
+ int ret;
+ int hp_sz_idx;
+ uint64_t memory[RTE_MAX_NUMA_NODES];
+
+ if (!internal_cfg || !used_hp) {
+ return -1;
+ }
+
+ for (hp_sz_idx = 0;
+ hp_sz_idx < (int) internal_cfg->num_hugepage_sizes;
+ hp_sz_idx++) {
+ struct hugepage_info *hpi;
+ hpi = &internal_cfg->hugepage_info[hp_sz_idx];
+ used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
+ }
+
+ for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)
+ memory[hp_sz_idx] = internal_cfg->socket_mem[hp_sz_idx];
+
+ ret = calc_num_pages_per_socket(memory,
+ internal_cfg->hugepage_info, used_hp,
+ internal_cfg->num_hugepage_sizes);
+
+ return ret;
+}
+
+static int
+eal_sec_get_num_pages(const struct hugepage_info *used_hp,
+ uint64_t hugepage_sz, int socket)
+{
+ int hp_sz_idx;
+
+ for (hp_sz_idx = 0; hp_sz_idx < MAX_HUGEPAGE_SIZES; hp_sz_idx++) {
+ if (used_hp[hp_sz_idx].hugepage_sz == hugepage_sz)
+ return used_hp[hp_sz_idx].num_pages[socket];
+ }
+
+ return 0;
+}
+
static int __rte_unused
memseg_primary_init(void)
{
@@ -2424,7 +2499,7 @@ memseg_primary_init(void)
}
msl = &mcfg->memsegs[msl_idx++];
- if (alloc_memseg_list(msl, pagesz, n_segs,
+ if (alloc_memseg_list(msl, pagesz, cur_n_segs,
socket_id, cur_seglist))
goto out;
--
2.23.0

View File

@ -0,0 +1,37 @@
From c06b9cd500facfb6a10057490c1ec1090408ff12 Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Wed, 6 Jan 2021 11:46:32 +0800
Subject: [PATCH 009/189] net/hns3: remove unnecessary memset
The hns3_cmd_desc has memset when setup and the memset
for req is unnecessary.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_rss.c | 5 -----
1 file changed, 5 deletions(-)
diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c
index e2f0468..b5df374 100644
--- a/drivers/net/hns3/hns3_rss.c
+++ b/drivers/net/hns3/hns3_rss.c
@@ -633,16 +633,11 @@ hns3_set_rss_tc_mode(struct hns3_hw *hw)
static void
hns3_rss_tuple_uninit(struct hns3_hw *hw)
{
- struct hns3_rss_input_tuple_cmd *req;
struct hns3_cmd_desc desc;
int ret;
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INPUT_TUPLE, false);
- req = (struct hns3_rss_input_tuple_cmd *)desc.data;
-
- memset(req, 0, sizeof(struct hns3_rss_tuple_cfg));
-
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
hns3_err(hw, "RSS uninit tuple failed %d", ret);
--
2.7.4

View File

@ -1,43 +0,0 @@
From 4bda889d737ee2b1fb2381e658bcf4f2a7ca21c8 Mon Sep 17 00:00:00 2001
From: HuangLiming <huangliming5@huawei.com>
Date: Tue, 18 Aug 2020 04:58:53 -0400
Subject: [PATCH] fix error in clearing secondary process memseg lists
Signed-off-by: HuangLiming <huangliming5@huawei.com>
---
lib/librte_eal/common/eal_common_fbarray.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/lib/librte_eal/common/eal_common_fbarray.c b/lib/librte_eal/common/eal_common_fbarray.c
index b611ffa..116c695 100644
--- a/lib/librte_eal/common/eal_common_fbarray.c
+++ b/lib/librte_eal/common/eal_common_fbarray.c
@@ -1105,7 +1105,7 @@ int
rte_sec_fbarray_destroy(struct rte_fbarray *arr,
const int sec_idx)
{
- int fd, ret;
+ int fd;
size_t mmap_len;
char path[PATH_MAX];
@@ -1134,15 +1134,13 @@ rte_sec_fbarray_destroy(struct rte_fbarray *arr,
if (flock(fd, LOCK_EX | LOCK_NB)) {
RTE_LOG(DEBUG, EAL, "Cannot destroy fbarray - another process is using it\n");
rte_errno = EBUSY;
- ret = -1;
} else {
- ret = 0;
unlink(path);
memset(arr, 0, sizeof(*arr));
}
close(fd);
- return ret;
+ return 0;
}
void *
--
2.21.0

View File

@ -0,0 +1,53 @@
From 2a7fd245e7d1c752bd53df6e0e7967b1dadfe876 Mon Sep 17 00:00:00 2001
From: Steve Yang <stevex.yang@intel.com>
Date: Mon, 18 Jan 2021 07:04:12 +0000
Subject: [PATCH 010/189] net/hns3: fix jumbo frame flag condition for MTU set
The jumbo frame uses the 'RTE_ETHER_MAX_LEN' as boundary condition,
but the Ether overhead is larger than 18 when it supports dual VLAN tags.
That will cause the jumbo flag rx offload is wrong when MTU size is
'RTE_ETHER_MTU'.
This fix will change the boundary condition with 'HSN3_DEFAULT_FRAME_LEN',
that perhaps impacts the cases of the jumbo frame related.
Fixes: 1f5ca0b460cd ("net/hns3: support some device operations")
Fixes: a5475d61fa34 ("net/hns3: support VF")
Cc: stable@dpdk.org
Signed-off-by: Steve Yang <stevex.yang@intel.com>
Acked-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 2 +-
drivers/net/hns3/hns3_ethdev_vf.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 90544fe..bf633a3 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2467,7 +2467,7 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
rte_spinlock_lock(&hw->lock);
- is_jumbo_frame = frame_size > RTE_ETHER_MAX_LEN ? true : false;
+ is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false;
frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
/*
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index f09cabc..ef03fb1 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -928,7 +928,7 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
rte_spinlock_unlock(&hw->lock);
return ret;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
--
2.7.4

View File

@ -1,62 +0,0 @@
From 561a37288d629398f976dfa4e57854b7ea484cc7 Mon Sep 17 00:00:00 2001
From: yuanyunkang <yuanyunkang@huawei.com>
Date: Sat, 22 Aug 2020 14:39:16 +0800
Subject: [PATCH] dpdk:fix coredump when primary process attach without shared
file
Signed-off-by: yuanyunkang <yuanyunkang@huawei.com>
---
lib/librte_eal/linux/eal/eal.c | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/lib/librte_eal/linux/eal/eal.c b/lib/librte_eal/linux/eal/eal.c
index a1f2b42..ff86ff9 100644
--- a/lib/librte_eal/linux/eal/eal.c
+++ b/lib/librte_eal/linux/eal/eal.c
@@ -569,22 +569,28 @@ rte_config_init(void)
return 0;
}
-static void
+static int
rte_sec_config_init(const int sec_idx)
{
int mem_cfg_fd = -1;
int mmap_flags = PROT_READ | PROT_WRITE;
+ int ret = -1;
struct rte_config *rte_cfg = rte_eal_sec_get_configuration(sec_idx);
struct internal_config *internal_cfg = rte_eal_sec_get_internal_config(sec_idx);
rte_cfg->process_type = internal_cfg->process_type;
- __rte_eal_config_attach(mmap_flags, &mem_cfg_fd,
+ ret = __rte_eal_config_attach(mmap_flags, &mem_cfg_fd,
rte_eal_sec_get_runtime_dir(sec_idx),
internal_cfg, rte_cfg);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot attach shared memory\n");
+ return -1;
+ }
close(mem_cfg_fd);
+ return 0;
}
static int
@@ -1553,7 +1559,11 @@ rte_eal_sec_attach(int argc, char **argv)
return -1;
}
- rte_sec_config_init(sec_idx);
+ ret = rte_sec_config_init(sec_idx);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot init sec config\n");
+ return -1;
+ }
ret = rte_eal_sec_memory_init(sec_idx);
if (ret < 0) {
--
2.19.1

View File

@ -0,0 +1,209 @@
From ec2a9e9b56f0bfd4d0926c9f59183d682de9670e Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Thu, 14 Jan 2021 21:33:30 +0800
Subject: [PATCH 011/189] net/hns3: use C11 atomics builtins for resetting
Use C11 atomic builtins with explicit ordering instead of
rte_atomic ops with the resetting member of hns3_reset_data
structure.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_dcb.c | 5 +++--
drivers/net/hns3/hns3_ethdev.c | 8 ++++----
drivers/net/hns3/hns3_ethdev.h | 2 +-
drivers/net/hns3/hns3_ethdev_vf.c | 12 ++++++------
drivers/net/hns3/hns3_intr.c | 8 ++++----
drivers/net/hns3/hns3_rxtx.c | 2 +-
6 files changed, 19 insertions(+), 18 deletions(-)
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index fb50179..b32d5af 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -633,7 +633,7 @@ hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
* and configured directly to the hardware in the RESET_STAGE_RESTORE
* stage of the reset process.
*/
- if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
rss_cfg->rss_indirection_tbl[i] =
i % hw->alloc_rss_size;
@@ -1562,7 +1562,8 @@ hns3_dcb_configure(struct hns3_adapter *hns)
int ret;
hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
- if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
+ if (map_changed ||
+ __atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
ret = hns3_dcb_info_update(hns, num_tc);
if (ret) {
hns3_err(hw, "dcb info update failed: %d", ret);
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index bf633a3..d0d1d3a 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -1017,7 +1017,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
* ensure that the hardware configuration remains unchanged before and
* after reset.
*/
- if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
}
@@ -1041,7 +1041,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
* we will restore configurations to hardware in hns3_restore_vlan_table
* and hns3_restore_vlan_conf later.
*/
- if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
if (ret) {
hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -4872,7 +4872,7 @@ hns3_dev_start(struct rte_eth_dev *dev)
int ret;
PMD_INIT_FUNC_TRACE();
- if (rte_atomic16_read(&hw->reset.resetting))
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -5018,7 +5018,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
rte_delay_ms(hw->tqps_num);
rte_spinlock_lock(&hw->lock);
- if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
hns3_stop_tqps(hw);
hns3_do_stop(hns);
hns3_unmap_rx_interrupt(dev);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 31f78a1..0d86683 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -350,7 +350,7 @@ struct hns3_reset_data {
enum hns3_reset_stage stage;
rte_atomic16_t schedule;
/* Reset flag, covering the entire reset process */
- rte_atomic16_t resetting;
+ uint16_t resetting;
/* Used to disable sending cmds during reset */
rte_atomic16_t disable_cmd;
/* The reset level being processed */
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index ef03fb1..c126384 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -898,7 +898,7 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
* MTU value issued by hns3 VF PMD driver must be less than or equal to
* PF's MTU.
*/
- if (rte_atomic16_read(&hw->reset.resetting)) {
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
hns3_err(hw, "Failed to set mtu during resetting");
return -EIO;
}
@@ -1438,7 +1438,7 @@ hns3vf_request_link_info(struct hns3_hw *hw)
uint8_t resp_msg;
int ret;
- if (rte_atomic16_read(&hw->reset.resetting))
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
return;
ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
&resp_msg, sizeof(resp_msg));
@@ -1471,7 +1471,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
struct hns3_hw *hw = &hns->hw;
int ret;
- if (rte_atomic16_read(&hw->reset.resetting)) {
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
hns3_err(hw,
"vf set vlan id failed during resetting, vlan_id =%u",
vlan_id);
@@ -1510,7 +1510,7 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
unsigned int tmp_mask;
int ret = 0;
- if (rte_atomic16_read(&hw->reset.resetting)) {
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
hns3_err(hw, "vf set vlan offload failed during resetting, "
"mask = 0x%x", mask);
return -EIO;
@@ -1957,7 +1957,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
rte_delay_ms(hw->tqps_num);
rte_spinlock_lock(&hw->lock);
- if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
hns3_stop_tqps(hw);
hns3vf_do_stop(hns);
hns3vf_unmap_rx_interrupt(dev);
@@ -2188,7 +2188,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
int ret;
PMD_INIT_FUNC_TRACE();
- if (rte_atomic16_read(&hw->reset.resetting))
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 99c500d..51f19b4 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -1761,7 +1761,7 @@ hns3_reset_init(struct hns3_hw *hw)
hw->reset.stage = RESET_STAGE_NONE;
hw->reset.request = 0;
hw->reset.pending = 0;
- rte_atomic16_init(&hw->reset.resetting);
+ hw->reset.resetting = 0;
rte_atomic16_init(&hw->reset.disable_cmd);
hw->reset.wait_data = rte_zmalloc("wait_data",
sizeof(struct hns3_wait_data), 0);
@@ -2011,7 +2011,7 @@ hns3_reset_pre(struct hns3_adapter *hns)
int ret;
if (hw->reset.stage == RESET_STAGE_NONE) {
- rte_atomic16_set(&hns->hw.reset.resetting, 1);
+ __atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
hw->reset.stage = RESET_STAGE_DOWN;
ret = hw->reset.ops->stop_service(hns);
gettimeofday(&tv, NULL);
@@ -2098,7 +2098,7 @@ hns3_reset_post(struct hns3_adapter *hns)
/* IMP will wait ready flag before reset */
hns3_notify_reset_ready(hw, false);
hns3_clear_reset_level(hw, &hw->reset.pending);
- rte_atomic16_clear(&hns->hw.reset.resetting);
+ __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
hw->reset.attempts = 0;
hw->reset.stats.success_cnt++;
hw->reset.stage = RESET_STAGE_NONE;
@@ -2223,7 +2223,7 @@ hns3_reset_process(struct hns3_adapter *hns, enum hns3_reset_level new_level)
hw->reset.mbuf_deferred_free = false;
}
rte_spinlock_unlock(&hw->lock);
- rte_atomic16_clear(&hns->hw.reset.resetting);
+ __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
hw->reset.stage = RESET_STAGE_NONE;
gettimeofday(&tv, NULL);
timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 5ac36b3..0badfc9 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -3744,7 +3744,7 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
eth_tx_prep_t prep = NULL;
if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
- rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
+ __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
eth_dev->tx_pkt_prepare = prep;
--
2.7.4

View File

@ -1,31 +0,0 @@
From e5cc58807c8d03554f2c3f0eee3b0b6d6f44278f Mon Sep 17 00:00:00 2001
From: HuangLiming <huangliming5@huawei.com>
Date: Sat, 22 Aug 2020 05:32:47 -0400
Subject: [PATCH] fix fbarray memseg destory error during detach without shared
file
Signed-off-by: HuangLiming <huangliming5@huawei.com>
---
lib/librte_eal/common/eal_common_fbarray.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/librte_eal/common/eal_common_fbarray.c b/lib/librte_eal/common/eal_common_fbarray.c
index 116c695..d1aa074 100644
--- a/lib/librte_eal/common/eal_common_fbarray.c
+++ b/lib/librte_eal/common/eal_common_fbarray.c
@@ -1127,9 +1127,9 @@ rte_sec_fbarray_destroy(struct rte_fbarray *arr,
fd = open(path, O_RDONLY);
if (fd < 0) {
- RTE_LOG(ERR, EAL, "Could not open fbarray file: %s\n",
- strerror(errno));
- return -1;
+ RTE_LOG(WARNING, EAL, "Could not open %s: %s, and just skip it\n",
+ path, strerror(errno));
+ return 0;
}
if (flock(fd, LOCK_EX | LOCK_NB)) {
RTE_LOG(DEBUG, EAL, "Cannot destroy fbarray - another process is using it\n");
--
2.21.0

File diff suppressed because it is too large Load Diff

View File

@ -1,195 +0,0 @@
From 5e554c15982617a89b85aeb71592c20bfa7bdecd Mon Sep 17 00:00:00 2001
From: Renmingshuai <renmingshuai@huawei.com>
Date: Tue, 13 Apr 2021 16:25:43 +0800
Subject: [PATCH] optimize the efficiency of compiling dpdk
---
config/common_base | 5 +++
mk/rte.combinedlib.mk | 10 +++++
mk/rte.lib.mk | 102 +++++++++++++++++++++++++++++++++++++++---
3 files changed, 110 insertions(+), 7 deletions(-)
diff --git a/config/common_base b/config/common_base
index 57b1349..392e6c3 100644
--- a/config/common_base
+++ b/config/common_base
@@ -59,6 +59,11 @@ CONFIG_RTE_ENABLE_LTO=n
#
CONFIG_RTE_BUILD_SHARED_LIB=n
+#
+# Compile to both static library and share library
+#
+CONFIG_RTE_BUILD_BOTH_STATIC_AND_SHARED_LIBS=n
+
#
# Use newest code breaking previous ABI
#
diff --git a/mk/rte.combinedlib.mk b/mk/rte.combinedlib.mk
index 9d0f935..1088543 100644
--- a/mk/rte.combinedlib.mk
+++ b/mk/rte.combinedlib.mk
@@ -15,9 +15,16 @@ RTE_LIBNAME := dpdk
COMBINEDLIB := lib$(RTE_LIBNAME)$(EXT)
LIBS := $(filter-out $(COMBINEDLIB), $(sort $(notdir $(wildcard $(RTE_OUTPUT)/lib/*$(EXT)))))
+ifeq ($(CONFIG_RTE_BUILD_BOTH_STATIC_AND_SHARED_LIBS),y)
+COMBINEDLIB_SO := lib$(RTE_LIBNAME).so
+LIBS_SO := $(filter-out $(COMBINEDLIB_SO), $(sort $(notdir $(wildcard $(RTE_OUTPUT)/lib/*.so))))
+endif
all: FORCE
$(Q)echo "GROUP ( $(LIBS) )" > $(RTE_OUTPUT)/lib/$(COMBINEDLIB)
+ifeq ($(CONFIG_RTE_BUILD_BOTH_STATIC_AND_SHARED_LIBS),y)
+ $(Q)echo "GROUP ( $(LIBS_SO) )" > $(RTE_OUTPUT)/lib/$(COMBINEDLIB_SO)
+endif
#
# Clean all generated files
@@ -25,6 +32,9 @@ all: FORCE
.PHONY: clean
clean:
$(Q)rm -f $(RTE_OUTPUT)/lib/$(COMBINEDLIB)
+ifeq ($(CONFIG_RTE_BUILD_BOTH_STATIC_AND_SHARED_LIBS),y)
+ $(Q)rm -f $(RTE_OUTPUT)/lib/$(COMBINEDLIB_SO)
+endif
.PHONY: FORCE
FORCE:
diff --git a/mk/rte.lib.mk b/mk/rte.lib.mk
index 4516d1c..78f3c27 100644
--- a/mk/rte.lib.mk
+++ b/mk/rte.lib.mk
@@ -19,13 +19,6 @@ else ifeq ($(LIBABIVER),)
LIBABIVER := 0.$(shell cat $(RTE_SRCDIR)/ABI_VERSION | tr -d '.')
endif
-ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
-LIB := $(patsubst %.a,%.so.$(LIBABIVER),$(LIB))
-ifeq ($(EXTLIB_BUILD),n)
-CPU_LDFLAGS += --version-script=$(SRCDIR)/$(EXPORT_MAP)
-endif
-endif
-
_BUILD = $(LIB)
PREINSTALL = $(SYMLINK-FILES-y)
@@ -34,6 +27,16 @@ _CLEAN = doclean
LDLIBS += $(EXECENV_LDLIBS-y)
+ifeq ($(CONFIG_RTE_BUILD_BOTH_STATIC_AND_SHARED_LIBS),y)
+LIB_SO = $(LIB)
+LIB_SO := $(patsubst %.a,%.so.$(LIBABIVER),$(LIB_SO))
+ifeq ($(EXTLIB_BUILD),n)
+CPU_LDFLAGS += --version-script=$(SRCDIR)/$(EXPORT_MAP)
+endif
+_BUILD += $(LIB_SO)
+_INSTALL += $(INSTALL-FILES-y) $(RTE_OUTPUT)/lib/$(LIB_SO)
+endif
+
.PHONY: all
all: install
@@ -74,6 +77,89 @@ ifneq ($(CC_SUPPORTS_Z),false)
NO_UNDEFINED := -z defs
endif
+ifeq ($(CONFIG_RTE_BUILD_BOTH_STATIC_AND_SHARED_LIBS),y)
+O_TO_S = $(LD) -L$(RTE_SDK_BIN)/lib $(_CPU_LDFLAGS) $(EXTRA_LDFLAGS) \
+ -shared $(OBJS-y) $(NO_UNDEFINED) $(LDLIBS) -Wl,-soname,$(LIB_SO) -o $(LIB_SO)
+O_TO_S_STR = $(subst ','\'',$(O_TO_S)) #'# fix syntax highlight
+O_TO_S_DISP = $(if $(V),"$(O_TO_S_STR)"," LD $(@)")
+O_TO_S_DO = @set -e; \
+ echo $(O_TO_S_DISP); \
+ $(O_TO_S) && \
+ echo $(O_TO_S_CMD) > $(call exe2cmd,$(@))
+
+-include .$(LIB_SO).cmd
+
+#
+# Archive objects in .a file if needed
+#
+$(LIB): $(OBJS-y) $(DEP_$(LIB)) FORCE
+ @[ -d $(dir $@) ] || mkdir -p $(dir $@)
+ $(if $(D),\
+ @echo -n "$< -> $@ " ; \
+ echo -n "file_missing=$(call boolean,$(file_missing)) " ; \
+ echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_A_STR))) " ; \
+ echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \
+ echo "depfile_newer=$(call boolean,$(depfile_newer)) ")
+ $(if $(or \
+ $(file_missing),\
+ $(call cmdline_changed,$(O_TO_A_STR)),\
+ $(depfile_missing),\
+ $(depfile_newer)),\
+ $(O_TO_A_DO))
+
+$(LIB_SO): $(OBJS-y) $(DEP_$(LIB_SO)) FORCE
+ifeq ($(LIBABIVER),)
+ @echo "Must Specify a $(LIB_SO) ABI version"
+ @false
+endif
+ @[ -d $(dir $@) ] || mkdir -p $(dir $@)
+ $(if $(D),\
+ @echo -n "$< -> $@ " ; \
+ echo -n "file_missing=$(call boolean,$(file_missing)) " ; \
+ echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_S_STR))) " ; \
+ echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \
+ echo "depfile_newer=$(call boolean,$(depfile_newer)) ")
+ $(if $(or \
+ $(file_missing),\
+ $(call cmdline_changed,$(O_TO_S_STR)),\
+ $(depfile_missing),\
+ $(depfile_newer)),\
+ $(O_TO_S_DO))
+
+#
+# install lib in $(RTE_OUTPUT)/lib
+#
+$(RTE_OUTPUT)/lib/$(LIB): $(LIB)
+ @echo " INSTALL-LIB $(LIB)"
+ @[ -d $(RTE_OUTPUT)/lib ] || mkdir -p $(RTE_OUTPUT)/lib
+ cp -f $(LIB) $(RTE_OUTPUT)/lib
+
+$(RTE_OUTPUT)/lib/$(LIB_SO): $(LIB_SO)
+ @echo " INSTALL-LIB $(LIB_SO)"
+ @[ -d $(RTE_OUTPUT)/lib ] || mkdir -p $(RTE_OUTPUT)/lib
+ cp -f $(LIB_SO) $(RTE_OUTPUT)/lib
+ ln -s -f $< $(shell echo $@ | sed 's/\.so.*/.so/')
+
+#
+# Clean all generated files
+#
+.PHONY: clean
+clean: _postclean
+
+.PHONY: doclean
+doclean:
+ $(Q)rm -rf $(LIB) $(LIB_SO) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \
+ $(CMDS-all) .$(LIB).cmd $(INSTALL-FILES-all) *.pmd.c *.pmd.o
+ $(Q)rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
+
+else
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+LIB := $(patsubst %.a,%.so.$(LIBABIVER),$(LIB))
+ifeq ($(EXTLIB_BUILD),n)
+CPU_LDFLAGS += --version-script=$(SRCDIR)/$(EXPORT_MAP)
+endif
+endif
+
O_TO_S = $(LD) -L$(RTE_SDK_BIN)/lib $(_CPU_LDFLAGS) $(EXTRA_LDFLAGS) \
-shared $(OBJS-y) $(NO_UNDEFINED) $(LDLIBS) -Wl,-soname,$(LIB) -o $(LIB)
O_TO_S_STR = $(subst ','\'',$(O_TO_S)) #'# fix syntax highlight
@@ -148,6 +234,8 @@ doclean:
$(CMDS-all) .$(LIB).cmd $(INSTALL-FILES-all) *.pmd.c *.pmd.o
$(Q)rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
+endif
+
include $(RTE_SDK)/mk/internal/rte.compile-post.mk
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
--
2.19.1

View File

@ -0,0 +1,45 @@
From 92f474b6b5f954d20b81576549a25ce8b7bc2a2b Mon Sep 17 00:00:00 2001
From: Chengwen Feng <fengchengwen@huawei.com>
Date: Thu, 14 Jan 2021 21:33:32 +0800
Subject: [PATCH 013/189] net/hns3: fix VF query link status in dev init
Current hns3vf queried link status in dev init stage, but the link
status should be maintained in dev start stage, this patch fix this.
Also, in the dev start stage, we use quick query instead of delayed
query to make sure update the link status soon.
Fixes: a5475d61fa34 ("net/hns3: support VF")
Fixes: 958edf6627d5 ("net/hns3: fix VF link status")
Cc: stable@dpdk.org
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev_vf.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index c126384..ee89505 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1749,7 +1749,6 @@ hns3vf_init_hardware(struct hns3_adapter *hns)
goto err_init_hardware;
}
- hns3vf_request_link_info(hw);
return 0;
err_init_hardware:
@@ -2238,7 +2237,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
hns3_rx_scattered_calc(dev);
hns3_set_rxtx_function(dev);
hns3_mp_req_start_rxtx(dev);
- rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
+ hns3vf_service_handler(dev);
hns3vf_restore_filter(dev);
--
2.7.4

View File

@ -0,0 +1,34 @@
From 1f68430b13d5aed1851c97761163b44b38d4f37d Mon Sep 17 00:00:00 2001
From: Hongbo Zheng <zhenghongbo3@huawei.com>
Date: Thu, 14 Jan 2021 21:33:33 +0800
Subject: [PATCH 014/189] net/hns3: use new opcode for clearing hardware
resource
The original command opcode '0x700A' may cause firmware error,
so '0x700A' is deserted, now use '0x700B' to replace it.
Fixes: 223d9eceaeee ("net/hns3: clear residual hardware configurations on init")
Cc: stable@dpdk.org
Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_cmd.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 194c3a7..e40293b 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -203,7 +203,7 @@ enum hns3_opcode_type {
HNS3_OPC_FD_COUNTER_OP = 0x1205,
/* Clear hardware state command */
- HNS3_OPC_CLEAR_HW_STATE = 0x700A,
+ HNS3_OPC_CLEAR_HW_STATE = 0x700B,
/* SFP command */
HNS3_OPC_SFP_GET_SPEED = 0x7104,
--
2.7.4

View File

@ -0,0 +1,59 @@
From 7fab993aa57ba5f2e4bce07949de602e1a40daf1 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Thu, 14 Jan 2021 21:33:34 +0800
Subject: [PATCH 015/189] net/hns3: fix register length when dumping registers
Currently, the reg length return by HNS3 is the total length of all the
registers. But for upper layer user, the total register length is the
length multiplied by width. This can lead to a waste of memory and print
some invalid information.
This patch corrects the length and width of the register.
Fixes: 936eda25e8da ("net/hns3: support dump register")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_regs.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c
index b2cc599..32597fe 100644
--- a/drivers/net/hns3/hns3_regs.c
+++ b/drivers/net/hns3/hns3_regs.c
@@ -104,6 +104,7 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length)
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
uint32_t cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
uint32_t regs_num_32_bit, regs_num_64_bit;
+ uint32_t dfx_reg_lines;
uint32_t len;
int ret;
@@ -117,7 +118,7 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length)
tqp_intr_lines = sizeof(tqp_intr_reg_addrs) / REG_LEN_PER_LINE + 1;
len = (cmdq_lines + common_lines + ring_lines * hw->tqps_num +
- tqp_intr_lines * hw->num_msi) * REG_LEN_PER_LINE;
+ tqp_intr_lines * hw->num_msi) * REG_NUM_PER_LINE;
if (!hns->is_vf) {
ret = hns3_get_regs_num(hw, &regs_num_32_bit, &regs_num_64_bit);
@@ -126,8 +127,11 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length)
ret);
return -ENOTSUP;
}
- len += regs_num_32_bit * sizeof(uint32_t) +
- regs_num_64_bit * sizeof(uint64_t);
+ dfx_reg_lines = regs_num_32_bit * sizeof(uint32_t) /
+ REG_LEN_PER_LINE + 1;
+ dfx_reg_lines += regs_num_64_bit * sizeof(uint64_t) /
+ REG_LEN_PER_LINE + 1;
+ len += dfx_reg_lines * REG_NUM_PER_LINE;
}
*length = len;
--
2.7.4

View File

@ -0,0 +1,152 @@
From 3c4b289d438451fa8e3c520bef4b1a32d68f4bea Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Thu, 14 Jan 2021 21:33:35 +0800
Subject: [PATCH 016/189] net/hns3: fix data overwriting during register dump
The data pointer was not moved after the BAR registers were dumped. This
causes the later registers to overwrite the previous data.
This patch fixes the overwriting by moving the pointer after every dump
function. The missing separator between the 32-bit registers and the
64-bit registers is also added to avoid a parsing error.
Fixes: 936eda25e8da ("net/hns3: support dump register")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_regs.c | 70 +++++++++++++++++++++++++-------------------
1 file changed, 40 insertions(+), 30 deletions(-)
diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c
index 32597fe..775e096 100644
--- a/drivers/net/hns3/hns3_regs.c
+++ b/drivers/net/hns3/hns3_regs.c
@@ -252,63 +252,68 @@ hns3_get_64_bit_regs(struct hns3_hw *hw, uint32_t regs_num, void *data)
return 0;
}
-static void
+static int
+hns3_insert_reg_separator(int reg_num, uint32_t *data)
+{
+ int separator_num;
+ int i;
+
+ separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
+ for (i = 0; i < separator_num; i++)
+ *data++ = SEPARATOR_VALUE;
+ return separator_num;
+}
+
+static int
hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data)
{
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ uint32_t *origin_data_ptr = data;
uint32_t reg_offset;
- int separator_num;
- int reg_um;
+ int reg_num;
int i, j;
/* fetching per-PF registers values from PF PCIe register space */
- reg_um = sizeof(cmdq_reg_addrs) / sizeof(uint32_t);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = sizeof(cmdq_reg_addrs) / sizeof(uint32_t);
+ for (i = 0; i < reg_num; i++)
*data++ = hns3_read_dev(hw, cmdq_reg_addrs[i]);
- for (i = 0; i < separator_num; i++)
- *data++ = SEPARATOR_VALUE;
+ data += hns3_insert_reg_separator(reg_num, data);
if (hns->is_vf)
- reg_um = sizeof(common_vf_reg_addrs) / sizeof(uint32_t);
+ reg_num = sizeof(common_vf_reg_addrs) / sizeof(uint32_t);
else
- reg_um = sizeof(common_reg_addrs) / sizeof(uint32_t);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = sizeof(common_reg_addrs) / sizeof(uint32_t);
+ for (i = 0; i < reg_num; i++)
if (hns->is_vf)
*data++ = hns3_read_dev(hw, common_vf_reg_addrs[i]);
else
*data++ = hns3_read_dev(hw, common_reg_addrs[i]);
- for (i = 0; i < separator_num; i++)
- *data++ = SEPARATOR_VALUE;
+ data += hns3_insert_reg_separator(reg_num, data);
- reg_um = sizeof(ring_reg_addrs) / sizeof(uint32_t);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = sizeof(ring_reg_addrs) / sizeof(uint32_t);
for (j = 0; j < hw->tqps_num; j++) {
reg_offset = hns3_get_tqp_reg_offset(j);
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*data++ = hns3_read_dev(hw,
ring_reg_addrs[i] + reg_offset);
- for (i = 0; i < separator_num; i++)
- *data++ = SEPARATOR_VALUE;
+ data += hns3_insert_reg_separator(reg_num, data);
}
- reg_um = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t);
for (j = 0; j < hw->num_msi; j++) {
reg_offset = HNS3_TQP_INTR_REG_SIZE * j;
- for (i = 0; i < reg_um; i++)
- *data++ = hns3_read_dev(hw,
- tqp_intr_reg_addrs[i] +
+ for (i = 0; i < reg_num; i++)
+ *data++ = hns3_read_dev(hw, tqp_intr_reg_addrs[i] +
reg_offset);
- for (i = 0; i < separator_num; i++)
- *data++ = SEPARATOR_VALUE;
+ data += hns3_insert_reg_separator(reg_num, data);
}
+ return data - origin_data_ptr;
}
int
hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
{
+#define HNS3_64_BIT_REG_SIZE (sizeof(uint64_t) / sizeof(uint32_t))
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
uint32_t regs_num_32_bit;
@@ -338,7 +343,7 @@ hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
return -ENOTSUP;
/* fetching per-PF registers values from PF PCIe register space */
- hns3_direct_access_regs(hw, data);
+ data += hns3_direct_access_regs(hw, data);
if (hns->is_vf)
return 0;
@@ -355,11 +360,16 @@ hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
hns3_err(hw, "Get 32 bit register failed, ret = %d", ret);
return ret;
}
-
data += regs_num_32_bit;
+ data += hns3_insert_reg_separator(regs_num_32_bit, data);
+
ret = hns3_get_64_bit_regs(hw, regs_num_64_bit, data);
- if (ret)
+ if (ret) {
hns3_err(hw, "Get 64 bit register failed, ret = %d", ret);
-
+ return ret;
+ }
+ data += regs_num_64_bit * HNS3_64_BIT_REG_SIZE;
+ data += hns3_insert_reg_separator(regs_num_64_bit *
+ HNS3_64_BIT_REG_SIZE, data);
return ret;
}
--
2.7.4

View File

@ -0,0 +1,38 @@
From a69abf12f46295044e4e59b60e49e73bc66afe10 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Thu, 14 Jan 2021 21:33:36 +0800
Subject: [PATCH 017/189] net/hns3: fix dump register out of range
Currently, when dumping the queue interrupt registers, the number of
registers that should be dumped is calculated from num_msi. But the
value of num_msi includes the number of misc interrupts. So, for some
hardware versions, like Kunpeng 930, it will lead to an illegal access.
This patch replaces num_msi with intr_tqps_num, which indicates the
number of interrupts used by the tqps.
Fixes: 936eda25e8da ("net/hns3: support dump register")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_regs.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c
index 775e096..f2cb465 100644
--- a/drivers/net/hns3/hns3_regs.c
+++ b/drivers/net/hns3/hns3_regs.c
@@ -300,7 +300,7 @@ hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data)
}
reg_num = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t);
- for (j = 0; j < hw->num_msi; j++) {
+ for (j = 0; j < hw->intr_tqps_num; j++) {
reg_offset = HNS3_TQP_INTR_REG_SIZE * j;
for (i = 0; i < reg_num; i++)
*data++ = hns3_read_dev(hw, tqp_intr_reg_addrs[i] +
--
2.7.4

View File

@ -0,0 +1,74 @@
From 3405d3daec40b258341eeccc5e07c0a9cfd29e6e Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Thu, 14 Jan 2021 21:33:37 +0800
Subject: [PATCH 018/189] net/hns3: remove unused assignment for RSS key
The default RSS key does not need to be configured repeatedly
when calling the hns3_dev_configure function with a NULL RSS key,
because the default RSS key has already been configured when the PMD
driver runs the hns3_do_start function while starting the device.
Besides, the initialized key will not be overwritten if the
rte_eth_dev_configure API is called directly with a NULL RSS key
after the PMD driver is initialized.
Therefore, the assignment of the RSS key in the hns3_dev_configure
function is unnecessary.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 6 ------
drivers/net/hns3/hns3_ethdev_vf.c | 6 ------
2 files changed, 12 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 2bc28ef..449d967 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2316,7 +2316,6 @@ hns3_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_conf *conf = &dev->data->dev_conf;
enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
struct hns3_hw *hw = &hns->hw;
- struct hns3_rss_conf *rss_cfg = &hw->rss_info;
uint16_t nb_rx_q = dev->data->nb_rx_queues;
uint16_t nb_tx_q = dev->data->nb_tx_queues;
struct rte_eth_rss_conf rss_conf;
@@ -2363,11 +2362,6 @@ hns3_dev_configure(struct rte_eth_dev *dev)
conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
rss_conf = conf->rx_adv_conf.rss_conf;
hw->rss_dis_flag = false;
- if (rss_conf.rss_key == NULL) {
- rss_conf.rss_key = rss_cfg->key;
- rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
- }
-
ret = hns3_dev_rss_hash_update(dev, &rss_conf);
if (ret)
goto cfg_err;
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index ee89505..bb4ec6b 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -773,7 +773,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- struct hns3_rss_conf *rss_cfg = &hw->rss_info;
struct rte_eth_conf *conf = &dev->data->dev_conf;
enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
uint16_t nb_rx_q = dev->data->nb_rx_queues;
@@ -816,11 +815,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
hw->rss_dis_flag = false;
rss_conf = conf->rx_adv_conf.rss_conf;
- if (rss_conf.rss_key == NULL) {
- rss_conf.rss_key = rss_cfg->key;
- rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
- }
-
ret = hns3_dev_rss_hash_update(dev, &rss_conf);
if (ret)
goto cfg_err;
--
2.7.4

View File

@ -0,0 +1,704 @@
From e5d1fe93d832492bd12a0a01c37d2e326d2701f4 Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Fri, 22 Jan 2021 18:18:39 +0800
Subject: [PATCH 019/189] net/hns3: encapsulate DFX stats in datapath
pkt_len_errors and l2_errors in the Rx datapath indicate that the
driver needs to discard received packets. The driver does not discard
packets for l3/l4/ol3/ol4_csum_errors in the Rx datapath, nor for the
other stats in the Tx datapath. Therefore, to improve code readability
and maintainability, it is necessary to encapsulate the error stats
and dfx stats separately.
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_rxtx.c | 30 ++---
drivers/net/hns3/hns3_rxtx.h | 134 ++++++++++---------
drivers/net/hns3/hns3_rxtx_vec_neon.h | 2 +-
drivers/net/hns3/hns3_stats.c | 243 ++++++++++++++++++++++------------
drivers/net/hns3/hns3_stats.h | 9 +-
5 files changed, 251 insertions(+), 167 deletions(-)
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 0badfc9..3d5f74f 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1792,12 +1792,8 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
HNS3_RING_RX_HEAD_REG);
rxq->rx_buf_len = rx_buf_size;
- rxq->l2_errors = 0;
- rxq->pkt_len_errors = 0;
- rxq->l3_csum_errors = 0;
- rxq->l4_csum_errors = 0;
- rxq->ol3_csum_errors = 0;
- rxq->ol4_csum_errors = 0;
+ memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
+ memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
/* CRC len set here is used for amending packet length */
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
@@ -2622,12 +2618,8 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
HNS3_RING_TX_TAIL_REG);
txq->min_tx_pkt_len = hw->min_tx_pkt_len;
txq->tso_mode = hw->tso_mode;
- txq->over_length_pkt_cnt = 0;
- txq->exceed_limit_bd_pkt_cnt = 0;
- txq->exceed_limit_bd_reassem_fail = 0;
- txq->unsupported_tunnel_pkt_cnt = 0;
- txq->queue_full_cnt = 0;
- txq->pkt_padding_fail_cnt = 0;
+ memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
+
rte_spinlock_lock(&hw->lock);
dev->data->tx_queues[idx] = txq;
rte_spinlock_unlock(&hw->lock);
@@ -3350,7 +3342,7 @@ hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
/* Fill in tunneling parameters if necessary */
if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
- txq->unsupported_tunnel_pkt_cnt++;
+ txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
return -EINVAL;
}
@@ -3380,17 +3372,17 @@ hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
* driver support, the packet will be ignored.
*/
if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
- txq->over_length_pkt_cnt++;
+ txq->dfx_stats.over_length_pkt_cnt++;
return -EINVAL;
}
max_non_tso_bd_num = txq->max_non_tso_bd_num;
if (unlikely(nb_buf > max_non_tso_bd_num)) {
- txq->exceed_limit_bd_pkt_cnt++;
+ txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
max_non_tso_bd_num);
if (ret) {
- txq->exceed_limit_bd_reassem_fail++;
+ txq->dfx_stats.exceed_limit_bd_reassem_fail++;
return ret;
}
*m_seg = new_pkt;
@@ -3528,7 +3520,7 @@ hns3_xmit_pkts_simple(void *tx_queue,
nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
if (unlikely(nb_pkts == 0)) {
if (txq->tx_bd_ready == 0)
- txq->queue_full_cnt++;
+ txq->dfx_stats.queue_full_cnt++;
return 0;
}
@@ -3580,7 +3572,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
nb_buf = tx_pkt->nb_segs;
if (nb_buf > txq->tx_bd_ready) {
- txq->queue_full_cnt++;
+ txq->dfx_stats.queue_full_cnt++;
if (nb_tx == 0)
return 0;
@@ -3601,7 +3593,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_pktmbuf_pkt_len(tx_pkt);
appended = rte_pktmbuf_append(tx_pkt, add_len);
if (appended == NULL) {
- txq->pkt_padding_fail_cnt++;
+ txq->dfx_stats.pkt_padding_fail_cnt++;
break;
}
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 6538848..8a0c981 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -266,6 +266,18 @@ struct hns3_entry {
struct rte_mbuf *mbuf;
};
+struct hns3_rx_dfx_stats {
+ uint64_t l3_csum_errors;
+ uint64_t l4_csum_errors;
+ uint64_t ol3_csum_errors;
+ uint64_t ol4_csum_errors;
+};
+
+struct hns3_rx_bd_errors_stats {
+ uint64_t l2_errors;
+ uint64_t pkt_len_errors;
+};
+
struct hns3_rx_queue {
void *io_base;
volatile void *io_head_reg;
@@ -312,12 +324,10 @@ struct hns3_rx_queue {
bool pvid_sw_discard_en;
bool enabled; /* indicate if Rx queue has been enabled */
- uint64_t l2_errors;
- uint64_t pkt_len_errors;
- uint64_t l3_csum_errors;
- uint64_t l4_csum_errors;
- uint64_t ol3_csum_errors;
- uint64_t ol4_csum_errors;
+ /* DFX statistics that driver does not need to discard packets */
+ struct hns3_rx_dfx_stats dfx_stats;
+ /* Error statistics that driver needs to discard packets */
+ struct hns3_rx_bd_errors_stats err_stats;
struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
uint16_t bulk_mbuf_num;
@@ -328,6 +338,57 @@ struct hns3_rx_queue {
struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
};
+/*
+ * The following items are used for the abnormal errors statistics in
+ * the Tx datapath. When upper level application calls the
+ * rte_eth_tx_burst API function to send multiple packets at a time with
+ * burst mode based on hns3 network engine, there are some abnormal
+ * conditions that cause the driver to fail to operate the hardware to
+ * send packets correctly.
+ * Note: When using burst mode to call the rte_eth_tx_burst API function
+ * to send multiple packets at a time. When the first abnormal error is
+ * detected, add one to the relevant error statistics item, and then
+ * exit the loop of sending multiple packets of the function. That is to
+ * say, even if there are multiple packets in which abnormal errors may
+ * be detected in the burst, the relevant error statistics in the driver
+ * will only be increased by one.
+ * The detail description of the Tx abnormal errors statistic items as
+ * below:
+ * - over_length_pkt_cnt
+ * Total number of greater than HNS3_MAX_FRAME_LEN the driver
+ * supported.
+ *
+ * - exceed_limit_bd_pkt_cnt
+ * Total number of exceeding the hardware limited bd which process
+ * a packet needed bd numbers.
+ *
+ * - exceed_limit_bd_reassem_fail
+ * Total number of exceeding the hardware limited bd fail which
+ * process a packet needed bd numbers and reassemble fail.
+ *
+ * - unsupported_tunnel_pkt_cnt
+ * Total number of unsupported tunnel packet. The unsupported tunnel
+ * type: vxlan_gpe, gtp, ipip and MPLSINUDP, MPLSINUDP is a packet
+ * with MPLS-in-UDP RFC 7510 header.
+ *
+ * - queue_full_cnt
+ * Total count which the available bd numbers in current bd queue is
+ * less than the bd numbers with the pkt process needed.
+ *
+ * - pkt_padding_fail_cnt
+ * Total count which the packet length is less than minimum packet
+ * length(struct hns3_tx_queue::min_tx_pkt_len) supported by
+ * hardware in Tx direction and fail to be appended with 0.
+ */
+struct hns3_tx_dfx_stats {
+ uint64_t over_length_pkt_cnt;
+ uint64_t exceed_limit_bd_pkt_cnt;
+ uint64_t exceed_limit_bd_reassem_fail;
+ uint64_t unsupported_tunnel_pkt_cnt;
+ uint64_t queue_full_cnt;
+ uint64_t pkt_padding_fail_cnt;
+};
+
struct hns3_tx_queue {
void *io_base;
volatile void *io_tail_reg;
@@ -411,54 +472,7 @@ struct hns3_tx_queue {
bool pvid_sw_shift_en;
bool enabled; /* indicate if Tx queue has been enabled */
- /*
- * The following items are used for the abnormal errors statistics in
- * the Tx datapath. When upper level application calls the
- * rte_eth_tx_burst API function to send multiple packets at a time with
- * burst mode based on hns3 network engine, there are some abnormal
- * conditions that cause the driver to fail to operate the hardware to
- * send packets correctly.
- * Note: When using burst mode to call the rte_eth_tx_burst API function
- * to send multiple packets at a time. When the first abnormal error is
- * detected, add one to the relevant error statistics item, and then
- * exit the loop of sending multiple packets of the function. That is to
- * say, even if there are multiple packets in which abnormal errors may
- * be detected in the burst, the relevant error statistics in the driver
- * will only be increased by one.
- * The detail description of the Tx abnormal errors statistic items as
- * below:
- * - over_length_pkt_cnt
- * Total number of greater than HNS3_MAX_FRAME_LEN the driver
- * supported.
- *
- * - exceed_limit_bd_pkt_cnt
- * Total number of exceeding the hardware limited bd which process
- * a packet needed bd numbers.
- *
- * - exceed_limit_bd_reassem_fail
- * Total number of exceeding the hardware limited bd fail which
- * process a packet needed bd numbers and reassemble fail.
- *
- * - unsupported_tunnel_pkt_cnt
- * Total number of unsupported tunnel packet. The unsupported tunnel
- * type: vxlan_gpe, gtp, ipip and MPLSINUDP, MPLSINUDP is a packet
- * with MPLS-in-UDP RFC 7510 header.
- *
- * - queue_full_cnt
- * Total count which the available bd numbers in current bd queue is
- * less than the bd numbers with the pkt process needed.
- *
- * - pkt_padding_fail_cnt
- * Total count which the packet length is less than minimum packet
- * length(struct hns3_tx_queue::min_tx_pkt_len) supported by
- * hardware in Tx direction and fail to be appended with 0.
- */
- uint64_t over_length_pkt_cnt;
- uint64_t exceed_limit_bd_pkt_cnt;
- uint64_t exceed_limit_bd_reassem_fail;
- uint64_t unsupported_tunnel_pkt_cnt;
- uint64_t queue_full_cnt;
- uint64_t pkt_padding_fail_cnt;
+ struct hns3_tx_dfx_stats dfx_stats;
};
#define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
@@ -511,9 +525,9 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
if (l234_info & BIT(HNS3_RXD_L2E_B))
- rxq->l2_errors++;
+ rxq->err_stats.l2_errors++;
else
- rxq->pkt_len_errors++;
+ rxq->err_stats.pkt_len_errors++;
return -EINVAL;
}
@@ -525,24 +539,24 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
- rxq->l3_csum_errors++;
+ rxq->dfx_stats.l3_csum_errors++;
tmp |= HNS3_L3_CKSUM_ERR;
}
if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
- rxq->l4_csum_errors++;
+ rxq->dfx_stats.l4_csum_errors++;
tmp |= HNS3_L4_CKSUM_ERR;
}
if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
- rxq->ol3_csum_errors++;
+ rxq->dfx_stats.ol3_csum_errors++;
tmp |= HNS3_OUTER_L3_CKSUM_ERR;
}
if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
- rxq->ol4_csum_errors++;
+ rxq->dfx_stats.ol4_csum_errors++;
tmp |= HNS3_OUTER_L4_CKSUM_ERR;
}
}
diff --git a/drivers/net/hns3/hns3_rxtx_vec_neon.h b/drivers/net/hns3/hns3_rxtx_vec_neon.h
index 54addbf..a693b4b 100644
--- a/drivers/net/hns3/hns3_rxtx_vec_neon.h
+++ b/drivers/net/hns3/hns3_rxtx_vec_neon.h
@@ -42,7 +42,7 @@ hns3_xmit_fixed_burst_vec(void *__restrict tx_queue,
nb_commit = RTE_MIN(txq->tx_bd_ready, nb_pkts);
if (unlikely(nb_commit == 0)) {
- txq->queue_full_cnt++;
+ txq->dfx_stats.queue_full_cnt++;
return 0;
}
nb_tx = nb_commit;
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index 62a712b..419d7e2 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -262,34 +262,38 @@ static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
/* The statistic of errors in Rx BD */
static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
- {"RX_PKT_LEN_ERRORS",
+ {"PKT_LEN_ERRORS",
HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
- {"L2_RX_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)},
- {"RX_L3_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l3_csum_errors)},
- {"RX_L4_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l4_csum_errors)},
- {"RX_OL3_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol3_csum_errors)},
- {"RX_OL4_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol4_csum_errors)}
+ {"L2_ERRORS",
+ HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}
};
-/* The statistic of the Tx errors */
-static const struct hns3_xstats_name_offset hns3_tx_errors_strings[] = {
- {"TX_OVER_LENGTH_PKT_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
- {"TX_EXCEED_LIMITED_BD_PKT_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
- {"TX_EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
- {"TX_UNSUPPORTED_TUNNEL_PKT_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
- {"TX_QUEUE_FULL_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(queue_full_cnt)},
- {"TX_SHORT_PKT_PAD_FAIL_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
+/* The dfx statistic in Rx datapath */
+static const struct hns3_xstats_name_offset hns3_rxq_dfx_stats_strings[] = {
+ {"L3_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l3_csum_errors)},
+ {"L4_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l4_csum_errors)},
+ {"OL3_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol3_csum_errors)},
+ {"OL4_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol4_csum_errors)}
+};
+
+/* The dfx statistic in Tx datapath */
+static const struct hns3_xstats_name_offset hns3_txq_dfx_stats_strings[] = {
+ {"OVER_LENGTH_PKT_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
+ {"EXCEED_LIMITED_BD_PKT_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
+ {"EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
+ {"UNSUPPORTED_TUNNEL_PKT_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
+ {"QUEUE_FULL_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(queue_full_cnt)},
+ {"SHORT_PKT_PAD_FAIL_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
};
/* The statistic of rx queue */
@@ -314,8 +318,11 @@ static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
sizeof(hns3_rx_bd_error_strings[0]))
-#define HNS3_NUM_TX_ERRORS_XSTATS (sizeof(hns3_tx_errors_strings) / \
- sizeof(hns3_tx_errors_strings[0]))
+#define HNS3_NUM_RXQ_DFX_XSTATS (sizeof(hns3_rxq_dfx_stats_strings) / \
+ sizeof(hns3_rxq_dfx_stats_strings[0]))
+
+#define HNS3_NUM_TXQ_DFX_XSTATS (sizeof(hns3_txq_dfx_stats_strings) / \
+ sizeof(hns3_txq_dfx_stats_strings[0]))
#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
sizeof(hns3_rx_queue_strings[0]))
@@ -519,7 +526,8 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
for (i = 0; i != num; ++i) {
rxq = eth_dev->data->rx_queues[i];
if (rxq) {
- cnt = rxq->l2_errors + rxq->pkt_len_errors;
+ cnt = rxq->err_stats.l2_errors +
+ rxq->err_stats.pkt_len_errors;
rte_stats->q_errors[i] = cnt;
rte_stats->q_ipackets[i] =
stats->rcb_rx_ring_pktnum[i] - cnt;
@@ -584,11 +592,11 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
* Clear soft stats of rx error packet which will be dropped
* in driver.
*/
- for (i = 0; i < eth_dev->data->nb_rx_queues; ++i) {
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
if (rxq) {
- rxq->pkt_len_errors = 0;
- rxq->l2_errors = 0;
+ rxq->err_stats.pkt_len_errors = 0;
+ rxq->err_stats.l2_errors = 0;
}
}
@@ -621,21 +629,24 @@ static int
hns3_xstats_calc_num(struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
- int bderr_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS;
- int tx_err_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_ERRORS_XSTATS;
- int rx_queue_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_QUEUE_STATS;
- int tx_queue_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_QUEUE_STATS;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+ int bderr_stats = nb_rx_q * HNS3_NUM_RX_BD_ERROR_XSTATS;
+ int rx_dfx_stats = nb_rx_q * HNS3_NUM_RXQ_DFX_XSTATS;
+ int tx_dfx_stats = nb_tx_q * HNS3_NUM_TXQ_DFX_XSTATS;
+ int rx_queue_stats = nb_rx_q * HNS3_NUM_RX_QUEUE_STATS;
+ int tx_queue_stats = nb_tx_q * HNS3_NUM_TX_QUEUE_STATS;
if (hns->is_vf)
- return bderr_stats + tx_err_stats + rx_queue_stats +
- tx_queue_stats + HNS3_NUM_RESET_XSTATS;
+ return bderr_stats + rx_dfx_stats + tx_dfx_stats +
+ rx_queue_stats + tx_queue_stats + HNS3_NUM_RESET_XSTATS;
else
- return bderr_stats + tx_err_stats + rx_queue_stats +
- tx_queue_stats + HNS3_FIX_NUM_STATS;
+ return bderr_stats + rx_dfx_stats + tx_dfx_stats +
+ rx_queue_stats + tx_queue_stats + HNS3_FIX_NUM_STATS;
}
static void
-hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
int *count)
{
struct hns3_adapter *hns = dev->data->dev_private;
@@ -683,6 +694,63 @@ hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err)
}
}
+static void
+hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_rx_dfx_stats *dfx_stats;
+ struct hns3_rx_queue *rxq;
+ uint16_t i, j;
+ char *val;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = (struct hns3_rx_queue *)dev->data->rx_queues[i];
+ if (rxq == NULL)
+ continue;
+
+ dfx_stats = &rxq->dfx_stats;
+ for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
+ val = (char *)dfx_stats +
+ hns3_rxq_dfx_stats_strings[j].offset;
+ xstats[*count].value = *(uint64_t *)val;
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+}
+
+static void
+hns3_txq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_tx_dfx_stats *dfx_stats;
+ struct hns3_tx_queue *txq;
+ uint16_t i, j;
+ char *val;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = (struct hns3_tx_queue *)dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+
+ dfx_stats = &txq->dfx_stats;
+ for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
+ val = (char *)dfx_stats +
+ hns3_txq_dfx_stats_strings[j].offset;
+ xstats[*count].value = *(uint64_t *)val;
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+}
+
+static void
+hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ hns3_rxq_dfx_stats_get(dev, xstats, count);
+ hns3_txq_dfx_stats_get(dev, xstats, count);
+}
/*
* Retrieve extended(tqp | Mac) statistics of an Ethernet device.
* @param dev
@@ -705,8 +773,8 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
struct hns3_hw *hw = &hns->hw;
struct hns3_mac_stats *mac_stats = &hw->mac_stats;
struct hns3_reset_stats *reset_stats = &hw->reset.stats;
+ struct hns3_rx_bd_errors_stats *rx_err_stats;
struct hns3_rx_queue *rxq;
- struct hns3_tx_queue *txq;
uint16_t i, j;
char *addr;
int count;
@@ -758,26 +826,49 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
for (j = 0; j < dev->data->nb_rx_queues; j++) {
for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
rxq = dev->data->rx_queues[j];
- addr = (char *)rxq + hns3_rx_bd_error_strings[i].offset;
- xstats[count].value = *(uint64_t *)addr;
- xstats[count].id = count;
- count++;
+ if (rxq) {
+ rx_err_stats = &rxq->err_stats;
+ addr = (char *)rx_err_stats +
+ hns3_rx_bd_error_strings[i].offset;
+ xstats[count].value = *(uint64_t *)addr;
+ xstats[count].id = count;
+ count++;
+ }
}
}
- /* Get the Tx errors stats */
- for (j = 0; j < dev->data->nb_tx_queues; j++) {
- for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
- txq = dev->data->tx_queues[j];
- addr = (char *)txq + hns3_tx_errors_strings[i].offset;
- xstats[count].value = *(uint64_t *)addr;
- xstats[count].id = count;
- count++;
+ hns3_tqp_dfx_stats_get(dev, xstats, &count);
+ hns3_queue_stats_get(dev, xstats, &count);
+
+ return count;
+}
+
+static void
+hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ uint32_t *count)
+{
+ uint16_t i, j;
+
+ for (j = 0; j < dev->data->nb_rx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_RXQ_DFX_XSTATS; i++) {
+ snprintf(xstats_names[*count].name,
+ sizeof(xstats_names[*count].name),
+ "rx_q%u_%s", j,
+ hns3_rxq_dfx_stats_strings[i].name);
+ (*count)++;
}
}
- hns3_get_queue_stats(dev, xstats, &count);
- return count;
+ for (j = 0; j < dev->data->nb_tx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_TXQ_DFX_XSTATS; i++) {
+ snprintf(xstats_names[*count].name,
+ sizeof(xstats_names[*count].name),
+ "tx_q%u_%s", j,
+ hns3_txq_dfx_stats_strings[i].name);
+ (*count)++;
+ }
+ }
}
/*
@@ -845,27 +936,19 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
- "rx_q%u%s", j,
+ "rx_q%u_%s", j,
hns3_rx_bd_error_strings[i].name);
count++;
}
}
- for (j = 0; j < dev->data->nb_tx_queues; j++) {
- for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "tx_q%u%s", j,
- hns3_tx_errors_strings[i].name);
- count++;
- }
- }
+ hns3_tqp_dfx_stats_name_get(dev, xstats_names, &count);
for (j = 0; j < dev->data->nb_rx_queues; j++) {
for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
- "rx_q%u%s", j, hns3_rx_queue_strings[i].name);
+ "rx_q%u_%s", j, hns3_rx_queue_strings[i].name);
count++;
}
}
@@ -874,7 +957,7 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
- "tx_q%u%s", j, hns3_tx_queue_strings[i].name);
+ "tx_q%u_%s", j, hns3_tx_queue_strings[i].name);
count++;
}
}
@@ -1043,30 +1126,22 @@ hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev)
{
struct hns3_rx_queue *rxq;
struct hns3_tx_queue *txq;
- int i;
+ uint16_t i;
/* Clear Rx dfx stats */
- for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (rxq) {
- rxq->l3_csum_errors = 0;
- rxq->l4_csum_errors = 0;
- rxq->ol3_csum_errors = 0;
- rxq->ol4_csum_errors = 0;
- }
+ if (rxq)
+ memset(&rxq->dfx_stats, 0,
+ sizeof(struct hns3_rx_dfx_stats));
}
/* Clear Tx dfx stats */
- for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if (txq) {
- txq->over_length_pkt_cnt = 0;
- txq->exceed_limit_bd_pkt_cnt = 0;
- txq->exceed_limit_bd_reassem_fail = 0;
- txq->unsupported_tunnel_pkt_cnt = 0;
- txq->queue_full_cnt = 0;
- txq->pkt_padding_fail_cnt = 0;
- }
+ if (txq)
+ memset(&txq->dfx_stats, 0,
+ sizeof(struct hns3_tx_dfx_stats));
}
}
diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h
index 9fcd5f9..12842cd 100644
--- a/drivers/net/hns3/hns3_stats.h
+++ b/drivers/net/hns3/hns3_stats.h
@@ -127,10 +127,13 @@ struct hns3_reset_stats;
(offsetof(struct hns3_reset_stats, f))
#define HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(f) \
- (offsetof(struct hns3_rx_queue, f))
+ (offsetof(struct hns3_rx_bd_errors_stats, f))
-#define HNS3_TX_ERROR_STATS_FIELD_OFFSET(f) \
- (offsetof(struct hns3_tx_queue, f))
+#define HNS3_RXQ_DFX_STATS_FIELD_OFFSET(f) \
+ (offsetof(struct hns3_rx_dfx_stats, f))
+
+#define HNS3_TXQ_DFX_STATS_FIELD_OFFSET(f) \
+ (offsetof(struct hns3_tx_dfx_stats, f))
int hns3_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
int hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
--
2.7.4

View File

@ -0,0 +1,505 @@
From e658ac6ea47f304cb8b0c9e8e100dd1016bcac0b Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Fri, 22 Jan 2021 18:18:40 +0800
Subject: [PATCH 020/189] net/hns3: move queue stats to xstats
One of the hot discussions in community recently was moving queue stats
to xstats. In this solution, a temporary
'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS' device flag is created to implement
the smooth switch. And the first half of this work has been completed in
the ethdev framework. Now driver needs to remove the flag from the
driver initialization process and does the rest of work.
For better readability and reasonability, per-queue stats also should be
cleared when rte_eth_stats is cleared. Otherwise, the sum of one item in
per-queue stats may be greater than corresponding item in rte_eth_stats.
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 2 -
drivers/net/hns3/hns3_ethdev_vf.c | 2 -
drivers/net/hns3/hns3_rxtx.c | 2 +
drivers/net/hns3/hns3_rxtx.h | 13 ++
drivers/net/hns3/hns3_stats.c | 241 +++++++++++++++++++++++++++++++-------
drivers/net/hns3/hns3_stats.h | 6 +
6 files changed, 221 insertions(+), 45 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 449d967..7c51e83 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -6148,8 +6148,6 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
-
ret = hns3_mp_init_primary();
if (ret) {
PMD_INIT_LOG(ERR,
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index bb4ec6b..37135d7 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2746,8 +2746,6 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
-
ret = hns3_mp_init_primary();
if (ret) {
PMD_INIT_LOG(ERR,
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 3d5f74f..30f1e06 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1792,6 +1792,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
HNS3_RING_RX_HEAD_REG);
rxq->rx_buf_len = rx_buf_size;
+ memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats));
memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
@@ -2618,6 +2619,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
HNS3_RING_TX_TAIL_REG);
txq->min_tx_pkt_len = hw->min_tx_pkt_len;
txq->tso_mode = hw->tso_mode;
+ memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
rte_spinlock_lock(&hw->lock);
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 8a0c981..331b507 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -266,6 +266,12 @@ struct hns3_entry {
struct rte_mbuf *mbuf;
};
+struct hns3_rx_basic_stats {
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+};
+
struct hns3_rx_dfx_stats {
uint64_t l3_csum_errors;
uint64_t l4_csum_errors;
@@ -324,6 +330,7 @@ struct hns3_rx_queue {
bool pvid_sw_discard_en;
bool enabled; /* indicate if Rx queue has been enabled */
+ struct hns3_rx_basic_stats basic_stats;
/* DFX statistics that driver does not need to discard packets */
struct hns3_rx_dfx_stats dfx_stats;
/* Error statistics that driver needs to discard packets */
@@ -338,6 +345,11 @@ struct hns3_rx_queue {
struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
};
+struct hns3_tx_basic_stats {
+ uint64_t packets;
+ uint64_t bytes;
+};
+
/*
* The following items are used for the abnormal errors statistics in
* the Tx datapath. When upper level application calls the
@@ -472,6 +484,7 @@ struct hns3_tx_queue {
bool pvid_sw_shift_en;
bool enabled; /* indicate if Tx queue has been enabled */
+ struct hns3_tx_basic_stats basic_stats;
struct hns3_tx_dfx_stats dfx_stats;
};
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index 419d7e2..3ba09e2 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -11,6 +11,24 @@
#include "hns3_logs.h"
#include "hns3_regs.h"
+/* The statistics of the per-rxq basic stats */
+static const struct hns3_xstats_name_offset hns3_rxq_basic_stats_strings[] = {
+ {"packets",
+ HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(packets)},
+ {"bytes",
+ HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(bytes)},
+ {"errors",
+ HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(errors)}
+};
+
+/* The statistics of the per-txq basic stats */
+static const struct hns3_xstats_name_offset hns3_txq_basic_stats_strings[] = {
+ {"packets",
+ HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(packets)},
+ {"bytes",
+ HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(bytes)}
+};
+
/* MAC statistics */
static const struct hns3_xstats_name_offset hns3_mac_strings[] = {
{"mac_tx_mac_pause_num",
@@ -330,6 +348,12 @@ static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
#define HNS3_NUM_TX_QUEUE_STATS (sizeof(hns3_tx_queue_strings) / \
sizeof(hns3_tx_queue_strings[0]))
+#define HNS3_NUM_RXQ_BASIC_STATS (sizeof(hns3_rxq_basic_stats_strings) / \
+ sizeof(hns3_rxq_basic_stats_strings[0]))
+
+#define HNS3_NUM_TXQ_BASIC_STATS (sizeof(hns3_txq_basic_stats_strings) / \
+ sizeof(hns3_txq_basic_stats_strings[0]))
+
#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \
HNS3_NUM_RESET_XSTATS)
@@ -508,9 +532,7 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
struct hns3_hw *hw = &hns->hw;
struct hns3_tqp_stats *stats = &hw->tqp_stats;
struct hns3_rx_queue *rxq;
- struct hns3_tx_queue *txq;
uint64_t cnt;
- uint64_t num;
uint16_t i;
int ret;
@@ -522,25 +544,14 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
}
/* Get the error stats of received packets */
- num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_rx_queues);
- for (i = 0; i != num; ++i) {
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
if (rxq) {
cnt = rxq->err_stats.l2_errors +
rxq->err_stats.pkt_len_errors;
- rte_stats->q_errors[i] = cnt;
- rte_stats->q_ipackets[i] =
- stats->rcb_rx_ring_pktnum[i] - cnt;
rte_stats->ierrors += cnt;
}
}
- /* Get the error stats of transmitted packets */
- num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_tx_queues);
- for (i = 0; i < num; i++) {
- txq = eth_dev->data->tx_queues[i];
- if (txq)
- rte_stats->q_opackets[i] = stats->rcb_tx_ring_pktnum[i];
- }
rte_stats->oerrors = 0;
rte_stats->ipackets = stats->rcb_rx_ring_pktnum_rcd -
@@ -600,6 +611,11 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
}
}
+ /*
+ * 'packets' in hns3_tx_basic_stats and hns3_rx_basic_stats come
+ * from hw->tqp_stats. And clearing tqp stats is like clearing
+ * their source.
+ */
hns3_tqp_stats_clear(hw);
return 0;
@@ -628,21 +644,26 @@ hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev)
static int
hns3_xstats_calc_num(struct rte_eth_dev *dev)
{
+#define HNS3_PF_VF_RX_COMM_STATS_NUM (HNS3_NUM_RX_BD_ERROR_XSTATS + \
+ HNS3_NUM_RXQ_DFX_XSTATS + \
+ HNS3_NUM_RX_QUEUE_STATS + \
+ HNS3_NUM_RXQ_BASIC_STATS)
+#define HNS3_PF_VF_TX_COMM_STATS_NUM (HNS3_NUM_TXQ_DFX_XSTATS + \
+ HNS3_NUM_TX_QUEUE_STATS + \
+ HNS3_NUM_TXQ_BASIC_STATS)
+
struct hns3_adapter *hns = dev->data->dev_private;
uint16_t nb_rx_q = dev->data->nb_rx_queues;
uint16_t nb_tx_q = dev->data->nb_tx_queues;
- int bderr_stats = nb_rx_q * HNS3_NUM_RX_BD_ERROR_XSTATS;
- int rx_dfx_stats = nb_rx_q * HNS3_NUM_RXQ_DFX_XSTATS;
- int tx_dfx_stats = nb_tx_q * HNS3_NUM_TXQ_DFX_XSTATS;
- int rx_queue_stats = nb_rx_q * HNS3_NUM_RX_QUEUE_STATS;
- int tx_queue_stats = nb_tx_q * HNS3_NUM_TX_QUEUE_STATS;
+ int rx_comm_stats_num = nb_rx_q * HNS3_PF_VF_RX_COMM_STATS_NUM;
+ int tx_comm_stats_num = nb_tx_q * HNS3_PF_VF_TX_COMM_STATS_NUM;
if (hns->is_vf)
- return bderr_stats + rx_dfx_stats + tx_dfx_stats +
- rx_queue_stats + tx_queue_stats + HNS3_NUM_RESET_XSTATS;
+ return rx_comm_stats_num + tx_comm_stats_num +
+ HNS3_NUM_RESET_XSTATS;
else
- return bderr_stats + rx_dfx_stats + tx_dfx_stats +
- rx_queue_stats + tx_queue_stats + HNS3_FIX_NUM_STATS;
+ return rx_comm_stats_num + tx_comm_stats_num +
+ HNS3_FIX_NUM_STATS;
}
static void
@@ -751,6 +772,118 @@ hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
hns3_rxq_dfx_stats_get(dev, xstats, count);
hns3_txq_dfx_stats_get(dev, xstats, count);
}
+
+static void
+hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tqp_stats *stats = &hw->tqp_stats;
+ struct hns3_rx_basic_stats *rxq_stats;
+ struct hns3_rx_queue *rxq;
+ uint16_t i, j;
+ char *val;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ continue;
+
+ rxq_stats = &rxq->basic_stats;
+ rxq_stats->errors = rxq->err_stats.l2_errors +
+ rxq->err_stats.pkt_len_errors;
+ rxq_stats->packets = stats->rcb_rx_ring_pktnum[i] -
+ rxq_stats->errors;
+ rxq_stats->bytes = 0;
+ for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
+ val = (char *)rxq_stats +
+ hns3_rxq_basic_stats_strings[j].offset;
+ xstats[*count].value = *(uint64_t *)val;
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+}
+
+static void
+hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tqp_stats *stats = &hw->tqp_stats;
+ struct hns3_tx_basic_stats *txq_stats;
+ struct hns3_tx_queue *txq;
+ uint16_t i, j;
+ char *val;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+
+ txq_stats = &txq->basic_stats;
+ txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
+ txq_stats->bytes = 0;
+ for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
+ val = (char *)txq_stats +
+ hns3_txq_basic_stats_strings[j].offset;
+ xstats[*count].value = *(uint64_t *)val;
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+}
+
+static int
+hns3_tqp_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ /* Update tqp stats by read register */
+ ret = hns3_update_tqp_stats(hw);
+ if (ret) {
+ hns3_err(hw, "Update tqp stats fail, ret = %d.", ret);
+ return ret;
+ }
+
+ hns3_rxq_basic_stats_get(dev, xstats, count);
+ hns3_txq_basic_stats_get(dev, xstats, count);
+
+ return 0;
+}
+
+/*
+ * The function is only called by hns3_dev_xstats_reset to clear
+ * basic stats of per-queue. TQP stats are all cleared in hns3_stats_reset
+ * which is called before this function.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+hns3_tqp_basic_stats_clear(struct rte_eth_dev *dev)
+{
+ struct hns3_tx_queue *txq;
+ struct hns3_rx_queue *rxq;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq)
+ memset(&rxq->basic_stats, 0,
+ sizeof(struct hns3_rx_basic_stats));
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq)
+ memset(&txq->basic_stats, 0,
+ sizeof(struct hns3_tx_basic_stats));
+ }
+}
+
/*
* Retrieve extended(tqp | Mac) statistics of an Ethernet device.
* @param dev
@@ -789,6 +922,10 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count = 0;
+ ret = hns3_tqp_basic_stats_get(dev, xstats, &count);
+ if (ret < 0)
+ return ret;
+
if (!hns->is_vf) {
/* Update Mac stats */
ret = hns3_query_update_mac_stats(dev);
@@ -844,28 +981,55 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
}
static void
+hns3_tqp_basic_stats_name_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ uint32_t *count)
+{
+ uint16_t i, j;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
+ snprintf(xstats_names[*count].name,
+ sizeof(xstats_names[*count].name),
+ "rx_q%u_%s", i,
+ hns3_rxq_basic_stats_strings[j].name);
+ (*count)++;
+ }
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
+ snprintf(xstats_names[*count].name,
+ sizeof(xstats_names[*count].name),
+ "tx_q%u_%s", i,
+ hns3_txq_basic_stats_strings[j].name);
+ (*count)++;
+ }
+ }
+}
+
+static void
hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
uint32_t *count)
{
uint16_t i, j;
- for (j = 0; j < dev->data->nb_rx_queues; j++) {
- for (i = 0; i < HNS3_NUM_RXQ_DFX_XSTATS; i++) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
snprintf(xstats_names[*count].name,
sizeof(xstats_names[*count].name),
- "rx_q%u_%s", j,
- hns3_rxq_dfx_stats_strings[i].name);
+ "rx_q%u_%s", i,
+ hns3_rxq_dfx_stats_strings[j].name);
(*count)++;
}
}
- for (j = 0; j < dev->data->nb_tx_queues; j++) {
- for (i = 0; i < HNS3_NUM_TXQ_DFX_XSTATS; i++) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
snprintf(xstats_names[*count].name,
sizeof(xstats_names[*count].name),
- "tx_q%u_%s", j,
- hns3_txq_dfx_stats_strings[i].name);
+ "tx_q%u_%s", i,
+ hns3_txq_dfx_stats_strings[j].name);
(*count)++;
}
}
@@ -908,6 +1072,8 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
if (xstats_names == NULL)
return cnt_stats;
+ hns3_tqp_basic_stats_name_get(dev, xstats_names, &count);
+
/* Note: size limited checked in rte_eth_xstats_get_names() */
if (!hns->is_vf) {
/* Get MAC name from hw->hw_xstats.mac_stats struct */
@@ -999,7 +1165,6 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
uint32_t count_value;
uint64_t len;
uint32_t i;
- int ret;
if (ids == NULL && values == NULL)
return cnt_stats;
@@ -1008,13 +1173,6 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
if (size < cnt_stats)
return cnt_stats;
- /* Update tqp stats by read register */
- ret = hns3_update_tqp_stats(hw);
- if (ret) {
- hns3_err(hw, "Update tqp stats fail : %d", ret);
- return ret;
- }
-
len = cnt_stats * sizeof(struct rte_eth_xstat);
values_copy = rte_zmalloc("hns3_xstats_values", len, 0);
if (values_copy == NULL) {
@@ -1157,11 +1315,12 @@ hns3_dev_xstats_reset(struct rte_eth_dev *dev)
if (ret)
return ret;
+ hns3_tqp_basic_stats_clear(dev);
+ hns3_tqp_dfx_stats_clear(dev);
+
/* Clear reset stats */
memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
- hns3_tqp_dfx_stats_clear(dev);
-
if (hns->is_vf)
return 0;
diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h
index 12842cd..d213be5 100644
--- a/drivers/net/hns3/hns3_stats.h
+++ b/drivers/net/hns3/hns3_stats.h
@@ -135,6 +135,12 @@ struct hns3_reset_stats;
#define HNS3_TXQ_DFX_STATS_FIELD_OFFSET(f) \
(offsetof(struct hns3_tx_dfx_stats, f))
+#define HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(f) \
+ (offsetof(struct hns3_rx_basic_stats, f))
+
+#define HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(f) \
+ (offsetof(struct hns3_tx_basic_stats, f))
+
int hns3_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
int hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned int n);
--
2.7.4

View File

@ -0,0 +1,97 @@
From 17327e444434b1062063a03060d6cc5a1876aa3f Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Fri, 22 Jan 2021 18:18:41 +0800
Subject: [PATCH 021/189] net/hns3: refactor converting descriptor error
Use errno array instead of switch-case for refactor
the hns3_cmd_convert_err_code function.
Besides, we add a type for ROH(RDMA Over HCCS) check
cmdq return error in Kunpeng930 NIC hardware.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_cmd.c | 54 ++++++++++++++++++++++-----------------------
drivers/net/hns3/hns3_cmd.h | 1 +
2 files changed, 27 insertions(+), 28 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index f58f4f7..4c301cb 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -247,34 +247,32 @@ hns3_is_special_opcode(uint16_t opcode)
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
- switch (desc_ret) {
- case HNS3_CMD_EXEC_SUCCESS:
- return 0;
- case HNS3_CMD_NO_AUTH:
- return -EPERM;
- case HNS3_CMD_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case HNS3_CMD_QUEUE_FULL:
- return -EXFULL;
- case HNS3_CMD_NEXT_ERR:
- return -ENOSR;
- case HNS3_CMD_UNEXE_ERR:
- return -ENOTBLK;
- case HNS3_CMD_PARA_ERR:
- return -EINVAL;
- case HNS3_CMD_RESULT_ERR:
- return -ERANGE;
- case HNS3_CMD_TIMEOUT:
- return -ETIME;
- case HNS3_CMD_HILINK_ERR:
- return -ENOLINK;
- case HNS3_CMD_QUEUE_ILLEGAL:
- return -ENXIO;
- case HNS3_CMD_INVALID:
- return -EBADR;
- default:
- return -EREMOTEIO;
- }
+ static const struct {
+ uint16_t imp_errcode;
+ int linux_errcode;
+ } hns3_cmdq_status[] = {
+ {HNS3_CMD_EXEC_SUCCESS, 0},
+ {HNS3_CMD_NO_AUTH, -EPERM},
+ {HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+ {HNS3_CMD_QUEUE_FULL, -EXFULL},
+ {HNS3_CMD_NEXT_ERR, -ENOSR},
+ {HNS3_CMD_UNEXE_ERR, -ENOTBLK},
+ {HNS3_CMD_PARA_ERR, -EINVAL},
+ {HNS3_CMD_RESULT_ERR, -ERANGE},
+ {HNS3_CMD_TIMEOUT, -ETIME},
+ {HNS3_CMD_HILINK_ERR, -ENOLINK},
+ {HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
+ {HNS3_CMD_INVALID, -EBADR},
+ {HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
+ };
+
+ uint32_t i;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_cmdq_status); i++)
+ if (hns3_cmdq_status[i].imp_errcode == desc_ret)
+ return hns3_cmdq_status[i].linux_errcode;
+
+ return -EREMOTEIO;
}
static int
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index e40293b..6152f6e 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -52,6 +52,7 @@ enum hns3_cmd_return_status {
HNS3_CMD_HILINK_ERR = 9,
HNS3_CMD_QUEUE_ILLEGAL = 10,
HNS3_CMD_INVALID = 11,
+ HNS3_CMD_ROH_CHECK_FAIL = 12
};
enum hns3_cmd_status {
--
2.7.4

View File

@ -0,0 +1,134 @@
From 2d64078bc27c055bfeb486230ea64eebe1cb65cb Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Fri, 22 Jan 2021 18:18:42 +0800
Subject: [PATCH 022/189] net/hns3: refactor flow checks into own functions
Here moves some judgement conditions to a separated function
for parsing IPv4 hdr and TCP hdr in hns3_parse_normal function.
Also, move the check of the selected input tuple of RSS to a
separated functions named hns3_rss_input_tuple_supported
in order to enhance scalability and complexity.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_flow.c | 69 ++++++++++++++++++++++++++++++--------------
1 file changed, 48 insertions(+), 21 deletions(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index f303df4..889fa2f 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -525,6 +525,17 @@ hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
return 0;
}
+static bool
+hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+ if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum)
+ return false;
+
+ return true;
+}
+
static int
hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
struct rte_flow_error *error)
@@ -546,11 +557,7 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (item->mask) {
ipv4_mask = item->mask;
- if (ipv4_mask->hdr.total_length ||
- ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.fragment_offset ||
- ipv4_mask->hdr.time_to_live ||
- ipv4_mask->hdr.hdr_checksum) {
+ if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK,
item,
@@ -648,6 +655,18 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
return 0;
}
+static bool
+hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
+{
+ if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp)
+ return false;
+
+ return true;
+}
+
static int
hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
struct rte_flow_error *error)
@@ -670,10 +689,7 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (item->mask) {
tcp_mask = item->mask;
- if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
- tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
- tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
- tcp_mask->hdr.tcp_urp) {
+ if (!hns3_check_tcp_mask_supported(tcp_mask)) {
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK,
item,
@@ -1328,6 +1344,28 @@ hns3_rss_conf_copy(struct hns3_rss_conf *out,
return 0;
}
+static bool
+hns3_rss_input_tuple_supported(struct hns3_hw *hw,
+ const struct rte_flow_action_rss *rss)
+{
+ /*
+ * For IP packet, it is not supported to use src/dst port fields to RSS
+ * hash for the following packet types.
+ * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
+ * Besides, for Kunpeng920, the NIC HW is not supported to use src/dst
+ * port fields to RSS hash for IPV6 SCTP packet type. However, the
+ * Kunpeng930 and future kunpeng series support to use src/dst port
+ * fields to RSS hash for IPv6 SCTP packet type.
+ */
+ if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
+ (rss->types & ETH_RSS_IP ||
+ (!hw->rss_info.ipv6_sctp_offload_supported &&
+ rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+ return false;
+
+ return true;
+}
+
/*
* This function is used to parse rss action validatation.
*/
@@ -1386,18 +1424,7 @@ hns3_parse_rss_filter(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
"RSS hash key must be exactly 40 bytes");
- /*
- * For Kunpeng920 and Kunpeng930 NIC hardware, it is not supported to
- * use dst port/src port fields to RSS hash for the following packet
- * types.
- * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
- * Besides, for Kunpeng920, The NIC hardware is not supported to use
- * src/dst port fields to RSS hash for IPV6 SCTP packet type.
- */
- if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
- (rss->types & ETH_RSS_IP ||
- (!hw->rss_info.ipv6_sctp_offload_supported &&
- rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+ if (!hns3_rss_input_tuple_supported(hw, rss))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&rss->types,
--
2.7.4

View File

@ -0,0 +1,190 @@
From badcc5f38dcf223578f870574653fdd20e00c6f8 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Fri, 22 Jan 2021 18:18:43 +0800
Subject: [PATCH 023/189] net/hns3: reconstruct Rx interrupt map
This patch reconstruct the Rx interrupt map to reduce the cyclic
complexity and improve readability and maintainability.
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 59 +++++++++++++++++++--------------------
drivers/net/hns3/hns3_ethdev_vf.c | 55 ++++++++++++++++++------------------
2 files changed, 56 insertions(+), 58 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 7c51e83..f3ce639 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4782,27 +4782,28 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
uint16_t q_id;
int ret;
- if (dev->data->dev_conf.intr_conf.rxq == 0)
+ /*
+ * hns3 needs a separate interrupt to be used as event interrupt which
+ * could not be shared with task queue pair, so KERNEL drivers need
+ * support multiple interrupt vectors.
+ */
+ if (dev->data->dev_conf.intr_conf.rxq == 0 ||
+ !rte_intr_cap_multiple(intr_handle))
return 0;
- /* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
+ intr_vector = hw->used_rx_queues;
+ /* creates event fd for each intr vector when MSIX is used */
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -EINVAL;
- /* check and configure queue intr-vector mapping */
- if (rte_intr_cap_multiple(intr_handle) ||
- !RTE_ETH_DEV_SRIOV(dev).active) {
- intr_vector = hw->used_rx_queues;
- /* creates event fd for each intr vector when MSIX is used */
- if (rte_intr_efd_enable(intr_handle, intr_vector))
- return -EINVAL;
- }
- if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ if (intr_handle->intr_vec == NULL) {
intr_handle->intr_vec =
rte_zmalloc("intr_vec",
hw->used_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
- hns3_err(hw, "Failed to allocate %u rx_queues"
- " intr_vec", hw->used_rx_queues);
+ hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
+ hw->used_rx_queues);
ret = -ENOMEM;
goto alloc_intr_vec_error;
}
@@ -4812,28 +4813,26 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
vec = RTE_INTR_VEC_RXTX_OFFSET;
base = RTE_INTR_VEC_RXTX_OFFSET;
}
- if (rte_intr_dp_is_en(intr_handle)) {
- for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
- ret = hns3_bind_ring_with_vector(hw, vec, true,
- HNS3_RING_TYPE_RX,
- q_id);
- if (ret)
- goto bind_vector_error;
- intr_handle->intr_vec[q_id] = vec;
- if (vec < base + intr_handle->nb_efd - 1)
- vec++;
- }
+
+ for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+ ret = hns3_bind_ring_with_vector(hw, vec, true,
+ HNS3_RING_TYPE_RX, q_id);
+ if (ret)
+ goto bind_vector_error;
+ intr_handle->intr_vec[q_id] = vec;
+ /*
+ * If there are not enough efds (e.g. not enough interrupt),
+ * remaining queues will be bond to the last interrupt.
+ */
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
}
rte_intr_enable(intr_handle);
return 0;
bind_vector_error:
- rte_intr_efd_disable(intr_handle);
- if (intr_handle->intr_vec) {
- free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
- }
- return ret;
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
alloc_intr_vec_error:
rte_intr_efd_disable(intr_handle);
return ret;
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 37135d7..3a1d4cb 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2085,21 +2085,22 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
uint16_t q_id;
int ret;
- if (dev->data->dev_conf.intr_conf.rxq == 0)
+ /*
+ * hns3 needs a separate interrupt to be used as event interrupt which
+ * could not be shared with task queue pair, so KERNEL drivers need
+ * support multiple interrupt vectors.
+ */
+ if (dev->data->dev_conf.intr_conf.rxq == 0 ||
+ !rte_intr_cap_multiple(intr_handle))
return 0;
- /* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
+ intr_vector = hw->used_rx_queues;
+ /* It creates event fd for each intr vector when MSIX is used */
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -EINVAL;
- /* check and configure queue intr-vector mapping */
- if (rte_intr_cap_multiple(intr_handle) ||
- !RTE_ETH_DEV_SRIOV(dev).active) {
- intr_vector = hw->used_rx_queues;
- /* It creates event fd for each intr vector when MSIX is used */
- if (rte_intr_efd_enable(intr_handle, intr_vector))
- return -EINVAL;
- }
- if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ if (intr_handle->intr_vec == NULL) {
intr_handle->intr_vec =
rte_zmalloc("intr_vec",
hw->used_rx_queues * sizeof(int), 0);
@@ -2115,28 +2116,26 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
vec = RTE_INTR_VEC_RXTX_OFFSET;
base = RTE_INTR_VEC_RXTX_OFFSET;
}
- if (rte_intr_dp_is_en(intr_handle)) {
- for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
- ret = hns3vf_bind_ring_with_vector(hw, vec, true,
- HNS3_RING_TYPE_RX,
- q_id);
- if (ret)
- goto vf_bind_vector_error;
- intr_handle->intr_vec[q_id] = vec;
- if (vec < base + intr_handle->nb_efd - 1)
- vec++;
- }
+
+ for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+ ret = hns3vf_bind_ring_with_vector(hw, vec, true,
+ HNS3_RING_TYPE_RX, q_id);
+ if (ret)
+ goto vf_bind_vector_error;
+ intr_handle->intr_vec[q_id] = vec;
+ /*
+ * If there are not enough efds (e.g. not enough interrupt),
+ * remaining queues will be bond to the last interrupt.
+ */
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
}
rte_intr_enable(intr_handle);
return 0;
vf_bind_vector_error:
- rte_intr_efd_disable(intr_handle);
- if (intr_handle->intr_vec) {
- free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
- }
- return ret;
+ free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
vf_alloc_intr_vec_error:
rte_intr_efd_disable(intr_handle);
return ret;
--
2.7.4

View File

@ -0,0 +1,197 @@
From ea7fb351ca32444916ea099c644d1f29295ffdeb Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Fri, 22 Jan 2021 18:18:44 +0800
Subject: [PATCH 024/189] net/hns3: extract common checks for flow director
When parse flow director with all types, it needs to judge the spec
of item and mask of item for all packet types. The judgement is the
same for all types. Therefore, we move it into the concentrated
location.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_flow.c | 84 +++++++++++---------------------------------
1 file changed, 20 insertions(+), 64 deletions(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 889fa2f..9b161f4 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -433,17 +433,12 @@ hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
}
static int
-hns3_parse_eth(const struct rte_flow_item *item,
- struct hns3_fdir_rule *rule, struct rte_flow_error *error)
+hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+ struct rte_flow_error *error __rte_unused)
{
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
/* Only used to describe the protocol stack. */
if (item->spec == NULL && item->mask == NULL)
return 0;
@@ -483,11 +478,6 @@ hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
const struct rte_flow_item_vlan *vlan_spec;
const struct rte_flow_item_vlan *vlan_mask;
- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
rule->key_conf.vlan_num++;
if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
return rte_flow_error_set(error, EINVAL,
@@ -543,14 +533,10 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
const struct rte_flow_item_ipv4 *ipv4_spec;
const struct rte_flow_item_ipv4 *ipv4_mask;
- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
+
/* Only used to describe the protocol stack. */
if (item->spec == NULL && item->mask == NULL)
return 0;
@@ -606,11 +592,6 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
const struct rte_flow_item_ipv6 *ipv6_spec;
const struct rte_flow_item_ipv6 *ipv6_mask;
- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
@@ -674,11 +655,6 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
rule->key_conf.spec.ip_proto = IPPROTO_TCP;
rule->key_conf.mask.ip_proto = IPPROTO_MASK;
@@ -722,11 +698,6 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
const struct rte_flow_item_udp *udp_spec;
const struct rte_flow_item_udp *udp_mask;
- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
rule->key_conf.spec.ip_proto = IPPROTO_UDP;
rule->key_conf.mask.ip_proto = IPPROTO_MASK;
@@ -768,11 +739,6 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
const struct rte_flow_item_sctp *sctp_spec;
const struct rte_flow_item_sctp *sctp_mask;
- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
rule->key_conf.mask.ip_proto = IPPROTO_MASK;
@@ -904,15 +870,6 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
const struct rte_flow_item_vxlan *vxlan_spec;
const struct rte_flow_item_vxlan *vxlan_mask;
- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
- else if (item->spec && (item->mask == NULL))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Tunnel packets must configure with mask");
-
hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
@@ -955,15 +912,6 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
const struct rte_flow_item_nvgre *nvgre_spec;
const struct rte_flow_item_nvgre *nvgre_mask;
- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
- else if (item->spec && (item->mask == NULL))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Tunnel packets must configure with mask");
-
hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
rule->key_conf.spec.outer_proto = IPPROTO_GRE;
rule->key_conf.mask.outer_proto = IPPROTO_MASK;
@@ -1013,15 +961,6 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
const struct rte_flow_item_geneve *geneve_spec;
const struct rte_flow_item_geneve *geneve_mask;
- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
- else if (item->spec && (item->mask == NULL))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Tunnel packets must configure with mask");
-
hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
@@ -1058,6 +997,17 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
{
int ret;
+ if (item->spec == NULL && item->mask)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't configure FDIR with mask "
+ "but without spec");
+ else if (item->spec && (item->mask == NULL))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Tunnel packets must configure "
+ "with mask");
+
switch (item->type) {
case RTE_FLOW_ITEM_TYPE_VXLAN:
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
@@ -1086,6 +1036,12 @@ hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
{
int ret;
+ if (item->spec == NULL && item->mask)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't configure FDIR with mask "
+ "but without spec");
+
switch (item->type) {
case RTE_FLOW_ITEM_TYPE_ETH:
ret = hns3_parse_eth(item, rule, error);
--
2.7.4

View File

@ -0,0 +1,122 @@
From 2e88b488d2b8f3086b7d94179722066b9915a7b9 Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Fri, 22 Jan 2021 18:18:45 +0800
Subject: [PATCH 025/189] net/hns3: refactor reset event report function
Encapsulate the handling of the IMP reset report and the global
reset report into separate functions in order to reduce the
complexity of the hns3_check_event_cause function.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 69 +++++++++++++++++++++++++++---------------
1 file changed, 45 insertions(+), 24 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index f3ce639..817d1dc 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -123,6 +123,47 @@ hns3_pf_enable_irq0(struct hns3_hw *hw)
}
static enum hns3_evt_cause
+hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
+ uint32_t *vec_val)
+{
+ struct hns3_hw *hw = &hns->hw;
+
+ rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
+ *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
+ if (!is_delay) {
+ hw->reset.stats.imp_cnt++;
+ hns3_warn(hw, "IMP reset detected, clear reset status");
+ } else {
+ hns3_schedule_delayed_reset(hns);
+ hns3_warn(hw, "IMP reset detected, don't clear reset status");
+ }
+
+ return HNS3_VECTOR0_EVENT_RST;
+}
+
+static enum hns3_evt_cause
+hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
+ uint32_t *vec_val)
+{
+ struct hns3_hw *hw = &hns->hw;
+
+ rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
+ *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
+ if (!is_delay) {
+ hw->reset.stats.global_cnt++;
+ hns3_warn(hw, "Global reset detected, clear reset status");
+ } else {
+ hns3_schedule_delayed_reset(hns);
+ hns3_warn(hw,
+ "Global reset detected, don't clear reset status");
+ }
+
+ return HNS3_VECTOR0_EVENT_RST;
+}
+
+static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
struct hns3_hw *hw = &hns->hw;
@@ -131,12 +172,14 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
uint32_t hw_err_src_reg;
uint32_t val;
enum hns3_evt_cause ret;
+ bool is_delay;
/* fetch the events from their corresponding regs */
vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
+ is_delay = clearval == NULL ? true : false;
/*
* Assumption: If by any chance reset and mailbox events are reported
* together then we will only process reset event and defer the
@@ -145,35 +188,13 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
* from H/W just for the mailbox.
*/
if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
- hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
- val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
- if (clearval) {
- hw->reset.stats.imp_cnt++;
- hns3_warn(hw, "IMP reset detected, clear reset status");
- } else {
- hns3_schedule_delayed_reset(hns);
- hns3_warn(hw, "IMP reset detected, don't clear reset status");
- }
-
- ret = HNS3_VECTOR0_EVENT_RST;
+ ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
goto out;
}
/* Global reset */
if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
- hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
- val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
- if (clearval) {
- hw->reset.stats.global_cnt++;
- hns3_warn(hw, "Global reset detected, clear reset status");
- } else {
- hns3_schedule_delayed_reset(hns);
- hns3_warn(hw, "Global reset detected, don't clear reset status");
- }
-
- ret = HNS3_VECTOR0_EVENT_RST;
+ ret = hns3_proc_global_reset_event(hns, is_delay, &val);
goto out;
}
--
2.7.4

View File

@ -0,0 +1,70 @@
From 64c98e007bf57084bab1be0256d5d8cabf8e5b29 Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Fri, 22 Jan 2021 18:18:46 +0800
Subject: [PATCH 026/189] net/hns3: fix memory leak on secondary process exit
The secondary process allocates memory for process_private
during initialization. Therefore, that memory needs to be released
when the process exits.
Fixes: c203571b3602 ("net/hns3: register and add log interface")
Cc: stable@dpdk.org
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 7 +++++--
drivers/net/hns3/hns3_ethdev_vf.c | 12 +++++++++---
2 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 817d1dc..2a5689c 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -6263,8 +6263,11 @@ hns3_dev_uninit(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -EPERM;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ rte_free(eth_dev->process_private);
+ eth_dev->process_private = NULL;
+ return 0;
+ }
if (hw->adapter_state < HNS3_NIC_CLOSING)
hns3_dev_close(eth_dev);
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 3a1d4cb..948d914 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1971,8 +1971,11 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev)
struct hns3_hw *hw = &hns->hw;
int ret = 0;
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ rte_free(eth_dev->process_private);
+ eth_dev->process_private = NULL;
return 0;
+ }
if (hw->adapter_state == HNS3_NIC_STARTED)
ret = hns3vf_dev_stop(eth_dev);
@@ -2839,8 +2842,11 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -EPERM;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ rte_free(eth_dev->process_private);
+ eth_dev->process_private = NULL;
+ return 0;
+ }
if (hw->adapter_state < HNS3_NIC_CLOSING)
hns3vf_dev_close(eth_dev);
--
2.7.4

View File

@ -0,0 +1,244 @@
From 30c133006d7929fafad11a379fc18f3d23dc0178 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Fri, 22 Jan 2021 18:18:47 +0800
Subject: [PATCH 027/189] net/hns3: fix interrupt resources in Rx interrupt
mode
For Kunpeng930, the NIC engine supports 1280 tqps being taken over by
a PF. In this case, a maximum of 1281 interrupt resources are also
supported by this PF. Several patches were made to support the maximum
number of queues, but the interrupt-related modifications were missing.
So, in Rx interrupt mode, a large number of queues will be aggregated
into one interrupt due to insufficient interrupts. This leads to a
waste of interrupt resources and reduces usability.
To utilize all these interrupt resources, the related IMP command has
been extended. In addition, the I/O addresses of the extended interrupt
resources differ from the existing ones, so a function used for
calculating the address offset has been added.
Fixes: 76d794566d43 ("net/hns3: maximize queue number")
Fixes: 27911a6e62e5 ("net/hns3: add Rx interrupts compatibility")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
---
drivers/net/hns3/hns3_cmd.h | 8 ++++++--
drivers/net/hns3/hns3_ethdev.c | 17 +++++++++--------
drivers/net/hns3/hns3_regs.c | 2 +-
drivers/net/hns3/hns3_regs.h | 24 +++++++++++++++---------
drivers/net/hns3/hns3_rxtx.c | 28 +++++++++++++++++++++++-----
drivers/net/hns3/hns3_rxtx.h | 1 +
6 files changed, 55 insertions(+), 25 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 6152f6e..dc97a1a 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -776,12 +776,16 @@ enum hns3_int_gl_idx {
#define HNS3_TQP_ID_M GENMASK(12, 2)
#define HNS3_INT_GL_IDX_S 13
#define HNS3_INT_GL_IDX_M GENMASK(14, 13)
+#define HNS3_TQP_INT_ID_L_S 0
+#define HNS3_TQP_INT_ID_L_M GENMASK(7, 0)
+#define HNS3_TQP_INT_ID_H_S 8
+#define HNS3_TQP_INT_ID_H_M GENMASK(15, 8)
struct hns3_ctrl_vector_chain_cmd {
- uint8_t int_vector_id;
+ uint8_t int_vector_id; /* the low order of the interrupt id */
uint8_t int_cause_num;
uint16_t tqp_type_and_id[HNS3_VECTOR_ELEMENTS_PER_CMD];
uint8_t vfid;
- uint8_t rsv;
+ uint8_t int_vector_id_h; /* the high order of the interrupt id */
};
struct hns3_config_max_frm_size_cmd {
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 2a5689c..4356860 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2232,7 +2232,7 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev)
}
static int
-hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
+hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en,
enum hns3_ring_type queue_type, uint16_t queue_id)
{
struct hns3_cmd_desc desc;
@@ -2241,13 +2241,15 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
enum hns3_cmd_status status;
enum hns3_opcode_type op;
uint16_t tqp_type_and_id = 0;
- const char *op_str;
uint16_t type;
uint16_t gl;
- op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
+ op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
hns3_cmd_setup_basic_desc(&desc, op, false);
- req->int_vector_id = vector_id;
+ req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M,
+ HNS3_TQP_INT_ID_L_S);
+ req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M,
+ HNS3_TQP_INT_ID_H_S);
if (queue_type == HNS3_RING_TYPE_RX)
gl = HNS3_RING_GL_RX;
@@ -2263,11 +2265,10 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
gl);
req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
req->int_cause_num = 1;
- op_str = mmap ? "Map" : "Unmap";
status = hns3_cmd_send(hw, &desc, 1);
if (status) {
hns3_err(hw, "%s TQP %u fail, vector_id is %u, status is %d.",
- op_str, queue_id, req->int_vector_id, status);
+ en ? "Map" : "Unmap", queue_id, vector_id, status);
return status;
}
@@ -4797,8 +4798,8 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
- uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
+ uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
+ uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
uint32_t intr_vector;
uint16_t q_id;
int ret;
diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c
index f2cb465..8afe132 100644
--- a/drivers/net/hns3/hns3_regs.c
+++ b/drivers/net/hns3/hns3_regs.c
@@ -301,7 +301,7 @@ hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data)
reg_num = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t);
for (j = 0; j < hw->intr_tqps_num; j++) {
- reg_offset = HNS3_TQP_INTR_REG_SIZE * j;
+ reg_offset = hns3_get_tqp_intr_reg_offset(j);
for (i = 0; i < reg_num; i++)
*data++ = hns3_read_dev(hw, tqp_intr_reg_addrs[i] +
reg_offset);
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index 81a0af5..39fc5d1 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -95,15 +95,21 @@
#define HNS3_MIN_EXTEND_QUEUE_ID 1024
/* bar registers for tqp interrupt */
-#define HNS3_TQP_INTR_CTRL_REG 0x20000
-#define HNS3_TQP_INTR_GL0_REG 0x20100
-#define HNS3_TQP_INTR_GL1_REG 0x20200
-#define HNS3_TQP_INTR_GL2_REG 0x20300
-#define HNS3_TQP_INTR_RL_REG 0x20900
-#define HNS3_TQP_INTR_TX_QL_REG 0x20e00
-#define HNS3_TQP_INTR_RX_QL_REG 0x20f00
-
-#define HNS3_TQP_INTR_REG_SIZE 4
+#define HNS3_TQP_INTR_REG_BASE 0x20000
+#define HNS3_TQP_INTR_EXT_REG_BASE 0x30000
+#define HNS3_TQP_INTR_CTRL_REG 0
+#define HNS3_TQP_INTR_GL0_REG 0x100
+#define HNS3_TQP_INTR_GL1_REG 0x200
+#define HNS3_TQP_INTR_GL2_REG 0x300
+#define HNS3_TQP_INTR_RL_REG 0x900
+#define HNS3_TQP_INTR_TX_QL_REG 0xe00
+#define HNS3_TQP_INTR_RX_QL_REG 0xf00
+#define HNS3_TQP_INTR_RL_EN_B 6
+
+#define HNS3_MIN_EXT_TQP_INTR_ID 64
+#define HNS3_TQP_INTR_LOW_ORDER_OFFSET 0x4
+#define HNS3_TQP_INTR_HIGH_ORDER_OFFSET 0x1000
+
#define HNS3_TQP_INTR_GL_MAX 0x1FE0
#define HNS3_TQP_INTR_GL_DEFAULT 20
#define HNS3_TQP_INTR_GL_UNIT_1US BIT(31)
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 30f1e06..1991b4e 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -834,6 +834,24 @@ hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
return ret;
}
+uint32_t
+hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id)
+{
+ uint32_t reg_offset;
+
+ /* Need an extend offset to config queues > 64 */
+ if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID)
+ reg_offset = HNS3_TQP_INTR_REG_BASE +
+ tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
+ else
+ reg_offset = HNS3_TQP_INTR_EXT_REG_BASE +
+ tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID *
+ HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
+ tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID *
+ HNS3_TQP_INTR_LOW_ORDER_OFFSET;
+
+ return reg_offset;
+}
void
hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
@@ -847,7 +865,7 @@ hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
return;
- addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id);
if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
else
@@ -864,7 +882,7 @@ hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
if (rl_value > HNS3_TQP_INTR_RL_MAX)
return;
- addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
value = HNS3_RL_USEC_TO_REG(rl_value);
if (value > 0)
value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
@@ -885,10 +903,10 @@ hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
return;
- addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
hns3_write_dev(hw, addr, ql_value);
- addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
hns3_write_dev(hw, addr, ql_value);
}
@@ -897,7 +915,7 @@ hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
{
uint32_t addr, value;
- addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
value = en ? 1 : 0;
hns3_write_dev(hw, addr, value);
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 331b507..8f5ae5c 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -680,6 +680,7 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
+uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
--
2.7.4

View File

@ -0,0 +1,137 @@
From 65ffb8730c023e703e90dfefde782d1b70e830bf Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Fri, 22 Jan 2021 18:18:48 +0800
Subject: [PATCH 028/189] net/hns3: rename RSS functions
Rename some RSS implementation functions in order to
make the function naming style more reasonable and
consistent.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 2 +-
drivers/net/hns3/hns3_ethdev_vf.c | 2 +-
drivers/net/hns3/hns3_flow.c | 2 +-
drivers/net/hns3/hns3_rss.c | 12 ++++++------
drivers/net/hns3/hns3_rss.h | 4 ++--
5 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 4356860..94b6e44 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4685,7 +4685,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
goto err_fdir;
}
- hns3_set_default_rss_args(hw);
+ hns3_rss_set_default_args(hw);
ret = hns3_enable_hw_error_intr(hns, true);
if (ret) {
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 948d914..7eb0b11 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1832,7 +1832,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
if (ret)
goto err_set_tc_queue;
- hns3_set_default_rss_args(hw);
+ hns3_rss_set_default_args(hw);
return 0;
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 9b161f4..8a5179d 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1469,7 +1469,7 @@ hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
if (ret)
return ret;
- ret = hns3_set_rss_algo_key(hw, rss_config->key);
+ ret = hns3_rss_set_algo_key(hw, rss_config->key);
if (ret)
return ret;
diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c
index b5df374..7d1a297 100644
--- a/drivers/net/hns3/hns3_rss.c
+++ b/drivers/net/hns3/hns3_rss.c
@@ -193,7 +193,7 @@ static const struct {
* Used to set algorithm, key_offset and hash key of rss.
*/
int
-hns3_set_rss_algo_key(struct hns3_hw *hw, const uint8_t *key)
+hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key)
{
#define HNS3_KEY_OFFSET_MAX 3
#define HNS3_SET_HASH_KEY_BYTE_FOUR 2
@@ -245,7 +245,7 @@ hns3_set_rss_algo_key(struct hns3_hw *hw, const uint8_t *key)
* Used to configure the tuple selection for RSS hash input.
*/
static int
-hns3_set_rss_input_tuple(struct hns3_hw *hw)
+hns3_rss_set_input_tuple(struct hns3_hw *hw)
{
struct hns3_rss_conf *rss_config = &hw->rss_info;
struct hns3_rss_input_tuple_cmd *req;
@@ -443,7 +443,7 @@ hns3_dev_rss_hash_update(struct rte_eth_dev *dev,
ret = -EINVAL;
goto conf_err;
}
- ret = hns3_set_rss_algo_key(hw, key);
+ ret = hns3_rss_set_algo_key(hw, key);
if (ret)
goto conf_err;
}
@@ -649,7 +649,7 @@ hns3_rss_tuple_uninit(struct hns3_hw *hw)
* Set the default rss configuration in the init of driver.
*/
void
-hns3_set_default_rss_args(struct hns3_hw *hw)
+hns3_rss_set_default_args(struct hns3_hw *hw)
{
struct hns3_rss_conf *rss_cfg = &hw->rss_info;
uint16_t queue_num = hw->alloc_rss_size;
@@ -696,12 +696,12 @@ hns3_config_rss(struct hns3_adapter *hns)
hns3_rss_uninit(hns);
/* Configure RSS hash algorithm and hash key offset */
- ret = hns3_set_rss_algo_key(hw, hash_key);
+ ret = hns3_rss_set_algo_key(hw, hash_key);
if (ret)
return ret;
/* Configure the tuple selection for RSS hash input */
- ret = hns3_set_rss_input_tuple(hw);
+ ret = hns3_rss_set_input_tuple(hw);
if (ret)
return ret;
diff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h
index 6d1d25f..05d5c26 100644
--- a/drivers/net/hns3/hns3_rss.h
+++ b/drivers/net/hns3/hns3_rss.h
@@ -102,7 +102,7 @@ int hns3_dev_rss_reta_update(struct rte_eth_dev *dev,
int hns3_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
-void hns3_set_default_rss_args(struct hns3_hw *hw);
+void hns3_rss_set_default_args(struct hns3_hw *hw);
int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir,
uint16_t size);
int hns3_rss_reset_indir_table(struct hns3_hw *hw);
@@ -111,7 +111,7 @@ void hns3_rss_uninit(struct hns3_adapter *hns);
int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
struct hns3_rss_tuple_cfg *tuple,
uint64_t rss_hf);
-int hns3_set_rss_algo_key(struct hns3_hw *hw, const uint8_t *key);
+int hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key);
int hns3_restore_rss_filter(struct rte_eth_dev *dev);
#endif /* _HNS3_RSS_H_ */
--
2.7.4

View File

@ -0,0 +1,98 @@
From 2cfeae90323331be6a395bae314e170bfb3ff3b2 Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Fri, 22 Jan 2021 18:18:49 +0800
Subject: [PATCH 029/189] net/hns3: adjust some comments
Fix some error comments and remove some meaningless comments.
Fixes: f8e7fcbfd0b8 ("net/hns3: support flow action of queue region")
Fixes: fcba820d9b9e ("net/hns3: support flow director")
Fixes: c37ca66f2b27 ("net/hns3: support RSS")
Fixes: ec674cb742e5 ("net/hns3: fix flushing RSS rule")
Cc: stable@dpdk.org
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_flow.c | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 8a5179d..f2bff1e 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -91,9 +91,9 @@ net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
/*
* This function is used to find rss general action.
* 1. As we know RSS is used to spread packets among several queues, the flow
- * API provide the struct rte_flow_action_rss, user could config it's field
+ * API provide the struct rte_flow_action_rss, user could config its field
* sush as: func/level/types/key/queue to control RSS function.
- * 2. The flow API also support queue region configuration for hns3. It was
+ * 2. The flow API also supports queue region configuration for hns3. It was
* implemented by FDIR + RSS in hns3 hardware, user can create one FDIR rule
* which action is RSS queues region.
* 3. When action is RSS, we use the following rule to distinguish:
@@ -128,11 +128,11 @@ hns3_find_rss_general_action(const struct rte_flow_item pattern[],
rss = act->conf;
if (have_eth && rss->conf.queue_num) {
/*
- * Patter have ETH and action's queue_num > 0, indicate this is
+ * Pattern have ETH and action's queue_num > 0, indicate this is
* queue region configuration.
* Because queue region is implemented by FDIR + RSS in hns3
- * hardware, it need enter FDIR process, so here return NULL to
- * avoid enter RSS process.
+ * hardware, it needs to enter FDIR process, so here return NULL
+ * to avoid enter RSS process.
*/
return NULL;
}
@@ -405,7 +405,6 @@ hns3_handle_actions(struct rte_eth_dev *dev,
return 0;
}
-/* Parse to get the attr and action info of flow director rule. */
static int
hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
@@ -782,7 +781,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
}
/*
- * Check items before tunnel, save inner configs to outer configs,and clear
+ * Check items before tunnel, save inner configs to outer configs, and clear
* inner configs.
* The key consists of two parts: meta_data and tuple keys.
* Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel
@@ -1473,10 +1472,8 @@ hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
if (ret)
return ret;
- /* Update algorithm of hw */
hw->rss_info.conf.func = rss_config->func;
- /* Set flow type supported */
tuple = &hw->rss_info.rss_tuple_sets;
ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
if (ret)
@@ -1561,7 +1558,7 @@ hns3_config_rss_filter(struct rte_eth_dev *dev,
if (rss_flow_conf.queue_num) {
/*
* Due the content of queue pointer have been reset to
- * 0, the rss_info->conf.queue should be set NULL
+ * 0, the rss_info->conf.queue should be set to NULL
*/
rss_info->conf.queue = NULL;
rss_info->conf.queue_num = 0;
@@ -1727,7 +1724,7 @@ hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
/*
* Create or destroy a flow rule.
* Theorically one rule can match more than one filters.
- * We will let it use the filter which it hitt first.
+ * We will let it use the filter which it hit first.
* So, the sequence matters.
*/
static struct rte_flow *
--
2.7.4

View File

@ -0,0 +1,45 @@
From d7fa7d59733c34f7d8083a6567ca8a0e3efb1fb6 Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Fri, 22 Jan 2021 18:18:50 +0800
Subject: [PATCH 030/189] net/hns3: remove unnecessary parentheses
Remove unnecessary parentheses as well as keep a reasonable
blank line.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_flow.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index f2bff1e..e9d0a0b 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -700,6 +700,7 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
rule->key_conf.spec.ip_proto = IPPROTO_UDP;
rule->key_conf.mask.ip_proto = IPPROTO_MASK;
+
/* Only used to describe the protocol stack. */
if (item->spec == NULL && item->mask == NULL)
return 0;
@@ -1264,7 +1265,7 @@ hns3_action_rss_same(const struct rte_flow_action_rss *comp,
if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
func_is_same = false;
else
- func_is_same = (with->func ? (comp->func == with->func) : true);
+ func_is_same = with->func ? (comp->func == with->func) : true;
return (func_is_same &&
comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
@@ -1861,6 +1862,7 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_HANDLE,
flow, "Flow is NULL");
+
filter_type = flow->filter_type;
switch (filter_type) {
case RTE_ETH_FILTER_FDIR:
--
2.7.4

View File

@ -0,0 +1,31 @@
From dc8b97b9ef327bcb295bc7a2d2e4ffaaca6ba1f0 Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Fri, 22 Jan 2021 18:18:51 +0800
Subject: [PATCH 031/189] net/hns3: adjust format specifier for enum
Here uses %d as printing output for enumeration member.
Fixes: c37ca66f2b27 ("net/hns3: support RSS")
Cc: stable@dpdk.org
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_flow.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index e9d0a0b..3e387ac 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1447,7 +1447,7 @@ hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
*hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
break;
default:
- hns3_err(hw, "Invalid RSS algorithm configuration(%u)",
+ hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
algo_func);
return -EINVAL;
}
--
2.7.4

View File

@ -0,0 +1,245 @@
From 0112993819b0a5ae9bf35f8a3adf45c6134d69bd Mon Sep 17 00:00:00 2001
From: Chengwen Feng <fengchengwen@huawei.com>
Date: Fri, 22 Jan 2021 18:18:52 +0800
Subject: [PATCH 032/189] net/hns3: support LSC event report
This patch support LSC (Link Status Change) event report.
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 52 +++++++++++++++++++++++++++++++++++----
drivers/net/hns3/hns3_ethdev.h | 4 ++-
drivers/net/hns3/hns3_ethdev_vf.c | 40 +++++++++++++++++++++++++++++-
drivers/net/hns3/hns3_mbx.c | 14 ++++++-----
4 files changed, 97 insertions(+), 13 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 94b6e44..bc77608 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -19,6 +19,7 @@
#define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1
#define HNS3_SERVICE_INTERVAL 1000000 /* us */
+#define HNS3_SERVICE_QUICK_INTERVAL 10
#define HNS3_INVALID_PVID 0xFFFF
#define HNS3_FILTER_TYPE_VF 0
@@ -93,6 +94,7 @@ static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
int on);
static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev);
+static bool hns3_update_link_status(struct hns3_hw *hw);
static int hns3_add_mc_addr(struct hns3_hw *hw,
struct rte_ether_addr *mac_addr);
@@ -4458,7 +4460,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw)
return !!link_status;
}
-void
+static bool
hns3_update_link_status(struct hns3_hw *hw)
{
int state;
@@ -4467,7 +4469,36 @@ hns3_update_link_status(struct hns3_hw *hw)
if (state != hw->mac.link_status) {
hw->mac.link_status = state;
hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
+ return true;
}
+
+ return false;
+}
+
+/*
+ * Current, the PF driver get link status by two ways:
+ * 1) Periodic polling in the intr thread context, driver call
+ * hns3_update_link_status to update link status.
+ * 2) Firmware report async interrupt, driver process the event in the intr
+ * thread context, and call hns3_update_link_status to update link status.
+ *
+ * If detect link status changed, driver need report LSE. One method is add the
+ * report LSE logic in hns3_update_link_status.
+ *
+ * But the PF driver ops(link_update) also call hns3_update_link_status to
+ * update link status.
+ * If we report LSE in hns3_update_link_status, it may lead to deadlock in the
+ * bonding application.
+ *
+ * So add the one new API which used only in intr thread context.
+ */
+void
+hns3_update_link_status_and_event(struct hns3_hw *hw)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+ bool changed = hns3_update_link_status(hw);
+ if (changed)
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static void
@@ -4479,9 +4510,10 @@ hns3_service_handler(void *param)
if (!hns3_is_reset_pending(hns)) {
hns3_update_speed_duplex(eth_dev);
- hns3_update_link_status(hw);
- } else
+ hns3_update_link_status_and_event(hw);
+ } else {
hns3_warn(hw, "Cancel the query when reset is pending");
+ }
rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
}
@@ -5557,8 +5589,10 @@ hns3_stop_service(struct hns3_adapter *hns)
struct rte_eth_dev *eth_dev;
eth_dev = &rte_eth_devices[hw->data->port_id];
- if (hw->adapter_state == HNS3_NIC_STARTED)
+ if (hw->adapter_state == HNS3_NIC_STARTED) {
rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
+ hns3_update_link_status_and_event(hw);
+ }
hw->mac.link_status = ETH_LINK_DOWN;
hns3_set_rxtx_function(eth_dev);
@@ -5601,7 +5635,15 @@ hns3_start_service(struct hns3_adapter *hns)
hns3_set_rxtx_function(eth_dev);
hns3_mp_req_start_rxtx(eth_dev);
if (hw->adapter_state == HNS3_NIC_STARTED) {
- hns3_service_handler(eth_dev);
+ /*
+ * This API parent function already hold the hns3_hw.lock, the
+ * hns3_service_handler may report lse, in bonding application
+ * it will call driver's ops which may acquire the hns3_hw.lock
+ * again, thus lead to deadlock.
+ * We defer calls hns3_service_handler to avoid the deadlock.
+ */
+ rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
+ hns3_service_handler, eth_dev);
/* Enable interrupt of all rx queues before enabling queues */
hns3_dev_all_rx_queue_intr_enable(hw, true);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 0d17170..547e991 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -946,11 +946,13 @@ int hns3_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op, void *arg);
bool hns3_is_reset_pending(struct hns3_adapter *hns);
bool hns3vf_is_reset_pending(struct hns3_adapter *hns);
-void hns3_update_link_status(struct hns3_hw *hw);
+void hns3_update_link_status_and_event(struct hns3_hw *hw);
void hns3_ether_format_addr(char *buf, uint16_t size,
const struct rte_ether_addr *ether_addr);
int hns3_dev_infos_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *info);
+void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
+ uint32_t link_speed, uint8_t link_duplex);
static inline bool
is_reset_pending(struct hns3_adapter *hns)
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 7eb0b11..3a682e5 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1440,6 +1440,41 @@ hns3vf_request_link_info(struct hns3_hw *hw)
hns3_err(hw, "Failed to fetch link status from PF: %d", ret);
}
+void
+hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
+ uint32_t link_speed, uint8_t link_duplex)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+ struct hns3_mac *mac = &hw->mac;
+ bool report_lse;
+ bool changed;
+
+ changed = mac->link_status != link_status ||
+ mac->link_speed != link_speed ||
+ mac->link_duplex != link_duplex;
+ if (!changed)
+ return;
+
+ /*
+ * VF's link status/speed/duplex were updated by polling from PF driver,
+ * because the link status/speed/duplex may be changed in the polling
+ * interval, so driver will report lse (lsc event) once any of the above
+ * thress variables changed.
+ * But if the PF's link status is down and driver saved link status is
+ * also down, there are no need to report lse.
+ */
+ report_lse = true;
+ if (link_status == ETH_LINK_DOWN && link_status == mac->link_status)
+ report_lse = false;
+
+ mac->link_status = link_status;
+ mac->link_speed = link_speed;
+ mac->link_duplex = link_duplex;
+
+ if (report_lse)
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
static int
hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
@@ -2373,8 +2408,11 @@ hns3vf_stop_service(struct hns3_adapter *hns)
struct rte_eth_dev *eth_dev;
eth_dev = &rte_eth_devices[hw->data->port_id];
- if (hw->adapter_state == HNS3_NIC_STARTED)
+ if (hw->adapter_state == HNS3_NIC_STARTED) {
rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
+ hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ }
hw->mac.link_status = ETH_LINK_DOWN;
hns3_set_rxtx_function(eth_dev);
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index d2a5db8..3e44e3b 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -203,8 +203,9 @@ hns3_cmd_crq_empty(struct hns3_hw *hw)
static void
hns3_mbx_handler(struct hns3_hw *hw)
{
- struct hns3_mac *mac = &hw->mac;
enum hns3_reset_level reset_level;
+ uint8_t link_status, link_duplex;
+ uint32_t link_speed;
uint16_t *msg_q;
uint8_t opcode;
uint32_t tail;
@@ -218,10 +219,11 @@ hns3_mbx_handler(struct hns3_hw *hw)
opcode = msg_q[0] & 0xff;
switch (opcode) {
case HNS3_MBX_LINK_STAT_CHANGE:
- memcpy(&mac->link_speed, &msg_q[2],
- sizeof(mac->link_speed));
- mac->link_status = rte_le_to_cpu_16(msg_q[1]);
- mac->link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
+ memcpy(&link_speed, &msg_q[2], sizeof(link_speed));
+ link_status = rte_le_to_cpu_16(msg_q[1]);
+ link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
+ hns3vf_update_link_status(hw, link_status, link_speed,
+ link_duplex);
break;
case HNS3_MBX_ASSERTING_RESET:
/* PF has asserted reset hence VF should go in pending
@@ -310,7 +312,7 @@ hns3_handle_link_change_event(struct hns3_hw *hw,
if (!req->msg[LINK_STATUS_OFFSET])
hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);
- hns3_update_link_status(hw);
+ hns3_update_link_status_and_event(hw);
}
static void
--
2.7.4

View File

@ -0,0 +1,96 @@
From fd6de494db0d040ca42a6f57f202515f537a62b3 Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Wed, 3 Feb 2021 20:23:47 +0800
Subject: [PATCH 033/189] net/hns3: fix query order of link status and link
info
When link information is updated in the firmware, the link information
is updated first and then the link status is updated. In a 1s periodic
task, PF driver queries the link information and then obtains link
status.
It may lead to a 1s time difference for obtaining valid link information
when the port is up. Therefore, the query order of driver should be
reversed to the order of firmware.
Fixes: 109e4dd1bd7a ("net/hns3: get link state change through mailbox")
Fixes: 59fad0f32135 ("net/hns3: support link update operation")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 27 ++++++++++++++++++++-------
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index bc77608..b624fce 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -93,7 +93,7 @@ static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
int on);
-static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev);
+static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
static bool hns3_update_link_status(struct hns3_hw *hw);
static int hns3_add_mc_addr(struct hns3_hw *hw,
@@ -2642,8 +2642,8 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev,
struct rte_eth_link new_link;
if (!hns3_is_reset_pending(hns)) {
- hns3_update_speed_duplex(eth_dev);
hns3_update_link_status(hw);
+ hns3_update_link_info(eth_dev);
}
memset(&new_link, 0, sizeof(new_link));
@@ -4368,11 +4368,9 @@ hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
}
static int
-hns3_update_speed_duplex(struct rte_eth_dev *eth_dev)
+hns3_update_fiber_link_info(struct hns3_hw *hw)
{
- struct hns3_adapter *hns = eth_dev->data->dev_private;
- struct hns3_hw *hw = &hns->hw;
- struct hns3_pf *pf = &hns->pf;
+ struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
uint32_t speed;
int ret;
@@ -4395,6 +4393,21 @@ hns3_update_speed_duplex(struct rte_eth_dev *eth_dev)
}
static int
+hns3_update_link_info(struct rte_eth_dev *eth_dev)
+{
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret = 0;
+
+ if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
+ return 0;
+ else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
+ ret = hns3_update_fiber_link_info(hw);
+
+ return ret;
+}
+
+static int
hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
{
struct hns3_config_mac_mode_cmd *req;
@@ -4509,8 +4522,8 @@ hns3_service_handler(void *param)
struct hns3_hw *hw = &hns->hw;
if (!hns3_is_reset_pending(hns)) {
- hns3_update_speed_duplex(eth_dev);
hns3_update_link_status_and_event(hw);
+ hns3_update_link_info(eth_dev);
} else {
hns3_warn(hw, "Cancel the query when reset is pending");
}
--
2.7.4

View File

@ -0,0 +1,115 @@
From 721dce02ae8a5ca9da07849d9759310f1e3cafda Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Wed, 3 Feb 2021 20:23:48 +0800
Subject: [PATCH 034/189] net/hns3: fix link status change from firmware
When the hardware link status changes, the firmware proactively
reports the link status change message, and then driver update
link status. This feature is lack of a switch to control in PF
driver. Otherwise, this feature does not take effect when the
kernel PF driver that supports the feature is not loaded.
Fixes: 109e4dd1bd7a ("net/hns3: get link state change through mailbox")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_cmd.h | 10 ++++++++++
drivers/net/hns3/hns3_ethdev.c | 31 +++++++++++++++++++++++++++++++
2 files changed, 41 insertions(+)
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index dc97a1a..ad5e188 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -206,6 +206,9 @@ enum hns3_opcode_type {
/* Clear hardware state command */
HNS3_OPC_CLEAR_HW_STATE = 0x700B,
+ /* Firmware stats command */
+ HNS3_OPC_FIRMWARE_COMPAT_CFG = 0x701A,
+
/* SFP command */
HNS3_OPC_SFP_GET_SPEED = 0x7104,
@@ -633,6 +636,13 @@ enum hns3_promisc_type {
HNS3_BROADCAST = 3,
};
+#define HNS3_LINK_EVENT_REPORT_EN_B 0
+#define HNS3_NCSI_ERROR_REPORT_EN_B 1
+struct hns3_firmware_compat_cmd {
+ uint32_t compat;
+ uint8_t rsv[20];
+};
+
#define HNS3_MAC_TX_EN_B 6
#define HNS3_MAC_RX_EN_B 7
#define HNS3_MAC_PAD_TX_B 11
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index b624fce..30f09a7 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -3919,6 +3919,26 @@ hns3_buffer_alloc(struct hns3_hw *hw)
}
static int
+hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
+{
+ struct hns3_firmware_compat_cmd *req;
+ struct hns3_cmd_desc desc;
+ uint32_t compat = 0;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
+ req = (struct hns3_firmware_compat_cmd *)desc.data;
+
+ if (is_init) {
+ hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
+ hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
+ }
+
+ req->compat = rte_cpu_to_le_32(compat);
+
+ return hns3_cmd_send(hw, &desc, 1);
+}
+
+static int
hns3_mac_init(struct hns3_hw *hw)
{
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
@@ -4610,6 +4630,15 @@ hns3_init_hardware(struct hns3_adapter *hns)
goto err_mac_init;
}
+ /*
+ * Requiring firmware to enable some features, driver can
+ * still work without it.
+ */
+ ret = hns3_firmware_compat_config(hw, true);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "firmware compatible features not "
+ "supported, ret = %d.", ret);
+
return 0;
err_mac_init:
@@ -4746,6 +4775,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
err_enable_intr:
hns3_fdir_filter_uninit(hns);
err_fdir:
+ (void)hns3_firmware_compat_config(hw, false);
hns3_uninit_umv_space(hw);
err_init_hw:
hns3_tqp_stats_uninit(hw);
@@ -4780,6 +4810,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev)
(void)hns3_config_gro(hw, false);
hns3_promisc_uninit(hw);
hns3_fdir_filter_uninit(hns);
+ (void)hns3_firmware_compat_config(hw, false);
hns3_uninit_umv_space(hw);
hns3_tqp_stats_uninit(hw);
hns3_pf_disable_irq0(hw);
--
2.7.4

View File

@ -0,0 +1,334 @@
From 00cee658ee4db31787baecbaff321d14734f6494 Mon Sep 17 00:00:00 2001
From: Lijun Ou <oulijun@huawei.com>
Date: Wed, 3 Feb 2021 20:23:49 +0800
Subject: [PATCH 035/189] net/hns3: fix RSS indirection table size
The driver should not use the fixed value as the validity check of
RSS indirection table size with HW supported. As a result, it will
cause misjudgment when the RSS RETA size with HW supported have
changed.
Fixes: c37ca66f2b27 ("net/hns3: support RSS")
Cc: stable@dpdk.org
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_cmd.c | 11 +++++++++++
drivers/net/hns3/hns3_cmd.h | 7 ++++++-
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 18 ++++++++++++++++--
drivers/net/hns3/hns3_ethdev_vf.c | 18 ++++++++++++++++--
drivers/net/hns3/hns3_flow.c | 6 +++---
drivers/net/hns3/hns3_rss.c | 28 ++++++++++++++--------------
drivers/net/hns3/hns3_rss.h | 5 ++---
8 files changed, 69 insertions(+), 26 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 4c301cb..a6ea072 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -430,6 +430,16 @@ static void hns3_parse_capability(struct hns3_hw *hw,
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
}
+static uint32_t
+hns3_build_api_caps(void)
+{
+ uint32_t api_caps = 0;
+
+ hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1);
+
+ return rte_cpu_to_le_32(api_caps);
+}
+
static enum hns3_cmd_status
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
@@ -439,6 +449,7 @@ hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
resp = (struct hns3_query_version_cmd *)desc.data;
+ resp->api_caps = hns3_build_api_caps();
/* Initialize the cmd function */
ret = hns3_cmd_send(hw, &desc, 1);
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index ad5e188..5640fe4 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -295,11 +295,16 @@ enum HNS3_CAPS_BITS {
HNS3_CAPS_HW_PAD_B,
HNS3_CAPS_STASH_B,
};
+
+enum HNS3_API_CAP_BITS {
+ HNS3_API_CAP_FLEX_RSS_TBL_B,
+};
+
#define HNS3_QUERY_CAP_LENGTH 3
struct hns3_query_version_cmd {
uint32_t firmware;
uint32_t hardware;
- uint32_t rsv;
+ uint32_t api_caps;
uint32_t caps[HNS3_QUERY_CAP_LENGTH]; /* capabilities of device */
};
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 5aa374c..7fc6ac9 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -644,7 +644,7 @@ hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
* stage of the reset process.
*/
if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
- for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hw->rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] =
i % hw->alloc_rss_size;
}
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 30f09a7..df7220b 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2593,7 +2593,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
info->vmdq_queue_num = 0;
- info->reta_size = HNS3_RSS_IND_TBL_SIZE;
+ info->reta_size = hw->rss_ind_tbl_size;
info->hash_key_size = HNS3_RSS_KEY_SIZE;
info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
@@ -2984,6 +2984,20 @@ hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
}
static int
+hns3_check_dev_specifications(struct hns3_hw *hw)
+{
+ if (hw->rss_ind_tbl_size == 0 ||
+ hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
+ hns3_err(hw, "the size of hash lookup table configured (%u)"
+ " exceeds the maximum(%u)", hw->rss_ind_tbl_size,
+ HNS3_RSS_IND_TBL_SIZE_MAX);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
hns3_query_dev_specifications(struct hns3_hw *hw)
{
struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
@@ -3003,7 +3017,7 @@ hns3_query_dev_specifications(struct hns3_hw *hw)
hns3_parse_dev_specifications(hw, desc);
- return 0;
+ return hns3_check_dev_specifications(hw);
}
static int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 3a682e5..1b1989e 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1016,7 +1016,7 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
info->vmdq_queue_num = 0;
- info->reta_size = HNS3_RSS_IND_TBL_SIZE;
+ info->reta_size = hw->rss_ind_tbl_size;
info->hash_key_size = HNS3_RSS_KEY_SIZE;
info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
@@ -1149,6 +1149,20 @@ hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
}
static int
+hns3vf_check_dev_specifications(struct hns3_hw *hw)
+{
+ if (hw->rss_ind_tbl_size == 0 ||
+ hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
+ hns3_warn(hw, "the size of hash lookup table configured (%u)"
+ " exceeds the maximum(%u)", hw->rss_ind_tbl_size,
+ HNS3_RSS_IND_TBL_SIZE_MAX);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
hns3vf_query_dev_specifications(struct hns3_hw *hw)
{
struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
@@ -1168,7 +1182,7 @@ hns3vf_query_dev_specifications(struct hns3_hw *hw)
hns3vf_parse_dev_specifications(hw, desc);
- return 0;
+ return hns3vf_check_dev_specifications(hw);
}
static int
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 3e387ac..a601124 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1489,14 +1489,14 @@ hns3_update_indir_table(struct rte_eth_dev *dev,
{
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE];
+ uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
uint16_t j;
uint32_t i;
/* Fill in redirection table */
memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
sizeof(hw->rss_info.rss_indirection_tbl));
- for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) {
+ for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
j %= num;
if (conf->queue[j] >= hw->alloc_rss_size) {
hns3_err(hw, "queue id(%u) set to redirection table "
@@ -1507,7 +1507,7 @@ hns3_update_indir_table(struct rte_eth_dev *dev,
indir_tbl[i] = conf->queue[j];
}
- return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE);
+ return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
}
static int
diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c
index 7d1a297..858e31a 100644
--- a/drivers/net/hns3/hns3_rss.c
+++ b/drivers/net/hns3/hns3_rss.c
@@ -312,7 +312,7 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size)
/* Update redirection table of hw */
memcpy(hw->rss_info.rss_indirection_tbl, indir,
- sizeof(hw->rss_info.rss_indirection_tbl));
+ sizeof(uint16_t) * size);
return 0;
}
@@ -324,13 +324,13 @@ hns3_rss_reset_indir_table(struct hns3_hw *hw)
int ret;
lut = rte_zmalloc("hns3_rss_lut",
- HNS3_RSS_IND_TBL_SIZE * sizeof(uint16_t), 0);
+ hw->rss_ind_tbl_size * sizeof(uint16_t), 0);
if (lut == NULL) {
hns3_err(hw, "No hns3_rss_lut memory can be allocated");
return -ENOMEM;
}
- ret = hns3_set_rss_indir_table(hw, lut, HNS3_RSS_IND_TBL_SIZE);
+ ret = hns3_set_rss_indir_table(hw, lut, hw->rss_ind_tbl_size);
if (ret)
hns3_err(hw, "RSS uninit indir table failed: %d", ret);
rte_free(lut);
@@ -428,7 +428,7 @@ hns3_dev_rss_hash_update(struct rte_eth_dev *dev,
} else if (rss_hf && rss_cfg->conf.types == 0) {
/* Enable RSS, restore indirection table by hw's config */
ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
- HNS3_RSS_IND_TBL_SIZE);
+ hw->rss_ind_tbl_size);
if (ret)
goto conf_err;
}
@@ -505,15 +505,15 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev,
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
struct hns3_rss_conf *rss_cfg = &hw->rss_info;
- uint16_t i, indir_size = HNS3_RSS_IND_TBL_SIZE; /* Table size is 512 */
- uint16_t indirection_tbl[HNS3_RSS_IND_TBL_SIZE];
+ uint16_t indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
uint16_t idx, shift;
+ uint16_t i;
int ret;
- if (reta_size != indir_size || reta_size > ETH_RSS_RETA_SIZE_512) {
+ if (reta_size != hw->rss_ind_tbl_size) {
hns3_err(hw, "The size of hash lookup table configured (%u)"
"doesn't match the number hardware can supported"
- "(%u)", reta_size, indir_size);
+ "(%u)", reta_size, hw->rss_ind_tbl_size);
return -EINVAL;
}
rte_spinlock_lock(&hw->lock);
@@ -536,7 +536,7 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev,
}
ret = hns3_set_rss_indir_table(hw, indirection_tbl,
- HNS3_RSS_IND_TBL_SIZE);
+ hw->rss_ind_tbl_size);
rte_spinlock_unlock(&hw->lock);
return ret;
@@ -561,13 +561,13 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev,
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
struct hns3_rss_conf *rss_cfg = &hw->rss_info;
- uint16_t i, indir_size = HNS3_RSS_IND_TBL_SIZE; /* Table size is 512 */
uint16_t idx, shift;
+ uint16_t i;
- if (reta_size != indir_size || reta_size > ETH_RSS_RETA_SIZE_512) {
+ if (reta_size != hw->rss_ind_tbl_size) {
hns3_err(hw, "The size of hash lookup table configured (%u)"
" doesn't match the number hardware can supported"
- "(%u)", reta_size, indir_size);
+ "(%u)", reta_size, hw->rss_ind_tbl_size);
return -EINVAL;
}
rte_spinlock_lock(&hw->lock);
@@ -662,7 +662,7 @@ hns3_rss_set_default_args(struct hns3_hw *hw)
memcpy(rss_cfg->key, hns3_hash_key, HNS3_RSS_KEY_SIZE);
/* Initialize RSS indirection table */
- for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hw->rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = i % queue_num;
}
@@ -711,7 +711,7 @@ hns3_config_rss(struct hns3_adapter *hns)
*/
if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
- HNS3_RSS_IND_TBL_SIZE);
+ hw->rss_ind_tbl_size);
if (ret)
goto rss_tuple_uninit;
}
diff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h
index 05d5c26..94668ed 100644
--- a/drivers/net/hns3/hns3_rss.h
+++ b/drivers/net/hns3/hns3_rss.h
@@ -24,9 +24,8 @@
ETH_RSS_L4_DST_ONLY)
#define HNS3_RSS_IND_TBL_SIZE 512 /* The size of hash lookup table */
+#define HNS3_RSS_IND_TBL_SIZE_MAX 2048
#define HNS3_RSS_KEY_SIZE 40
-#define HNS3_RSS_CFG_TBL_NUM \
- (HNS3_RSS_IND_TBL_SIZE / HNS3_RSS_CFG_TBL_SIZE)
#define HNS3_RSS_SET_BITMAP_MSK 0xffff
#define HNS3_RSS_HASH_ALGO_TOEPLITZ 0
@@ -45,7 +44,7 @@ struct hns3_rss_conf {
uint8_t hash_algo; /* hash function type definited by hardware */
uint8_t key[HNS3_RSS_KEY_SIZE]; /* Hash key */
struct hns3_rss_tuple_cfg rss_tuple_sets;
- uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE]; /* Shadow table */
+ uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */
bool valid; /* check if RSS rule is valid */
/*
--
2.7.4

View File

@ -0,0 +1,37 @@
From aefb4f06db3a837b7e93f8088c1d4882aa9a5041 Mon Sep 17 00:00:00 2001
From: Chengwen Feng <fengchengwen@huawei.com>
Date: Wed, 3 Feb 2021 20:23:50 +0800
Subject: [PATCH 036/189] net/hns3: constrain TM peak rate
User could config Port or TC's peak rate by TM ops, but hardware does
not support peak rate which lower than 1Mbps. So we constraint TM
peak rate must be at least 1Mbps.
Fixes: c09c7847d892 ("net/hns3: support traffic management")
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_tm.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d1639d4..bcae57a 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -200,6 +200,12 @@ hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
return -EINVAL;
}
+ if (profile->peak.rate < hns3_tm_rate_convert_firmware2tm(1)) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+ error->message = "peak rate must be at least 1Mbps";
+ return -EINVAL;
+ }
+
if (profile->peak.size) {
error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
error->message = "peak bucket size not supported";
--
2.7.4

View File

@ -0,0 +1,54 @@
From 93de9a96a68093772f0137a9899616cfd8cea0c6 Mon Sep 17 00:00:00 2001
From: Chengwen Feng <fengchengwen@huawei.com>
Date: Wed, 3 Feb 2021 20:23:51 +0800
Subject: [PATCH 037/189] net/hns3: remove MPLS from supported flow items
The Kunpeng920 and Kunpeng930 don't support parse MPLS packet, so
remove the type from supported flow items.
Fixes: fcba820d9b9e ("net/hns3: support flow director")
Cc: stable@dpdk.org
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_flow.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index a601124..c484114 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -44,8 +44,7 @@ static enum rte_flow_item_type first_items[] = {
RTE_FLOW_ITEM_TYPE_NVGRE,
RTE_FLOW_ITEM_TYPE_VXLAN,
RTE_FLOW_ITEM_TYPE_GENEVE,
- RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
- RTE_FLOW_ITEM_TYPE_MPLS
+ RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};
static enum rte_flow_item_type L2_next_items[] = {
@@ -65,8 +64,7 @@ static enum rte_flow_item_type L3_next_items[] = {
static enum rte_flow_item_type L4_next_items[] = {
RTE_FLOW_ITEM_TYPE_VXLAN,
RTE_FLOW_ITEM_TYPE_GENEVE,
- RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
- RTE_FLOW_ITEM_TYPE_MPLS
+ RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};
static enum rte_flow_item_type tunnel_next_items[] = {
@@ -1118,8 +1116,7 @@ is_tunnel_packet(enum rte_flow_item_type type)
if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
type == RTE_FLOW_ITEM_TYPE_VXLAN ||
type == RTE_FLOW_ITEM_TYPE_NVGRE ||
- type == RTE_FLOW_ITEM_TYPE_GENEVE ||
- type == RTE_FLOW_ITEM_TYPE_MPLS)
+ type == RTE_FLOW_ITEM_TYPE_GENEVE)
return true;
return false;
}
--
2.7.4

View File

@ -0,0 +1,67 @@
From a2524d07bf2f71c925d363fbb7fcfc6d7def57c4 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Wed, 3 Feb 2021 20:23:52 +0800
Subject: [PATCH 038/189] net/hns3: fix stats flip overflow
Currently, statistics may overflow in some scenarios.
For example, if HW statistics are reset by stats reset operation,
but there are still a lot of residual packets exist in the HW
queues and these packets are error packets, flip may occurred
because the ipacket is obtained by subtracting the number of
software error packets from the number of HW received packets.
This patch verifies the calculation and returns 0 when overflow
may occur.
Fixes: 8839c5e202f3 ("net/hns3: support device stats")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_stats.c | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index 3ba09e2..e0e40ca 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -554,8 +554,14 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
}
rte_stats->oerrors = 0;
- rte_stats->ipackets = stats->rcb_rx_ring_pktnum_rcd -
- rte_stats->ierrors;
+ /*
+ * If HW statistics are reset by stats_reset, but a lot of residual
+ * packets exist in the hardware queue and these packets are error
+ * packets, flip overflow may occurred. So return 0 in this case.
+ */
+ rte_stats->ipackets =
+ stats->rcb_rx_ring_pktnum_rcd > rte_stats->ierrors ?
+ stats->rcb_rx_ring_pktnum_rcd - rte_stats->ierrors : 0;
rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd -
rte_stats->oerrors;
rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
@@ -792,8 +798,15 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
rxq_stats = &rxq->basic_stats;
rxq_stats->errors = rxq->err_stats.l2_errors +
rxq->err_stats.pkt_len_errors;
- rxq_stats->packets = stats->rcb_rx_ring_pktnum[i] -
- rxq_stats->errors;
+ /*
+ * If HW statistics are reset by stats_reset, but a lot of
+ * residual packets exist in the hardware queue and these
+ * packets are error packets, flip overflow may occurred.
+ * So return 0 in this case.
+ */
+ rxq_stats->packets =
+ stats->rcb_rx_ring_pktnum[i] > rxq_stats->errors ?
+ stats->rcb_rx_ring_pktnum[i] - rxq_stats->errors : 0;
rxq_stats->bytes = 0;
for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
val = (char *)rxq_stats +
--
2.7.4

View File

@ -0,0 +1,338 @@
From 458bb9377c72010ed41f4d2faedad2bd08562cd1 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Wed, 3 Feb 2021 20:23:53 +0800
Subject: [PATCH 039/189] net/hns3: use C11 atomics
Replace all the atomic type with C11 atomic builtins in hns3
PMD.
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_cmd.c | 13 +++++++------
drivers/net/hns3/hns3_ethdev.c | 21 ++++++++++++---------
drivers/net/hns3/hns3_ethdev.h | 4 ++--
drivers/net/hns3/hns3_ethdev_vf.c | 19 +++++++++++--------
drivers/net/hns3/hns3_intr.c | 22 ++++++++++++++--------
drivers/net/hns3/hns3_mbx.c | 4 ++--
6 files changed, 48 insertions(+), 35 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index a6ea072..9393978 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -202,7 +202,8 @@ hns3_cmd_csq_clean(struct hns3_hw *hw)
hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
csq->next_to_use, csq->next_to_clean);
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1,
+ __ATOMIC_RELAXED);
hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}
@@ -311,7 +312,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (hns3_cmd_csq_done(hw))
return 0;
- if (rte_atomic16_read(&hw->reset.disable_cmd)) {
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
hns3_err(hw,
"Don't wait for reply because of disable_cmd");
return -EBUSY;
@@ -358,7 +359,7 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
int retval;
uint32_t ntc;
- if (rte_atomic16_read(&hw->reset.disable_cmd))
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
return -EBUSY;
rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -535,7 +536,7 @@ hns3_cmd_init(struct hns3_hw *hw)
ret = -EBUSY;
goto err_cmd_init;
}
- rte_atomic16_clear(&hw->reset.disable_cmd);
+ __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
ret = hns3_cmd_query_firmware_version_and_capability(hw);
if (ret) {
@@ -557,7 +558,7 @@ hns3_cmd_init(struct hns3_hw *hw)
return 0;
err_cmd_init:
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
return ret;
}
@@ -583,7 +584,7 @@ hns3_cmd_uninit(struct hns3_hw *hw)
{
rte_spinlock_lock(&hw->cmq.csq.lock);
rte_spinlock_lock(&hw->cmq.crq.lock);
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
hns3_cmd_clear_regs(hw);
rte_spinlock_unlock(&hw->cmq.crq.lock);
rte_spinlock_unlock(&hw->cmq.csq.lock);
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index df7220b..f54b7c2 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -130,7 +130,7 @@ hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
{
struct hns3_hw *hw = &hns->hw;
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
if (!is_delay) {
@@ -150,7 +150,7 @@ hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
{
struct hns3_hw *hw = &hns->hw;
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
if (!is_delay) {
@@ -5070,7 +5070,7 @@ hns3_do_stop(struct hns3_adapter *hns)
return ret;
hw->mac.link_status = ETH_LINK_DOWN;
- if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -5613,7 +5613,7 @@ hns3_prepare_reset(struct hns3_adapter *hns)
* any mailbox handling or command to firmware is only valid
* after hns3_cmd_init is called.
*/
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
hw->reset.stats.request_cnt++;
break;
case HNS3_IMP_RESET:
@@ -5673,7 +5673,7 @@ hns3_stop_service(struct hns3_adapter *hns)
* from table space. Hence, for function reset software intervention is
* required to delete the entries
*/
- if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -5795,8 +5795,10 @@ hns3_reset_service(void *param)
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_DEFERRED) {
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ __ATOMIC_RELAXED);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5805,7 +5807,7 @@ hns3_reset_service(void *param)
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
}
}
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
/*
* Check if there is any ongoing reset in the hardware. This status can
@@ -6325,7 +6327,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
} else {
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 547e991..cf42ef1 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -352,11 +352,11 @@ enum hns3_schedule {
struct hns3_reset_data {
enum hns3_reset_stage stage;
- rte_atomic16_t schedule;
+ uint16_t schedule;
/* Reset flag, covering the entire reset process */
uint16_t resetting;
/* Used to disable sending cmds during reset */
- rte_atomic16_t disable_cmd;
+ uint16_t disable_cmd;
/* The reset level being processed */
enum hns3_reset_level level;
/* Reset level set, each bit represents a reset level */
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 1b1989e..42cee37 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1059,7 +1059,7 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
val = hns3_read_dev(hw, HNS3_VF_RST_ING);
hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -1934,7 +1934,7 @@ hns3vf_do_stop(struct hns3_adapter *hns)
hw->mac.link_status = ETH_LINK_DOWN;
- if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
hns3vf_configure_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -2410,7 +2410,7 @@ hns3vf_prepare_reset(struct hns3_adapter *hns)
ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
0, true, NULL, 0);
}
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
return ret;
}
@@ -2449,7 +2449,7 @@ hns3vf_stop_service(struct hns3_adapter *hns)
* from table space. Hence, for function reset software intervention is
* required to delete the entries.
*/
- if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
hns3vf_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -2621,8 +2621,10 @@ hns3vf_reset_service(void *param)
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_DEFERRED) {
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ __ATOMIC_RELAXED);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2631,7 +2633,7 @@ hns3vf_reset_service(void *param)
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
}
}
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
/*
* Hardware reset has been notified, we now have to poll & check if
@@ -2854,7 +2856,8 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
} else {
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 51f19b4..88ce4c6 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -1762,7 +1762,7 @@ hns3_reset_init(struct hns3_hw *hw)
hw->reset.request = 0;
hw->reset.pending = 0;
hw->reset.resetting = 0;
- rte_atomic16_init(&hw->reset.disable_cmd);
+ __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
hw->reset.wait_data = rte_zmalloc("wait_data",
sizeof(struct hns3_wait_data), 0);
if (!hw->reset.wait_data) {
@@ -1779,7 +1779,8 @@ hns3_schedule_reset(struct hns3_adapter *hns)
/* Reschedule the reset process after successful initialization */
if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_PENDING);
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
+ __ATOMIC_RELAXED);
return;
}
@@ -1787,11 +1788,14 @@ hns3_schedule_reset(struct hns3_adapter *hns)
return;
/* Schedule restart alarm if it is not scheduled yet */
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_REQUESTED)
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_REQUESTED)
return;
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED)
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_DEFERRED)
rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ __ATOMIC_RELAXED);
rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
}
@@ -1808,9 +1812,11 @@ hns3_schedule_delayed_reset(struct hns3_adapter *hns)
return;
}
- if (rte_atomic16_read(&hns->hw.reset.schedule) != SCHEDULE_NONE)
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
+ SCHEDULE_NONE)
return;
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_DEFERRED);
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
+ __ATOMIC_RELAXED);
rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
}
@@ -1983,7 +1989,7 @@ hns3_reset_err_handle(struct hns3_adapter *hns)
* Regardless of whether the execution is successful or not, the
* flow after execution must be continued.
*/
- if (rte_atomic16_read(&hw->reset.disable_cmd))
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
(void)hns3_cmd_init(hw);
reset_fail:
hw->reset.attempts = 0;
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 3e44e3b..e745843 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -83,7 +83,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
end = now + HNS3_MAX_RETRY_MS;
while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
(now < end)) {
- if (rte_atomic16_read(&hw->reset.disable_cmd)) {
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
hns3_err(hw, "Don't wait for mbx respone because of "
"disable_cmd");
return -EBUSY;
@@ -369,7 +369,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
int i;
while (!hns3_cmd_crq_empty(hw)) {
- if (rte_atomic16_read(&hw->reset.disable_cmd))
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
return;
desc = &crq->desc[crq->next_to_use];
--
2.7.4

View File

@ -0,0 +1,65 @@
From 2b24ee3f6acfaa8170a37022aab4b5b93d4dc0ae Mon Sep 17 00:00:00 2001
From: Chengwen Feng <fengchengwen@huawei.com>
Date: Wed, 3 Feb 2021 20:23:54 +0800
Subject: [PATCH 040/189] net/hns3: fix flow director rule residue on malloc
failure
After FD rule config success, driver will malloc fdir_rule to hold the
rule info, if malloc fail the FD rule in hardware was not cleanup.
Fixes: fcba820d9b9e ("net/hns3: support flow director")
Cc: stable@dpdk.org
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_flow.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index c484114..a016857 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1806,17 +1806,18 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
flow->counter_id = fdir_rule.act_cnt.id;
}
+
+ fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
+ sizeof(struct hns3_fdir_rule_ele),
+ 0);
+ if (fdir_rule_ptr == NULL) {
+ hns3_err(hw, "failed to allocate fdir_rule memory.");
+ ret = -ENOMEM;
+ goto err_fdir;
+ }
+
ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
if (!ret) {
- fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
- sizeof(struct hns3_fdir_rule_ele),
- 0);
- if (fdir_rule_ptr == NULL) {
- hns3_err(hw, "Failed to allocate fdir_rule memory");
- ret = -ENOMEM;
- goto err_fdir;
- }
-
memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
sizeof(struct hns3_fdir_rule));
TAILQ_INSERT_TAIL(&process_list->fdir_list,
@@ -1827,10 +1828,10 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return flow;
}
+ rte_free(fdir_rule_ptr);
err_fdir:
if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
hns3_counter_release(dev, fdir_rule.act_cnt.id);
-
err:
rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to create flow");
--
2.7.4

View File

@ -0,0 +1,76 @@
From 50cb4151490c7814418be61cc54d45ad335c11aa Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Wed, 3 Feb 2021 20:23:55 +0800
Subject: [PATCH 041/189] net/hns3: fix firmware exceptions by concurrent
commands
There are two scenarios that command queue uninit performed
concurrently with the firmware command: asynchronous command
and timeout command.
For asynchronous command, if a large number of functions send
commands, these commands may need to be queued to wait for
firmware processing. If a function is uninited suddenly, CMDQ
clearing and firmware processing may be performed concurrently.
For timeout command, if the command failed due to busy scheduling
of firmware, this command will be processed in the next scheduling.
And this may lead to concurrency.
The preceding concurrency may lead to a firmware exceptions.
This patch add a waiting time to ensure the firmware complete the
processing of left over command when PMD uninit.
Fixes: 737f30e1c3ab ("net/hns3: support command interface with firmware")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_cmd.c | 14 +++++++++++++-
drivers/net/hns3/hns3_cmd.h | 1 +
2 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 9393978..0590898 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -582,9 +582,21 @@ hns3_cmd_destroy_queue(struct hns3_hw *hw)
void
hns3_cmd_uninit(struct hns3_hw *hw)
{
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+
+ /*
+ * A delay is added to ensure that the register cleanup operations
+ * will not be performed concurrently with the firmware command and
+ * ensure that all the reserved commands are executed.
+ * Concurrency may occur in two scenarios: asynchronous command and
+ * timeout command. If the command fails to be executed due to busy
+ * scheduling, the command will be processed in the next scheduling
+ * of the firmware.
+ */
+ rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME);
+
rte_spinlock_lock(&hw->cmq.csq.lock);
rte_spinlock_lock(&hw->cmq.crq.lock);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
hns3_cmd_clear_regs(hw);
rte_spinlock_unlock(&hw->cmq.crq.lock);
rte_spinlock_unlock(&hw->cmq.csq.lock);
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 5640fe4..5010278 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -8,6 +8,7 @@
#include <stdint.h>
#define HNS3_CMDQ_TX_TIMEOUT 30000
+#define HNS3_CMDQ_CLEAR_WAIT_TIME 200
#define HNS3_CMDQ_RX_INVLD_B 0
#define HNS3_CMDQ_RX_OUTVLD_B 1
#define HNS3_CMD_DESC_ALIGNMENT 4096
--
2.7.4

View File

@ -0,0 +1,50 @@
From 6ceabcab7a4b103f854f338486c1d9fd08349e90 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Wed, 3 Feb 2021 20:23:56 +0800
Subject: [PATCH 042/189] net/hns3: fix VF reset on mailbox failure
Currently, during the VF reset, the VF will send a MBX to inform
PF to reset it and the disable command bit will be set whether
the MBX is successful. Generally, multiple reset attempts are made
after a failure. However, because the command is disabled, all
subsequent reset will all fail.
This patch disable the command only after the MBX message is
successfully.
Fixes: 2790c6464725 ("net/hns3: support device reset")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev_vf.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 42cee37..fc9f3c8 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2404,15 +2404,17 @@ static int
hns3vf_prepare_reset(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
- int ret = 0;
+ int ret;
if (hw->reset.level == HNS3_VF_FUNC_RESET) {
ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
0, true, NULL, 0);
+ if (ret)
+ return ret;
}
__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
- return ret;
+ return 0;
}
static int
--
2.7.4

View File

@ -0,0 +1,98 @@
From 835880a4bec5fc2fd64d199f3a899e2864c11252 Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Wed, 3 Feb 2021 20:23:57 +0800
Subject: [PATCH 043/189] net/hns3: validate requested maximum Rx frame length
When jumbo frame is enabled, the MTU size needs to be modified
based on 'max_rx_pkt_len'. Driver needs to check the validity
of 'max_rx_pkt_len'. And it should be in the range of
HNS3_DEFAULT_FRAME_LEN and HNS3_MAX_FRAME_LEN. Otherwise, it may
cause that the MTU size is inconsistent with jumbo frame offload.
Fixes: 19a3ca4c99cf ("net/hns3: add start/stop and configure operations")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 19 +++++++++++++------
drivers/net/hns3/hns3_ethdev_vf.c | 19 +++++++++++++------
2 files changed, 26 insertions(+), 12 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index f54b7c2..7ed55b1 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2343,6 +2343,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
uint16_t nb_rx_q = dev->data->nb_rx_queues;
uint16_t nb_tx_q = dev->data->nb_tx_queues;
struct rte_eth_rss_conf rss_conf;
+ uint32_t max_rx_pkt_len;
uint16_t mtu;
bool gro_en;
int ret;
@@ -2396,12 +2397,18 @@ hns3_dev_configure(struct rte_eth_dev *dev)
* according to the maximum RX packet length.
*/
if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- /*
- * Security of max_rx_pkt_len is guaranteed in dpdk frame.
- * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it
- * can safely assign to "uint16_t" type variable.
- */
- mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
+ max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
+ if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
+ max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
+ hns3_err(hw, "maximum Rx packet length must be greater "
+ "than %u and less than %u when jumbo frame enabled.",
+ (uint16_t)HNS3_DEFAULT_FRAME_LEN,
+ (uint16_t)HNS3_MAX_FRAME_LEN);
+ ret = -EINVAL;
+ goto cfg_err;
+ }
+
+ mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
ret = hns3_dev_mtu_set(dev, mtu);
if (ret)
goto cfg_err;
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index fc9f3c8..d5157cf 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -778,6 +778,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
uint16_t nb_rx_q = dev->data->nb_rx_queues;
uint16_t nb_tx_q = dev->data->nb_tx_queues;
struct rte_eth_rss_conf rss_conf;
+ uint32_t max_rx_pkt_len;
uint16_t mtu;
bool gro_en;
int ret;
@@ -825,12 +826,18 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
* according to the maximum RX packet length.
*/
if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- /*
- * Security of max_rx_pkt_len is guaranteed in dpdk frame.
- * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it
- * can safely assign to "uint16_t" type variable.
- */
- mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
+ max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
+ if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
+ max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
+ hns3_err(hw, "maximum Rx packet length must be greater "
+ "than %u and less than %u when jumbo frame enabled.",
+ (uint16_t)HNS3_DEFAULT_FRAME_LEN,
+ (uint16_t)HNS3_MAX_FRAME_LEN);
+ ret = -EINVAL;
+ goto cfg_err;
+ }
+
+ mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
ret = hns3vf_dev_mtu_set(dev, mtu);
if (ret)
goto cfg_err;
--
2.7.4

View File

@ -0,0 +1,156 @@
From 6b66b8fd3b82d5f7c7d35b5e1c52d2611abc4317 Mon Sep 17 00:00:00 2001
From: Andrew Boyer <aboyer@pensando.io>
Date: Fri, 29 Jan 2021 14:44:32 -0800
Subject: [PATCH 044/189] drivers/net: redefine array size macros
Replace copies of size(arr)/size(arr[0]) with RTE_DIM().
Eventually all of these macro definitions should be removed.
Signed-off-by: Andrew Boyer <aboyer@pensando.io>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
drivers/net/atlantic/atl_hw_regs.h | 2 +-
drivers/net/axgbe/axgbe_common.h | 2 +-
drivers/net/bnx2x/bnx2x.h | 2 +-
drivers/net/bnx2x/elink.h | 2 +-
drivers/net/ena/ena_ethdev.c | 2 +-
drivers/net/enic/base/vnic_devcmd.h | 2 +-
drivers/net/hns3/hns3_ethdev.h | 2 +-
drivers/net/i40e/base/i40e_osdep.h | 2 +-
drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h | 2 +-
drivers/net/thunderx/base/nicvf_hw.h | 2 +-
10 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/net/atlantic/atl_hw_regs.h b/drivers/net/atlantic/atl_hw_regs.h
index a2d6ca8..4f6cd35 100644
--- a/drivers/net/atlantic/atl_hw_regs.h
+++ b/drivers/net/atlantic/atl_hw_regs.h
@@ -26,7 +26,7 @@
#define mdelay rte_delay_ms
#define udelay rte_delay_us
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
#define BIT(x) (1UL << (x))
#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index fb97f0b..799382a 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -42,7 +42,7 @@
#define BIT(nr) (1 << (nr))
#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
#endif
#define AXGBE_HZ 250
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 69cc143..e13ab15 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -81,7 +81,7 @@
#endif
#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
#endif
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h
index dd70ac6..6b2e85f 100644
--- a/drivers/net/bnx2x/elink.h
+++ b/drivers/net/bnx2x/elink.h
@@ -86,7 +86,7 @@ extern void elink_cb_notify_link_changed(struct bnx2x_softc *sc);
#define ELINK_EVENT_ID_SFP_UNQUALIFIED_MODULE 1
#define ELINK_EVENT_ID_SFP_POWER_FAULT 2
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+#define ARRAY_SIZE(x) RTE_DIM(x)
/* Debug prints */
#ifdef ELINK_DEBUG
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 20ff365..b4b8794 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -47,7 +47,7 @@
#define ENA_HASH_KEY_SIZE 40
#define ETH_GSTRING_LEN 32
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ARRAY_SIZE(x) RTE_DIM(x)
#define ENA_MIN_RING_DESC 128
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index a2f577f..4675e5a 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -63,7 +63,7 @@
#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ARRAY_SIZE(x) RTE_DIM(x)
enum vnic_devcmd_cmd {
CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index cf42ef1..6178f0b 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -887,7 +887,7 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
#define hns3_read_dev(a, reg) \
hns3_read_reg((a)->io_base, (reg))
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ARRAY_SIZE(x) RTE_DIM(x)
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
do { \
diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h
index 9b50330..9b79ece 100644
--- a/drivers/net/i40e/base/i40e_osdep.h
+++ b/drivers/net/i40e/base/i40e_osdep.h
@@ -155,7 +155,7 @@ static inline uint32_t i40e_read_addr(volatile void *addr)
I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((a), (reg)), (value))
#define flush(a) i40e_read_addr(I40E_PCI_REG_ADDR((a), (I40E_GLGEN_STAT)))
-#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
/* memory allocation tracking */
struct i40e_dma_mem {
diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
index d46574b..7b64e2d 100644
--- a/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
+++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
@@ -23,7 +23,7 @@
#endif
#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ARRAY_SIZE(x) RTE_DIM(x)
#endif
#define NFP_ERRNO(err) (errno = (err), -1)
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
index fd13ea8..d6f3a57 100644
--- a/drivers/net/thunderx/base/nicvf_hw.h
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -17,7 +17,7 @@
#define PCI_SUB_DEVICE_ID_CN81XX_NICVF 0xA234
#define PCI_SUB_DEVICE_ID_CN83XX_NICVF 0xA334
-#define NICVF_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define NICVF_ARRAY_SIZE(arr) RTE_DIM(arr)
#define NICVF_GET_RX_STATS(reg) \
nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
--
2.7.4

View File

@ -0,0 +1,241 @@
From 4d36d14e1683f50904525e26fbf311c0aa677940 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Thu, 4 Mar 2021 15:44:41 +0800
Subject: [PATCH 045/189] net/hns3: support module EEPROM dump
This patch add support for dumping module EEPROM.
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
doc/guides/nics/features/hns3.ini | 1 +
drivers/net/hns3/hns3_cmd.h | 16 ++++
drivers/net/hns3/hns3_ethdev.c | 159 ++++++++++++++++++++++++++++++++++++++
3 files changed, 176 insertions(+)
diff --git a/doc/guides/nics/features/hns3.ini b/doc/guides/nics/features/hns3.ini
index f0747e3..5ccaca5 100644
--- a/doc/guides/nics/features/hns3.ini
+++ b/doc/guides/nics/features/hns3.ini
@@ -38,6 +38,7 @@ Extended stats = Y
Stats per queue = Y
FW version = Y
Registers dump = Y
+Module EEPROM dump = Y
Multiprocess aware = Y
Linux UIO = Y
Linux VFIO = Y
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 5010278..ff424a0 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -211,6 +211,8 @@ enum hns3_opcode_type {
HNS3_OPC_FIRMWARE_COMPAT_CFG = 0x701A,
/* SFP command */
+ HNS3_OPC_GET_SFP_EEPROM = 0x7100,
+ HNS3_OPC_GET_SFP_EXIST = 0x7101,
HNS3_OPC_SFP_GET_SPEED = 0x7104,
/* Interrupts commands */
@@ -714,6 +716,20 @@ struct hns3_config_auto_neg_cmd {
#define HNS3_MAC_FEC_BASER 1
#define HNS3_MAC_FEC_RS 2
+#define HNS3_SFP_INFO_BD0_LEN 20UL
+#define HNS3_SFP_INFO_BDX_LEN 24UL
+
+struct hns3_sfp_info_bd0_cmd {
+ uint16_t offset;
+ uint16_t read_len;
+ uint8_t data[HNS3_SFP_INFO_BD0_LEN];
+};
+
+struct hns3_sfp_type {
+ uint8_t type;
+ uint8_t ext_type;
+};
+
struct hns3_sfp_speed_cmd {
uint32_t sfp_speed;
uint8_t query_type; /* 0: sfp speed, 1: active fec */
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 7ed55b1..2a37fcc 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -6172,6 +6172,163 @@ hns3_query_dev_fec_info(struct hns3_hw *hw)
return ret;
}
+static bool
+hns3_optical_module_existed(struct hns3_hw *hw)
+{
+ struct hns3_cmd_desc desc;
+ bool existed;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw,
+ "fail to get optical module exist state, ret = %d.\n",
+ ret);
+ return false;
+ }
+ existed = !!desc.data[0];
+
+ return existed;
+}
+
+static int
+hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
+ uint32_t len, uint8_t *data)
+{
+#define HNS3_SFP_INFO_CMD_NUM 6
+#define HNS3_SFP_INFO_MAX_LEN \
+ (HNS3_SFP_INFO_BD0_LEN + \
+ (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
+ struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
+ struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
+ uint16_t read_len;
+ uint16_t copy_len;
+ int ret;
+ int i;
+
+ for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
+ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
+ true);
+ if (i < HNS3_SFP_INFO_CMD_NUM - 1)
+ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+ }
+
+ sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
+ sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
+ read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
+ sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);
+
+ ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
+ if (ret) {
+ hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ /* The data format in BD0 is different with the others. */
+ copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN);
+ memcpy(data, sfp_info_bd0->data, copy_len);
+ read_len = copy_len;
+
+ for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) {
+ if (read_len >= len)
+ break;
+
+ copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN);
+ memcpy(data + read_len, desc[i].data, copy_len);
+ read_len += copy_len;
+ }
+
+ return (int)read_len;
+}
+
+static int
+hns3_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
+ uint32_t offset = info->offset;
+ uint32_t len = info->length;
+ uint8_t *data = info->data;
+ uint32_t read_len = 0;
+
+ if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER)
+ return -ENOTSUP;
+
+ if (!hns3_optical_module_existed(hw)) {
+ hns3_err(hw, "fail to read module EEPROM: no module is connected.\n");
+ return -EIO;
+ }
+
+ while (read_len < len) {
+ int ret;
+ ret = hns3_get_module_eeprom_data(hw, offset + read_len,
+ len - read_len,
+ data + read_len);
+ if (ret < 0)
+ return -EIO;
+ read_len += ret;
+ }
+
+ return 0;
+}
+
+static int
+hns3_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+#define HNS3_SFF8024_ID_SFP 0x03
+#define HNS3_SFF8024_ID_QSFP_8438 0x0c
+#define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d
+#define HNS3_SFF8024_ID_QSFP28_8636 0x11
+#define HNS3_SFF_8636_V1_3 0x03
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
+ struct rte_dev_eeprom_info info;
+ struct hns3_sfp_type sfp_type;
+ int ret;
+
+ memset(&sfp_type, 0, sizeof(sfp_type));
+ memset(&info, 0, sizeof(info));
+ info.data = (uint8_t *)&sfp_type;
+ info.length = sizeof(sfp_type);
+ ret = hns3_get_module_eeprom(dev, &info);
+ if (ret)
+ return ret;
+
+ switch (sfp_type.type) {
+ case HNS3_SFF8024_ID_SFP:
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ break;
+ case HNS3_SFF8024_ID_QSFP_8438:
+ modinfo->type = RTE_ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
+ break;
+ case HNS3_SFF8024_ID_QSFP_8436_8636:
+ if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
+ modinfo->type = RTE_ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
+ } else {
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
+ }
+ break;
+ case HNS3_SFF8024_ID_QSFP28_8636:
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
+ break;
+ default:
+ hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n",
+ sfp_type.type, sfp_type.ext_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct eth_dev_ops hns3_eth_dev_ops = {
.dev_configure = hns3_dev_configure,
.dev_start = hns3_dev_start,
@@ -6223,6 +6380,8 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
.vlan_offload_set = hns3_vlan_offload_set,
.vlan_pvid_set = hns3_vlan_pvid_set,
.get_reg = hns3_get_regs,
+ .get_module_info = hns3_get_module_info,
+ .get_module_eeprom = hns3_get_module_eeprom,
.get_dcb_info = hns3_get_dcb_info,
.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
.fec_get_capability = hns3_fec_get_capability,
--
2.7.4

View File

@ -0,0 +1,255 @@
From b7995f87e190e4ab83ff6a5faea584a4ea4c2198 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Thu, 4 Mar 2021 15:44:42 +0800
Subject: [PATCH 046/189] net/hns3: add more registers to dump
This patch makes more registers dumped in the dump_reg API to help
locate the fault.
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_cmd.h | 13 ++++
drivers/net/hns3/hns3_regs.c | 171 ++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 180 insertions(+), 4 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index ff424a0..2e23f99 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -95,6 +95,19 @@ enum hns3_opcode_type {
HNS3_OPC_QUERY_REG_NUM = 0x0040,
HNS3_OPC_QUERY_32_BIT_REG = 0x0041,
HNS3_OPC_QUERY_64_BIT_REG = 0x0042,
+ HNS3_OPC_DFX_BD_NUM = 0x0043,
+ HNS3_OPC_DFX_BIOS_COMMON_REG = 0x0044,
+ HNS3_OPC_DFX_SSU_REG_0 = 0x0045,
+ HNS3_OPC_DFX_SSU_REG_1 = 0x0046,
+ HNS3_OPC_DFX_IGU_EGU_REG = 0x0047,
+ HNS3_OPC_DFX_RPU_REG_0 = 0x0048,
+ HNS3_OPC_DFX_RPU_REG_1 = 0x0049,
+ HNS3_OPC_DFX_NCSI_REG = 0x004A,
+ HNS3_OPC_DFX_RTC_REG = 0x004B,
+ HNS3_OPC_DFX_PPP_REG = 0x004C,
+ HNS3_OPC_DFX_RCB_REG = 0x004D,
+ HNS3_OPC_DFX_TQP_REG = 0x004E,
+ HNS3_OPC_DFX_SSU_REG_2 = 0x004F,
HNS3_OPC_QUERY_DEV_SPECS = 0x0050,
diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c
index 8afe132..5b14727 100644
--- a/drivers/net/hns3/hns3_regs.c
+++ b/drivers/net/hns3/hns3_regs.c
@@ -15,6 +15,8 @@
#define REG_NUM_PER_LINE 4
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(uint32_t))
+static int hns3_get_dfx_reg_line(struct hns3_hw *hw, uint32_t *length);
+
static const uint32_t cmdq_reg_addrs[] = {HNS3_CMDQ_TX_ADDR_L_REG,
HNS3_CMDQ_TX_ADDR_H_REG,
HNS3_CMDQ_TX_DEPTH_REG,
@@ -77,6 +79,21 @@ static const uint32_t tqp_intr_reg_addrs[] = {HNS3_TQP_INTR_CTRL_REG,
HNS3_TQP_INTR_GL2_REG,
HNS3_TQP_INTR_RL_REG};
+static const uint32_t hns3_dfx_reg_opcode_list[] = {
+ HNS3_OPC_DFX_BIOS_COMMON_REG,
+ HNS3_OPC_DFX_SSU_REG_0,
+ HNS3_OPC_DFX_SSU_REG_1,
+ HNS3_OPC_DFX_IGU_EGU_REG,
+ HNS3_OPC_DFX_RPU_REG_0,
+ HNS3_OPC_DFX_RPU_REG_1,
+ HNS3_OPC_DFX_NCSI_REG,
+ HNS3_OPC_DFX_RTC_REG,
+ HNS3_OPC_DFX_PPP_REG,
+ HNS3_OPC_DFX_RCB_REG,
+ HNS3_OPC_DFX_TQP_REG,
+ HNS3_OPC_DFX_SSU_REG_2
+};
+
static int
hns3_get_regs_num(struct hns3_hw *hw, uint32_t *regs_num_32_bit,
uint32_t *regs_num_64_bit)
@@ -123,14 +140,21 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length)
if (!hns->is_vf) {
ret = hns3_get_regs_num(hw, &regs_num_32_bit, &regs_num_64_bit);
if (ret) {
- hns3_err(hw, "Get register number failed, ret = %d.",
- ret);
- return -ENOTSUP;
+ hns3_err(hw, "fail to get the number of registers, "
+ "ret = %d.", ret);
+ return ret;
}
dfx_reg_lines = regs_num_32_bit * sizeof(uint32_t) /
REG_LEN_PER_LINE + 1;
dfx_reg_lines += regs_num_64_bit * sizeof(uint64_t) /
REG_LEN_PER_LINE + 1;
+
+ ret = hns3_get_dfx_reg_line(hw, &dfx_reg_lines);
+ if (ret) {
+ hns3_err(hw, "fail to get the number of dfx registers, "
+ "ret = %d.", ret);
+ return ret;
+ }
len += dfx_reg_lines * REG_NUM_PER_LINE;
}
@@ -310,6 +334,144 @@ hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data)
return data - origin_data_ptr;
}
+static int
+hns3_get_dfx_reg_bd_num(struct hns3_hw *hw, uint32_t *bd_num_list,
+ uint32_t list_size)
+{
+#define HNS3_GET_DFX_REG_BD_NUM_SIZE 4
+ struct hns3_cmd_desc desc[HNS3_GET_DFX_REG_BD_NUM_SIZE];
+ uint32_t index, desc_index;
+ uint32_t bd_num;
+ uint32_t i;
+ int ret;
+
+ for (i = 0; i < HNS3_GET_DFX_REG_BD_NUM_SIZE - 1; i++) {
+ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_DFX_BD_NUM, true);
+ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+ }
+ /* The last BD does not need a next flag */
+ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_DFX_BD_NUM, true);
+
+ ret = hns3_cmd_send(hw, desc, HNS3_GET_DFX_REG_BD_NUM_SIZE);
+ if (ret) {
+ hns3_err(hw, "fail to get dfx bd num, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* The first data in the first BD is a reserved field */
+ for (i = 1; i <= list_size; i++) {
+ desc_index = i / HNS3_CMD_DESC_DATA_NUM;
+ index = i % HNS3_CMD_DESC_DATA_NUM;
+ bd_num = rte_le_to_cpu_32(desc[desc_index].data[index]);
+ bd_num_list[i - 1] = bd_num;
+ }
+
+ return 0;
+}
+
+static int
+hns3_dfx_reg_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
+ int bd_num, uint32_t opcode)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < bd_num - 1; i++) {
+ hns3_cmd_setup_basic_desc(&desc[i], opcode, true);
+ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+ }
+ /* The last BD does not need a next flag */
+ hns3_cmd_setup_basic_desc(&desc[i], opcode, true);
+
+ ret = hns3_cmd_send(hw, desc, bd_num);
+ if (ret) {
+ hns3_err(hw, "fail to query dfx registers, opcode = 0x%04X, "
+ "ret = %d.\n", opcode, ret);
+ }
+
+ return ret;
+}
+
+static int
+hns3_dfx_reg_fetch_data(struct hns3_cmd_desc *desc, int bd_num, uint32_t *reg)
+{
+ int desc_index;
+ int reg_num;
+ int index;
+ int i;
+
+ reg_num = bd_num * HNS3_CMD_DESC_DATA_NUM;
+ for (i = 0; i < reg_num; i++) {
+ desc_index = i / HNS3_CMD_DESC_DATA_NUM;
+ index = i % HNS3_CMD_DESC_DATA_NUM;
+ *reg++ = desc[desc_index].data[index];
+ }
+ reg_num += hns3_insert_reg_separator(reg_num, reg);
+
+ return reg_num;
+}
+
+static int
+hns3_get_dfx_reg_line(struct hns3_hw *hw, uint32_t *lines)
+{
+ int opcode_num = RTE_DIM(hns3_dfx_reg_opcode_list);
+ uint32_t bd_num_list[opcode_num];
+ uint32_t bd_num, data_len;
+ int ret;
+ int i;
+
+ ret = hns3_get_dfx_reg_bd_num(hw, bd_num_list, opcode_num);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < opcode_num; i++) {
+ bd_num = bd_num_list[i];
+ data_len = bd_num * HNS3_CMD_DESC_DATA_NUM * sizeof(uint32_t);
+ *lines += data_len / REG_LEN_PER_LINE + 1;
+ }
+
+ return 0;
+}
+
+static int
+hns3_get_dfx_regs(struct hns3_hw *hw, void **data)
+{
+ int opcode_num = RTE_DIM(hns3_dfx_reg_opcode_list);
+ uint32_t max_bd_num, bd_num, opcode;
+ uint32_t bd_num_list[opcode_num];
+ struct hns3_cmd_desc *cmd_descs;
+ uint32_t *reg_val = (uint32_t *)*data;
+ int ret;
+ int i;
+
+ ret = hns3_get_dfx_reg_bd_num(hw, bd_num_list, opcode_num);
+ if (ret)
+ return ret;
+
+ max_bd_num = 0;
+ for (i = 0; i < opcode_num; i++)
+ max_bd_num = RTE_MAX(bd_num_list[i], max_bd_num);
+
+ cmd_descs = rte_zmalloc(NULL, sizeof(*cmd_descs) * max_bd_num, 0);
+ if (cmd_descs == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < opcode_num; i++) {
+ opcode = hns3_dfx_reg_opcode_list[i];
+ bd_num = bd_num_list[i];
+ if (bd_num == 0)
+ continue;
+ ret = hns3_dfx_reg_cmd_send(hw, cmd_descs, bd_num, opcode);
+ if (ret)
+ break;
+ reg_val += hns3_dfx_reg_fetch_data(cmd_descs, bd_num, reg_val);
+ }
+ rte_free(cmd_descs);
+ *data = (void *)reg_val;
+
+ return ret;
+}
+
int
hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
{
@@ -371,5 +533,6 @@ hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
data += regs_num_64_bit * HNS3_64_BIT_REG_SIZE;
data += hns3_insert_reg_separator(regs_num_64_bit *
HNS3_64_BIT_REG_SIZE, data);
- return ret;
+
+ return hns3_get_dfx_regs(hw, (void **)&data);
}
--
2.7.4

View File

@ -0,0 +1,151 @@
From f789787f6d1f096a04d39e3de46e5292a7bde3fe Mon Sep 17 00:00:00 2001
From: Chengwen Feng <fengchengwen@huawei.com>
Date: Thu, 4 Mar 2021 15:44:43 +0800
Subject: [PATCH 047/189] net/hns3: implement Tx mbuf free on demand
This patch add support tx_done_cleanup ops, which could support for
the API rte_eth_tx_done_cleanup to free consumed mbufs on Tx ring.
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
doc/guides/nics/features/hns3.ini | 1 +
doc/guides/nics/features/hns3_vf.ini | 1 +
drivers/net/hns3/hns3_ethdev.c | 1 +
drivers/net/hns3/hns3_ethdev_vf.c | 1 +
drivers/net/hns3/hns3_rxtx.c | 59 ++++++++++++++++++++++++++++++++++++
drivers/net/hns3/hns3_rxtx.h | 1 +
6 files changed, 64 insertions(+)
diff --git a/doc/guides/nics/features/hns3.ini b/doc/guides/nics/features/hns3.ini
index 5ccaca5..00a26cd 100644
--- a/doc/guides/nics/features/hns3.ini
+++ b/doc/guides/nics/features/hns3.ini
@@ -10,6 +10,7 @@ Queue start/stop = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
Burst mode info = Y
+Free Tx mbuf on demand = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
diff --git a/doc/guides/nics/features/hns3_vf.ini b/doc/guides/nics/features/hns3_vf.ini
index 3128b63..f3dd239 100644
--- a/doc/guides/nics/features/hns3_vf.ini
+++ b/doc/guides/nics/features/hns3_vf.ini
@@ -10,6 +10,7 @@ Queue start/stop = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
Burst mode info = Y
+Free Tx mbuf on demand = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 2a37fcc..6e0f3b1 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -6388,6 +6388,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
.fec_get = hns3_fec_get,
.fec_set = hns3_fec_set,
.tm_ops_get = hns3_tm_ops_get,
+ .tx_done_cleanup = hns3_tx_done_cleanup,
};
static const struct hns3_reset_ops hns3_reset_ops = {
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index d5157cf..5b4c587 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2763,6 +2763,7 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {
.vlan_offload_set = hns3vf_vlan_offload_set,
.get_reg = hns3_get_regs,
.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
+ .tx_done_cleanup = hns3_tx_done_cleanup,
};
static const struct hns3_reset_ops hns3vf_reset_ops = {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 1991b4e..df97018 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -3913,6 +3913,65 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return 0;
}
+static int
+hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
+{
+ uint16_t next_to_clean = txq->next_to_clean;
+ uint16_t next_to_use = txq->next_to_use;
+ uint16_t tx_bd_ready = txq->tx_bd_ready;
+ struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean];
+ struct hns3_desc *desc = &txq->tx_ring[next_to_clean];
+ uint32_t idx;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
+ for (idx = 0; idx < free_cnt; idx++) {
+ if (next_to_clean == next_to_use)
+ break;
+
+ if (desc->tx.tp_fe_sc_vld_ra_ri &
+ rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
+ break;
+
+ if (tx_pkt->mbuf != NULL) {
+ rte_pktmbuf_free_seg(tx_pkt->mbuf);
+ tx_pkt->mbuf = NULL;
+ }
+
+ next_to_clean++;
+ tx_bd_ready++;
+ tx_pkt++;
+ desc++;
+ if (next_to_clean == txq->nb_tx_desc) {
+ tx_pkt = txq->sw_ring;
+ desc = txq->tx_ring;
+ next_to_clean = 0;
+ }
+ }
+
+ if (idx > 0) {
+ txq->next_to_clean = next_to_clean;
+ txq->tx_bd_ready = tx_bd_ready;
+ }
+
+ return (int)idx;
+}
+
+int
+hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq;
+ struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
+
+ if (dev->tx_pkt_burst == hns3_xmit_pkts)
+ return hns3_tx_done_cleanup_full(q, free_cnt);
+ else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst)
+ return 0;
+ else
+ return -ENOTSUP;
+}
+
uint32_t
hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 8f5ae5c..7118bd4 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -706,5 +706,6 @@ int hns3_start_all_txqs(struct rte_eth_dev *dev);
int hns3_start_all_rxqs(struct rte_eth_dev *dev);
void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
+int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
#endif /* _HNS3_RXTX_H_ */
--
2.7.4

View File

@ -0,0 +1,210 @@
From e5b9ec998c2de659f177332bcdb4868116063b17 Mon Sep 17 00:00:00 2001
From: "Min Hu (Connor)" <humin29@huawei.com>
Date: Thu, 4 Mar 2021 15:44:44 +0800
Subject: [PATCH 048/189] net/hns3: add bytes stats
In current HNS3 PMD, Rx/Tx bytes from packet stats are not
implemented.
This patch implemented Rx/Tx bytes using soft counters.
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_rxtx.c | 16 ++++++++++++++++
drivers/net/hns3/hns3_rxtx_vec_neon.h | 9 +++++++++
drivers/net/hns3/hns3_rxtx_vec_sve.c | 8 ++++++++
drivers/net/hns3/hns3_stats.c | 18 ++++++++++++++----
4 files changed, 47 insertions(+), 4 deletions(-)
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index df97018..897e5fa 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -2181,6 +2181,9 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
cksum_err);
hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
+ /* Increment bytes counter */
+ rxq->basic_stats.bytes += rxm->pkt_len;
+
rx_pkts[nb_rx++] = rxm;
continue;
pkt_err:
@@ -2401,6 +2404,9 @@ hns3_recv_scattered_pkts(void *rx_queue,
cksum_err);
hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
+ /* Increment bytes counter */
+ rxq->basic_stats.bytes += first_seg->pkt_len;
+
rx_pkts[nb_rx++] = first_seg;
first_seg = NULL;
continue;
@@ -3516,6 +3522,11 @@ hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq,
for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
hns3_tx_setup_4bd(txdp + i, pkts + i);
+
+ /* Increment bytes counter */
+ uint32_t j;
+ for (j = 0; j < PER_LOOP_NUM; j++)
+ txq->basic_stats.bytes += pkts[i + j]->pkt_len;
}
if (unlikely(leftover > 0)) {
for (i = 0; i < leftover; i++) {
@@ -3523,6 +3534,9 @@ hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq,
pkts + mainpart + i);
hns3_tx_setup_1bd(txdp + mainpart + i,
pkts + mainpart + i);
+
+ /* Increment bytes counter */
+ txq->basic_stats.bytes += pkts[mainpart + i]->pkt_len;
}
}
}
@@ -3661,6 +3675,8 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
desc->tx.tp_fe_sc_vld_ra_ri |=
rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));
+ /* Increment bytes counter */
+ txq->basic_stats.bytes += tx_pkt->pkt_len;
nb_hold += i;
txq->next_to_use = tx_next_use;
txq->tx_bd_ready -= i;
diff --git a/drivers/net/hns3/hns3_rxtx_vec_neon.h b/drivers/net/hns3/hns3_rxtx_vec_neon.h
index a693b4b..68f098f 100644
--- a/drivers/net/hns3/hns3_rxtx_vec_neon.h
+++ b/drivers/net/hns3/hns3_rxtx_vec_neon.h
@@ -61,6 +61,9 @@ hns3_xmit_fixed_burst_vec(void *__restrict tx_queue,
for (i = 0; i < n; i++, tx_pkts++, tx_desc++) {
hns3_vec_tx(tx_desc, *tx_pkts);
tx_entry[i].mbuf = *tx_pkts;
+
+ /* Increment bytes counter */
+ txq->basic_stats.bytes += (*tx_pkts)->pkt_len;
}
nb_commit -= n;
@@ -72,6 +75,9 @@ hns3_xmit_fixed_burst_vec(void *__restrict tx_queue,
for (i = 0; i < nb_commit; i++, tx_pkts++, tx_desc++) {
hns3_vec_tx(tx_desc, *tx_pkts);
tx_entry[i].mbuf = *tx_pkts;
+
+ /* Increment bytes counter */
+ txq->basic_stats.bytes += (*tx_pkts)->pkt_len;
}
next_to_use += nb_commit;
@@ -116,6 +122,9 @@ hns3_desc_parse_field(struct hns3_rx_queue *rxq,
if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
hns3_rx_set_cksum_flag(pkt, pkt->packet_type,
cksum_err);
+
+ /* Increment bytes counter */
+ rxq->basic_stats.bytes += pkt->pkt_len;
}
return retcode;
diff --git a/drivers/net/hns3/hns3_rxtx_vec_sve.c b/drivers/net/hns3/hns3_rxtx_vec_sve.c
index 8c2c8f6..947c19f 100644
--- a/drivers/net/hns3/hns3_rxtx_vec_sve.c
+++ b/drivers/net/hns3/hns3_rxtx_vec_sve.c
@@ -58,6 +58,9 @@ hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
if (likely(key->bd_base_info[i] & BIT(HNS3_RXD_L3L4P_B)))
hns3_rx_set_cksum_flag(rx_pkts[i],
rx_pkts[i]->packet_type, cksum_err);
+
+ /* Increment bytes counter */
+ rxq->basic_stats.bytes += rx_pkts[i]->pkt_len;
}
return retcode;
@@ -408,6 +411,11 @@ hns3_tx_fill_hw_ring_sve(struct hns3_tx_queue *txq,
svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.paylen,
offsets, svdup_n_u64(valid_bit));
+ /* Increment bytes counter */
+ uint32_t idx;
+ for (idx = 0; idx < svcntd(); idx++)
+ txq->basic_stats.bytes += pkts[idx]->pkt_len;
+
/* update index for next loop */
i += svcntd();
pkts += svcntd();
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index e0e40ca..777d36a 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -358,6 +358,7 @@ static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
HNS3_NUM_RESET_XSTATS)
static void hns3_tqp_stats_clear(struct hns3_hw *hw);
+static void hns3_tqp_basic_stats_clear(struct rte_eth_dev *dev);
/*
* Query all the MAC statistics data of Network ICL command ,opcode id: 0x0034.
@@ -543,16 +544,26 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
return ret;
}
- /* Get the error stats of received packets */
+ /* Get the error stats and bytes of received packets */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
if (rxq) {
cnt = rxq->err_stats.l2_errors +
rxq->err_stats.pkt_len_errors;
rte_stats->ierrors += cnt;
+
+ rte_stats->ibytes += rxq->basic_stats.bytes;
}
}
+ /* Get the bytes of received packets */
+ struct hns3_tx_queue *txq;
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ txq = eth_dev->data->tx_queues[i];
+ if (txq)
+ rte_stats->obytes += txq->basic_stats.bytes;
+ }
+
rte_stats->oerrors = 0;
/*
* If HW statistics are reset by stats_reset, but a lot of residual
@@ -623,6 +634,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
* their source.
*/
hns3_tqp_stats_clear(hw);
+ hns3_tqp_basic_stats_clear(eth_dev);
return 0;
}
@@ -807,7 +819,6 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
rxq_stats->packets =
stats->rcb_rx_ring_pktnum[i] > rxq_stats->errors ?
stats->rcb_rx_ring_pktnum[i] - rxq_stats->errors : 0;
- rxq_stats->bytes = 0;
for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
val = (char *)rxq_stats +
hns3_rxq_basic_stats_strings[j].offset;
@@ -836,7 +847,7 @@ hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
txq_stats = &txq->basic_stats;
txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
- txq_stats->bytes = 0;
+
for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
val = (char *)txq_stats +
hns3_txq_basic_stats_strings[j].offset;
@@ -1328,7 +1339,6 @@ hns3_dev_xstats_reset(struct rte_eth_dev *dev)
if (ret)
return ret;
- hns3_tqp_basic_stats_clear(dev);
hns3_tqp_dfx_stats_clear(dev);
/* Clear reset stats */
--
2.7.4

View File

@ -0,0 +1,274 @@
From 682546e9b26cf26a4b82a0c022f5c8dd4a7aa7ed Mon Sep 17 00:00:00 2001
From: "Min Hu (Connor)" <humin29@huawei.com>
Date: Thu, 4 Mar 2021 15:44:45 +0800
Subject: [PATCH 049/189] net/hns3: add imissed packet stats
This patch implement Rx imissed stats by querying cmdq.
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_cmd.h | 7 +++
drivers/net/hns3/hns3_ethdev.c | 7 +++
drivers/net/hns3/hns3_ethdev.h | 1 +
drivers/net/hns3/hns3_stats.c | 106 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/hns3/hns3_stats.h | 8 ++++
5 files changed, 128 insertions(+), 1 deletion(-)
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 2e23f99..93bfa74 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -905,6 +905,13 @@ struct hns3_dev_specs_0_cmd {
uint32_t max_tm_rate;
};
+struct hns3_query_rpu_cmd {
+ uint32_t tc_queue_num;
+ uint32_t rsv1[2];
+ uint32_t rpu_rx_pkt_drop_cnt;
+ uint32_t rsv2[2];
+};
+
#define HNS3_MAX_TQP_NUM_HIP08_PF 64
#define HNS3_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HNS3_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 6e0f3b1..96ad9b0 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4742,6 +4742,13 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
goto err_cmd_init;
}
+ /* Hardware statistics of imissed registers cleared. */
+ ret = hns3_update_imissed_stats(hw, true);
+ if (ret) {
+ hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
+ return ret;
+ }
+
hns3_config_all_msix_error(hw, true);
ret = rte_intr_callback_register(&pci_dev->intr_handle,
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 6178f0b..2954422 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -434,6 +434,7 @@ struct hns3_hw {
struct hns3_tqp_stats tqp_stats;
/* Include Mac stats | Rx stats | Tx stats */
struct hns3_mac_stats mac_stats;
+ struct hns3_rx_missed_stats imissed_stats;
uint32_t fw_version;
uint16_t num_msi;
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index 777d36a..87035e3 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -324,6 +324,12 @@ static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
{"TX_QUEUE_FBD", HNS3_RING_TX_FBDNUM_REG}
};
+/* The statistic of imissed packet */
+static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
+ {"RPU_DROP_CNT",
+ HNS3_IMISSED_STATS_FIELD_OFFSET(rpu_rx_drop_cnt)},
+};
+
#define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
sizeof(hns3_mac_strings[0]))
@@ -354,8 +360,11 @@ static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
#define HNS3_NUM_TXQ_BASIC_STATS (sizeof(hns3_txq_basic_stats_strings) / \
sizeof(hns3_txq_basic_stats_strings[0]))
+#define HNS3_NUM_IMISSED_XSTATS (sizeof(hns3_imissed_stats_strings) / \
+ sizeof(hns3_imissed_stats_strings[0]))
+
#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \
- HNS3_NUM_RESET_XSTATS)
+ HNS3_NUM_RESET_XSTATS + HNS3_NUM_IMISSED_XSTATS)
static void hns3_tqp_stats_clear(struct hns3_hw *hw);
static void hns3_tqp_basic_stats_clear(struct rte_eth_dev *dev);
@@ -515,6 +524,52 @@ hns3_update_tqp_stats(struct hns3_hw *hw)
return 0;
}
+static int
+hns3_update_rpu_drop_stats(struct hns3_hw *hw)
+{
+ struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
+ struct hns3_query_rpu_cmd *req;
+ struct hns3_cmd_desc desc;
+ uint64_t cnt;
+ uint32_t tc_num;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_DFX_RPU_REG_0, true);
+ req = (struct hns3_query_rpu_cmd *)desc.data;
+
+ /*
+ * tc_num is 0, means rpu stats of all TC channels will be
+ * get from firmware
+ */
+ tc_num = 0;
+ req->tc_queue_num = rte_cpu_to_le_32(tc_num);
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "failed to query RPU stats: %d", ret);
+ return ret;
+ }
+
+ cnt = rte_le_to_cpu_32(req->rpu_rx_pkt_drop_cnt);
+ stats->rpu_rx_drop_cnt += cnt;
+
+ return 0;
+}
+
+int
+hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
+{
+ int ret;
+
+ ret = hns3_update_rpu_drop_stats(hw);
+ if (ret)
+ return ret;
+
+ if (is_clear)
+ memset(&hw->imissed_stats, 0, sizeof(hw->imissed_stats));
+
+ return 0;
+}
+
/*
* Query tqp tx queue statistics ,opcode id: 0x0B03.
* Query tqp rx queue statistics ,opcode id: 0x0B13.
@@ -531,6 +586,7 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
struct hns3_tqp_stats *stats = &hw->tqp_stats;
struct hns3_rx_queue *rxq;
uint64_t cnt;
@@ -544,6 +600,18 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
return ret;
}
+ if (!hns->is_vf) {
+ /* Update imissed stats */
+ ret = hns3_update_imissed_stats(hw, false);
+ if (ret) {
+ hns3_err(hw, "update imissed stats failed, ret = %d",
+ ret);
+ return ret;
+ }
+
+ rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt;
+ }
+
/* Get the error stats and bytes of received packets */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
@@ -616,6 +684,19 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
}
}
+ if (!hns->is_vf) {
+ /*
+ * Note: Reading hardware statistics of imissed registers will
+ * clear them.
+ */
+ ret = hns3_update_imissed_stats(hw, true);
+ if (ret) {
+ hns3_err(hw, "clear imissed stats failed, ret = %d",
+ ret);
+ return ret;
+ }
+ }
+
/*
* Clear soft stats of rx error packet which will be dropped
* in driver.
@@ -928,6 +1009,7 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_pf *pf = &hns->pf;
struct hns3_hw *hw = &hns->hw;
+ struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
struct hns3_mac_stats *mac_stats = &hw->mac_stats;
struct hns3_reset_stats *reset_stats = &hw->reset.stats;
struct hns3_rx_bd_errors_stats *rx_err_stats;
@@ -966,6 +1048,21 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count++;
}
+ ret = hns3_update_imissed_stats(hw, false);
+ if (ret) {
+ hns3_err(hw, "update imissed stats failed, ret = %d",
+ ret);
+ return ret;
+ }
+
+ for (i = 0; i < HNS3_NUM_IMISSED_XSTATS; i++) {
+ addr = (char *)imissed_stats +
+ hns3_imissed_stats_strings[i].offset;
+ xstats[count].value = *(uint64_t *)addr;
+ xstats[count].id = count;
+ count++;
+ }
+
for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
addr = (char *)&pf->abn_int_stats +
hns3_error_int_stats_strings[i].offset;
@@ -1108,6 +1205,13 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
count++;
}
+ for (i = 0; i < HNS3_NUM_IMISSED_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", hns3_imissed_stats_strings[i].name);
+ count++;
+ }
+
for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h
index d213be5..01b4f36 100644
--- a/drivers/net/hns3/hns3_stats.h
+++ b/drivers/net/hns3/hns3_stats.h
@@ -110,6 +110,10 @@ struct hns3_mac_stats {
uint64_t mac_rx_ctrl_pkt_num;
};
+struct hns3_rx_missed_stats {
+ uint64_t rpu_rx_drop_cnt;
+};
+
/* store statistics names and its offset in stats structure */
struct hns3_xstats_name_offset {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -141,6 +145,9 @@ struct hns3_reset_stats;
#define HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(f) \
(offsetof(struct hns3_tx_basic_stats, f))
+#define HNS3_IMISSED_STATS_FIELD_OFFSET(f) \
+ (offsetof(struct hns3_rx_missed_stats, f))
+
int hns3_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
int hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned int n);
@@ -160,5 +167,6 @@ int hns3_stats_reset(struct rte_eth_dev *dev);
void hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err);
int hns3_tqp_stats_init(struct hns3_hw *hw);
void hns3_tqp_stats_uninit(struct hns3_hw *hw);
+int hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear);
#endif /* _HNS3_STATS_H_ */
--
2.7.4

View File

@ -0,0 +1,122 @@
From 73f3a8aa0b5926083482f5e1f9999e246856a2ae Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Thu, 4 Mar 2021 15:44:46 +0800
Subject: [PATCH 050/189] net/hns3: encapsulate port shaping interface
When rate of port changes, the rate limit of the port needs to
be updated. So it is necessary to encapsulate an interface that
configures the rate limit based on the rate.
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_dcb.c | 22 +++++++++++++++++-----
drivers/net/hns3/hns3_dcb.h | 2 +-
drivers/net/hns3/hns3_ethdev.c | 10 +++-------
3 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 7fc6ac9..ebfc240 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -330,8 +330,8 @@ hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
return shapping_para;
}
-int
-hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
+static int
+hns3_dcb_port_shaper_cfg(struct hns3_hw *hw, uint32_t speed)
{
struct hns3_port_shapping_cmd *shap_cfg_cmd;
struct hns3_shaper_parameter shaper_parameter;
@@ -340,7 +340,7 @@ hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
struct hns3_cmd_desc desc;
int ret;
- ret = hns3_shaper_para_calc(hw, hw->mac.link_speed,
+ ret = hns3_shaper_para_calc(hw, speed,
HNS3_SHAPER_LVL_PORT, &shaper_parameter);
if (ret) {
hns3_err(hw, "calculate shaper parameter failed: %d", ret);
@@ -366,12 +366,24 @@ hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
* depends on the firmware version. But driver still needs to
* calculate it and configure to firmware for better compatibility.
*/
- shap_cfg_cmd->port_rate = rte_cpu_to_le_32(hw->mac.link_speed);
+ shap_cfg_cmd->port_rate = rte_cpu_to_le_32(speed);
hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
return hns3_cmd_send(hw, &desc, 1);
}
+int
+hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed)
+{
+ int ret;
+
+ ret = hns3_dcb_port_shaper_cfg(hw, speed);
+ if (ret)
+ hns3_err(hw, "configure port shappering failed: ret = %d", ret);
+
+ return ret;
+}
+
static int
hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
uint8_t pg_id, uint32_t shapping_para, uint32_t rate)
@@ -961,7 +973,7 @@ hns3_dcb_shaper_cfg(struct hns3_hw *hw)
{
int ret;
- ret = hns3_dcb_port_shaper_cfg(hw);
+ ret = hns3_dcb_port_shaper_cfg(hw, hw->mac.link_speed);
if (ret) {
hns3_err(hw, "config port shaper failed: %d", ret);
return ret;
diff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h
index 8248434..0d25d3b 100644
--- a/drivers/net/hns3/hns3_dcb.h
+++ b/drivers/net/hns3/hns3_dcb.h
@@ -208,7 +208,7 @@ int hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
uint16_t nb_tx_q);
int hns3_dcb_cfg_update(struct hns3_adapter *hns);
-int hns3_dcb_port_shaper_cfg(struct hns3_hw *hw);
+int hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed);
int hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate);
int hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate);
uint8_t hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no);
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 96ad9b0..94c08e8 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4384,7 +4384,6 @@ static int
hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
{
struct hns3_mac *mac = &hw->mac;
- uint32_t cur_speed = mac->link_speed;
int ret;
duplex = hns3_check_speed_dup(duplex, speed);
@@ -4395,14 +4394,11 @@ hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
if (ret)
return ret;
- mac->link_speed = speed;
- ret = hns3_dcb_port_shaper_cfg(hw);
- if (ret) {
- hns3_err(hw, "failed to configure port shaper, ret = %d.", ret);
- mac->link_speed = cur_speed;
+ ret = hns3_port_shaper_update(hw, speed);
+ if (ret)
return ret;
- }
+ mac->link_speed = speed;
mac->link_duplex = duplex;
return 0;
--
2.7.4

View File

@ -0,0 +1,48 @@
From 35469e7e3c26afc79f340b8477bf7ce1dc65746e Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Thu, 4 Mar 2021 15:44:47 +0800
Subject: [PATCH 051/189] net/hns3: fix device capabilities for copper media
type
The configuration operation for PHY is implemented by firmware. And
a capability flag will be report to driver, which means the firmware
supports the PHY driver. However, the current implementation only
supports obtaining the capability bit, but some basic functions of
copper ports in driver, such as, the query of link status and link
info, are not supported.
Therefore, it is necessary for driver to set the copper capability
bit to zero when the firmware supports the configuration of the PHY.
Fixes: 438752358158 ("net/hns3: get device capability from firmware")
Fixes: 95e50325864c ("net/hns3: support copper media type")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
---
drivers/net/hns3/hns3_cmd.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 0590898..f0bc177 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -423,8 +423,14 @@ static void hns3_parse_capability(struct hns3_hw *hw,
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
+ /*
+ * Currently, the query of link status and link info on copper ports
+ * are not supported. So it is necessary for driver to set the copper
+ * capability bit to zero when the firmware supports the configuration
+ * of the PHY.
+ */
if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
- hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
+ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 0);
if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
--
2.7.4

View File

@ -0,0 +1,279 @@
From ed40b1477365a3c581f487ba3c8261c53dcdc255 Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Thu, 4 Mar 2021 15:44:48 +0800
Subject: [PATCH 052/189] net/hns3: support PF device with copper PHYs
The normal operation of devices with copper phys depends on the
initialization and configuration of the PHY chip. The task of
driving the PHY chip is implemented in some firmware versions.
If firmware supports the phy driver, it will report a capability
flag to driver in probing process. The driver determines whether
to support PF device with copper phys based on the capability bit.
If supported, the driver set a flag indicating that the firmware
takes over the PHY, and then the firmware initializes the PHY.
This patch supports the query of link status and link info, and
existing basic features for PF device with copper phys.
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_cmd.c | 8 +--
drivers/net/hns3/hns3_cmd.h | 37 +++++++++++++
drivers/net/hns3/hns3_ethdev.c | 115 ++++++++++++++++++++++++++++++++++++++---
drivers/net/hns3/hns3_ethdev.h | 5 ++
4 files changed, 152 insertions(+), 13 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index f0bc177..0590898 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -423,14 +423,8 @@ static void hns3_parse_capability(struct hns3_hw *hw,
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
- /*
- * Currently, the query of link status and link info on copper ports
- * are not supported. So it is necessary for driver to set the copper
- * capability bit to zero when the firmware supports the configuration
- * of the PHY.
- */
if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
- hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 0);
+ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 93bfa74..7f567cb 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -222,6 +222,8 @@ enum hns3_opcode_type {
/* Firmware stats command */
HNS3_OPC_FIRMWARE_COMPAT_CFG = 0x701A,
+ /* Firmware control phy command */
+ HNS3_OPC_PHY_PARAM_CFG = 0x7025,
/* SFP command */
HNS3_OPC_GET_SFP_EEPROM = 0x7100,
@@ -659,11 +661,46 @@ enum hns3_promisc_type {
#define HNS3_LINK_EVENT_REPORT_EN_B 0
#define HNS3_NCSI_ERROR_REPORT_EN_B 1
+#define HNS3_FIRMWARE_PHY_DRIVER_EN_B 2
struct hns3_firmware_compat_cmd {
uint32_t compat;
uint8_t rsv[20];
};
+/* Bitmap flags in supported, advertising and lp_advertising */
+#define HNS3_PHY_LINK_SPEED_10M_HD_BIT BIT(0)
+#define HNS3_PHY_LINK_SPEED_10M_BIT BIT(1)
+#define HNS3_PHY_LINK_SPEED_100M_HD_BIT BIT(2)
+#define HNS3_PHY_LINK_SPEED_100M_BIT BIT(3)
+#define HNS3_PHY_LINK_MODE_AUTONEG_BIT BIT(6)
+#define HNS3_PHY_LINK_MODE_PAUSE_BIT BIT(13)
+#define HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT BIT(14)
+
+#define HNS3_PHY_PARAM_CFG_BD_NUM 2
+struct hns3_phy_params_bd0_cmd {
+ uint32_t speed;
+#define HNS3_PHY_DUPLEX_CFG_B 0
+ uint8_t duplex;
+#define HNS3_PHY_AUTONEG_CFG_B 0
+ uint8_t autoneg;
+ uint8_t eth_tp_mdix;
+ uint8_t eth_tp_mdix_ctrl;
+ uint8_t port;
+ uint8_t transceiver;
+ uint8_t phy_address;
+ uint8_t rsv;
+ uint32_t supported;
+ uint32_t advertising;
+ uint32_t lp_advertising;
+};
+
+struct hns3_phy_params_bd1_cmd {
+ uint8_t master_slave_cfg;
+ uint8_t master_slave_state;
+ uint8_t rsv1[2];
+ uint32_t rsv2[5];
+};
+
#define HNS3_MAC_TX_EN_B 6
#define HNS3_MAC_RX_EN_B 7
#define HNS3_MAC_PAD_TX_B 11
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 94c08e8..6cb6bec 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -3090,6 +3090,37 @@ hns3_get_capability(struct hns3_hw *hw)
}
static int
+hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type)
+{
+ int ret;
+
+ switch (media_type) {
+ case HNS3_MEDIA_TYPE_COPPER:
+ if (!hns3_dev_copper_supported(hw)) {
+ PMD_INIT_LOG(ERR,
+ "Media type is copper, not supported.");
+ ret = -EOPNOTSUPP;
+ } else {
+ ret = 0;
+ }
+ break;
+ case HNS3_MEDIA_TYPE_FIBER:
+ ret = 0;
+ break;
+ case HNS3_MEDIA_TYPE_BACKPLANE:
+ PMD_INIT_LOG(ERR, "Media type is Backplane, not supported.");
+ ret = -EOPNOTSUPP;
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
hns3_get_board_configuration(struct hns3_hw *hw)
{
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
@@ -3103,11 +3134,9 @@ hns3_get_board_configuration(struct hns3_hw *hw)
return ret;
}
- if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER &&
- !hns3_dev_copper_supported(hw)) {
- PMD_INIT_LOG(ERR, "media type is copper, not supported.");
- return -EOPNOTSUPP;
- }
+ ret = hns3_check_media_type(hw, cfg.media_type);
+ if (ret)
+ return ret;
hw->mac.media_type = cfg.media_type;
hw->rss_size_max = cfg.rss_size_max;
@@ -3952,6 +3981,8 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
if (is_init) {
hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
+ if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
+ hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
}
req->compat = rte_cpu_to_le_32(compat);
@@ -4429,6 +4460,78 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX);
}
+static void
+hns3_parse_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
+{
+ struct hns3_phy_params_bd0_cmd *req;
+
+ req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
+ mac->link_speed = rte_le_to_cpu_32(req->speed);
+ mac->link_duplex = hns3_get_bit(req->duplex,
+ HNS3_PHY_DUPLEX_CFG_B);
+ mac->link_autoneg = hns3_get_bit(req->autoneg,
+ HNS3_PHY_AUTONEG_CFG_B);
+ mac->supported_capa = rte_le_to_cpu_32(req->supported);
+ mac->advertising = rte_le_to_cpu_32(req->advertising);
+ mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
+ mac->support_autoneg = !!(mac->supported_capa &
+ HNS3_PHY_LINK_MODE_AUTONEG_BIT);
+}
+
+static int
+hns3_get_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
+{
+ struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
+ uint16_t i;
+ int ret;
+
+ for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
+ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
+ true);
+ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+ }
+ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);
+
+ ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
+ if (ret) {
+ hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
+ return ret;
+ }
+
+ hns3_parse_phy_params(desc, mac);
+
+ return 0;
+}
+
+static int
+hns3_update_phy_link_info(struct hns3_hw *hw)
+{
+ struct hns3_mac *mac = &hw->mac;
+ struct hns3_mac mac_info;
+ int ret;
+
+ memset(&mac_info, 0, sizeof(struct hns3_mac));
+ ret = hns3_get_phy_params(hw, &mac_info);
+ if (ret)
+ return ret;
+
+ if (mac_info.link_speed != mac->link_speed) {
+ ret = hns3_port_shaper_update(hw, mac_info.link_speed);
+ if (ret)
+ return ret;
+ }
+
+ mac->link_speed = mac_info.link_speed;
+ mac->link_duplex = mac_info.link_duplex;
+ mac->link_autoneg = mac_info.link_autoneg;
+ mac->supported_capa = mac_info.supported_capa;
+ mac->advertising = mac_info.advertising;
+ mac->lp_advertising = mac_info.lp_advertising;
+ mac->support_autoneg = mac_info.support_autoneg;
+
+ return 0;
+}
+
static int
hns3_update_link_info(struct rte_eth_dev *eth_dev)
{
@@ -4437,7 +4540,7 @@ hns3_update_link_info(struct rte_eth_dev *eth_dev)
int ret = 0;
if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
- return 0;
+ ret = hns3_update_phy_link_info(hw);
else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
ret = hns3_update_fiber_link_info(hw);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 2954422..3cbc2f2 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -180,6 +180,11 @@ struct hns3_mac {
uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */
uint8_t link_status : 1; /* ETH_LINK_[DOWN/UP] */
uint32_t link_speed; /* ETH_SPEED_NUM_ */
+ uint32_t supported_capa; /* supported capability for current media */
+ uint32_t advertising; /* advertised capability in the local part */
+ /* advertised capability in the link partner */
+ uint32_t lp_advertising;
+ uint8_t support_autoneg;
};
struct hns3_fake_queue_data {
--
2.7.4

View File

@ -0,0 +1,442 @@
From dd8dbf370b25e67e3ffaa845960c41c67775baa8 Mon Sep 17 00:00:00 2001
From: Chengwen Feng <fengchengwen@huawei.com>
Date: Thu, 4 Mar 2021 15:44:49 +0800
Subject: [PATCH 053/189] net/hns3: support Rx descriptor advanced layout
Currently, the driver get packet type by parse the
L3_ID/L4_ID/OL3_ID/OL4_ID from Rx descriptor and then lookup multiple
tables, it's time consuming.
Now Kunpeng930 support advanced RXD layout, which:
1. Combine OL3_ID/OL4_ID to 8bit PTYPE filed, so the driver get packet
type by lookup only one table. Note: L3_ID/L4_ID become reserved
fields.
2. The 1588 timestamp located at Rx descriptor instead of query from
firmware.
3. The L3E/L4E/OL3E/OL4E will be zero when L3L4P is zero, so driver
could optimize the good checksum calculations (when L3E/L4E is zero
then mark PKT_RX_IP_CKSUM_GOOD/PKT_RX_L4_CKSUM_GOOD).
Considering compatibility, the firmware will report capability of
RXD advanced layout, the driver will identify and enable it by default.
This patch only provides basic function: identify and enable the RXD
advanced layout, and lookup ptype table if supported.
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_cmd.c | 8 +-
drivers/net/hns3/hns3_cmd.h | 5 +
drivers/net/hns3/hns3_ethdev.c | 2 +
drivers/net/hns3/hns3_ethdev.h | 16 +++
drivers/net/hns3/hns3_ethdev_vf.c | 2 +
drivers/net/hns3/hns3_regs.h | 1 +
drivers/net/hns3/hns3_rxtx.c | 200 ++++++++++++++++++++++++++++++++++++++
drivers/net/hns3/hns3_rxtx.h | 11 +++
8 files changed, 243 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 0590898..8a2cc2d 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -409,8 +409,9 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
return retval;
}
-static void hns3_parse_capability(struct hns3_hw *hw,
- struct hns3_query_version_cmd *cmd)
+static void
+hns3_parse_capability(struct hns3_hw *hw,
+ struct hns3_query_version_cmd *cmd)
{
uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);
@@ -429,6 +430,9 @@ static void hns3_parse_capability(struct hns3_hw *hw,
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
+ if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
+ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
+ 1);
}
static uint32_t
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 7f567cb..6ceb655 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -312,6 +312,11 @@ enum HNS3_CAPS_BITS {
HNS3_CAPS_TQP_TXRX_INDEP_B,
HNS3_CAPS_HW_PAD_B,
HNS3_CAPS_STASH_B,
+ HNS3_CAPS_UDP_TUNNEL_CSUM_B,
+ HNS3_CAPS_RAS_IMP_B,
+ HNS3_CAPS_FEC_B,
+ HNS3_CAPS_PAUSE_B,
+ HNS3_CAPS_RXD_ADV_LAYOUT_B,
};
enum HNS3_API_CAP_BITS {
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 6cb6bec..7993d2d 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4970,6 +4970,8 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
return ret;
}
+ hns3_enable_rxd_adv_layout(hw);
+
ret = hns3_init_queues(hns, reset_queue);
if (ret) {
PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 3cbc2f2..52e6c49 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -667,8 +667,13 @@ struct hns3_mp_param {
#define HNS3_OL2TBL_NUM 4
#define HNS3_OL3TBL_NUM 16
#define HNS3_OL4TBL_NUM 16
+#define HNS3_PTYPE_NUM 256
struct hns3_ptype_table {
+ /*
+ * The next fields used to calc packet-type by the
+ * L3_ID/L4_ID/OL3_ID/OL4_ID from the Rx descriptor.
+ */
uint32_t l2l3table[HNS3_L2TBL_NUM][HNS3_L3TBL_NUM];
uint32_t l4table[HNS3_L4TBL_NUM];
uint32_t inner_l2table[HNS3_L2TBL_NUM];
@@ -677,6 +682,13 @@ struct hns3_ptype_table {
uint32_t ol2table[HNS3_OL2TBL_NUM];
uint32_t ol3table[HNS3_OL3TBL_NUM];
uint32_t ol4table[HNS3_OL4TBL_NUM];
+
+ /*
+ * The next field used to calc packet-type by the PTYPE from the Rx
+ * descriptor, it functions only when firmware report the capability of
+ * HNS3_CAPS_RXD_ADV_LAYOUT_B and driver enabled it.
+ */
+ uint32_t ptype[HNS3_PTYPE_NUM] __rte_cache_min_aligned;
};
#define HNS3_FIXED_MAX_TQP_NUM_MODE 0
@@ -771,6 +783,7 @@ struct hns3_adapter {
#define HNS3_DEV_SUPPORT_TX_PUSH_B 0x5
#define HNS3_DEV_SUPPORT_INDEP_TXRX_B 0x6
#define HNS3_DEV_SUPPORT_STASH_B 0x7
+#define HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B 0x9
#define hns3_dev_dcb_supported(hw) \
hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_DCB_B)
@@ -801,6 +814,9 @@ struct hns3_adapter {
#define hns3_dev_stash_supported(hw) \
hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_STASH_B)
+#define hns3_dev_rxd_adv_layout_supported(hw) \
+ hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B)
+
#define HNS3_DEV_PRIVATE_TO_HW(adapter) \
(&((struct hns3_adapter *)adapter)->hw)
#define HNS3_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 5b4c587..90951df 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2125,6 +2125,8 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
if (ret)
return ret;
+ hns3_enable_rxd_adv_layout(hw);
+
ret = hns3_init_queues(hns, reset_queue);
if (ret)
hns3_err(hw, "failed to init queues, ret = %d.", ret);
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index 39fc5d1..0540554 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -36,6 +36,7 @@
#define HNS3_GLOBAL_RESET_REG 0x20A00
#define HNS3_FUN_RST_ING 0x20C00
#define HNS3_GRO_EN_REG 0x28000
+#define HNS3_RXD_ADV_LAYOUT_EN_REG 0x28008
/* Vector0 register bits for reset */
#define HNS3_VECTOR0_FUNCRESET_INT_B 0
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 897e5fa..09b38d4 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1802,6 +1802,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
HNS3_PORT_BASE_VLAN_ENABLE;
else
rxq->pvid_sw_discard_en = false;
+ rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
rxq->configured = true;
rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
idx * HNS3_TQP_REG_SIZE);
@@ -1987,6 +1988,193 @@ hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
}
+static void
+hns3_init_adv_layout_ptype(struct hns3_ptype_table *tbl)
+{
+ uint32_t *ptype = tbl->ptype;
+
+ /* Non-tunnel L2 */
+ ptype[1] = RTE_PTYPE_L2_ETHER_ARP;
+ ptype[3] = RTE_PTYPE_L2_ETHER_LLDP;
+ ptype[8] = RTE_PTYPE_L2_ETHER_TIMESYNC;
+
+ /* Non-tunnel IPv4 */
+ ptype[17] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ ptype[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ ptype[19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ /* The next ptype is GRE over IPv4 */
+ ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP;
+ ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_IGMP;
+ ptype[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP;
+ /* The next ptype is PTP over IPv4 + UDP */
+ ptype[25] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+
+ /* IPv4 --> GRE/Teredo/VXLAN */
+ ptype[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT;
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
+ ptype[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ ptype[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ptype[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ ptype[39] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ptype[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+
+ /* Non-tunnel IPv6 */
+ ptype[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ ptype[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ ptype[113] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ /* The next ptype is GRE over IPv6 */
+ ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP;
+ ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_IGMP;
+ ptype[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP;
+ /* Special for PTP over IPv6 + UDP */
+ ptype[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+
+ /* IPv6 --> GRE/Teredo/VXLAN */
+ ptype[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT;
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
+ ptype[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ ptype[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[128] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ptype[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ ptype[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[135] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ptype[139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+}
+
void
hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
{
@@ -1997,6 +2185,7 @@ hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
hns3_init_non_tunnel_ptype_tbl(tbl);
hns3_init_tunnel_ptype_tbl(tbl);
+ hns3_init_adv_layout_ptype(tbl);
}
static inline void
@@ -4012,3 +4201,14 @@ hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
else
return fbd_num - driver_hold_bd_num;
}
+
+void
+hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
+{
+ /*
+ * If the hardware support rxd advanced layout, then driver enable it
+ * default.
+ */
+ if (hns3_dev_rxd_adv_layout_supported(hw))
+ hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
+}
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 7118bd4..9adeb24 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -88,6 +88,8 @@
#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S 8
#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
+#define HNS3_RXD_PTYPE_S 4
+#define HNS3_RXD_PTYPE_M (0xff << HNS3_RXD_PTYPE_S)
#define HNS3_RXD_FBHI_S 12
#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S 14
@@ -328,6 +330,7 @@ struct hns3_rx_queue {
* point, the pvid_sw_discard_en will be false.
*/
bool pvid_sw_discard_en;
+ bool ptype_en; /* indicate if the ptype field enabled */
bool enabled; /* indicate if Rx queue has been enabled */
struct hns3_rx_basic_stats basic_stats;
@@ -609,6 +612,13 @@ hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
uint32_t l2id, l3id, l4id;
uint32_t ol3id, ol4id, ol2id;
+ uint32_t ptype;
+
+ if (rxq->ptype_en) {
+ ptype = hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S);
+ return ptype_tbl->ptype[ptype];
+ }
ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
@@ -707,5 +717,6 @@ int hns3_start_all_rxqs(struct rte_eth_dev *dev);
void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
+void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
#endif /* _HNS3_RXTX_H_ */
--
2.7.4

View File

@ -0,0 +1,78 @@
From 7e76a11ae316966bb1094e3797a7f7c8fe4e3213 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Thu, 4 Mar 2021 15:44:50 +0800
Subject: [PATCH 054/189] net/hns3: fix HW buffer size on MTU update
After MTU changed, the buffer used to store packets in HW should be
reallocated. And buffer size is allocated based on the maximum frame
size in the PF struct. However, the value of maximum frame size is
not updated in time when the MTU is changed. This could lead to packet
loss due to insufficient buffer space.
This patch updates the maximum frame size before reallocating the HW
buffer. And a rollback operation is added to avoid the side effects
of buffer reallocation failures.
Fixes: 1f5ca0b460cd ("net/hns3: support some device operations")
Fixes: d51867db65c1 ("net/hns3: add initialization")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 24 ++++++++++++++++++++----
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 7993d2d..6a56a05 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2460,17 +2460,33 @@ hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
static int
hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ uint16_t original_mps = hns->pf.mps;
+ int err;
int ret;
ret = hns3_set_mac_mtu(hw, mps);
if (ret) {
- hns3_err(hw, "Failed to set mtu, ret = %d", ret);
+ hns3_err(hw, "failed to set mtu, ret = %d", ret);
return ret;
}
+ hns->pf.mps = mps;
ret = hns3_buffer_alloc(hw);
- if (ret)
- hns3_err(hw, "Failed to allocate buffer, ret = %d", ret);
+ if (ret) {
+ hns3_err(hw, "failed to allocate buffer, ret = %d", ret);
+ goto rollback;
+ }
+
+ return 0;
+
+rollback:
+ err = hns3_set_mac_mtu(hw, original_mps);
+ if (err) {
+ hns3_err(hw, "fail to rollback MTU, err = %d", err);
+ return ret;
+ }
+ hns->pf.mps = original_mps;
return ret;
}
@@ -2505,7 +2521,7 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
dev->data->port_id, mtu, ret);
return ret;
}
- hns->pf.mps = (uint16_t)frame_size;
+
if (is_jumbo_frame)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
--
2.7.4

View File

@ -0,0 +1,35 @@
From 82c54c157d1cb684ef41a0ccdec0be4ecfa64a31 Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Thu, 4 Mar 2021 15:44:51 +0800
Subject: [PATCH 055/189] net/hns3: remove unused parameter markers
All input parameters in the "hns3_dev_xstats_get_by_id" API are used,
so the rte_unused flag of some variables should be deleted.
Fixes: 3213d584b698 ("net/hns3: fix xstats with id and names")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_stats.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h
index 01b4f36..70a9c5b 100644
--- a/drivers/net/hns3/hns3_stats.h
+++ b/drivers/net/hns3/hns3_stats.h
@@ -156,8 +156,8 @@ int hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
__rte_unused unsigned int size);
int hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev,
- __rte_unused const uint64_t *ids,
- __rte_unused uint64_t *values,
+ const uint64_t *ids,
+ uint64_t *values,
uint32_t size);
int hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
--
2.7.4

View File

@ -0,0 +1,200 @@
From bd503f5817a2597e8431e02675a8c3847a31992e Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Thu, 4 Mar 2021 15:44:52 +0800
Subject: [PATCH 056/189] net/hns3: fix mbuf leakage
The mbufs of rx queue will be allocated in "hns3_do_start" function.
But these mbufs are not released when "hns3_dev_start" fails to
execute.
Fixes: c4ae39b2cfc5 ("net/hns3: fix Rx interrupt after reset")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 45 ++++++++++++++++++++++++---------------
drivers/net/hns3/hns3_ethdev_vf.c | 43 ++++++++++++++++++++++---------------
2 files changed, 54 insertions(+), 34 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 6a56a05..1d56916 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -102,6 +102,7 @@ static int hns3_remove_mc_addr(struct hns3_hw *hw,
struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct hns3_hw *hw);
+static int hns3_do_stop(struct hns3_adapter *hns);
void hns3_ether_format_addr(char *buf, uint16_t size,
const struct rte_ether_addr *ether_addr)
@@ -5133,11 +5134,8 @@ hns3_dev_start(struct rte_eth_dev *dev)
return ret;
}
ret = hns3_map_rx_interrupt(dev);
- if (ret) {
- hw->adapter_state = HNS3_NIC_CONFIGURED;
- rte_spinlock_unlock(&hw->lock);
- return ret;
- }
+ if (ret)
+ goto map_rx_inter_err;
/*
* There are three register used to control the status of a TQP
@@ -5151,19 +5149,12 @@ hns3_dev_start(struct rte_eth_dev *dev)
* status of queue in the dpdk framework.
*/
ret = hns3_start_all_txqs(dev);
- if (ret) {
- hw->adapter_state = HNS3_NIC_CONFIGURED;
- rte_spinlock_unlock(&hw->lock);
- return ret;
- }
+ if (ret)
+ goto map_rx_inter_err;
ret = hns3_start_all_rxqs(dev);
- if (ret) {
- hns3_stop_all_txqs(dev);
- hw->adapter_state = HNS3_NIC_CONFIGURED;
- rte_spinlock_unlock(&hw->lock);
- return ret;
- }
+ if (ret)
+ goto start_all_rxqs_fail;
hw->adapter_state = HNS3_NIC_STARTED;
rte_spinlock_unlock(&hw->lock);
@@ -5187,7 +5178,17 @@ hns3_dev_start(struct rte_eth_dev *dev)
hns3_tm_dev_start_proc(hw);
hns3_info(hw, "hns3 dev start successful!");
+
return 0;
+
+start_all_rxqs_fail:
+ hns3_stop_all_txqs(dev);
+map_rx_inter_err:
+ (void)hns3_do_stop(hns);
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
}
static int
@@ -5196,6 +5197,17 @@ hns3_do_stop(struct hns3_adapter *hns)
struct hns3_hw *hw = &hns->hw;
int ret;
+ /*
+ * The "hns3_do_stop" function will also be called by .stop_service to
+ * prepare reset. At the time of global or IMP reset, the command cannot
+ * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be
+ * accessed during the reset process. So the mbuf can not be released
+ * during reset and is required to be released after the reset is
+ * completed.
+ */
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ hns3_dev_release_mbufs(hns);
+
ret = hns3_cfg_mac_mode(hw, false);
if (ret)
return ret;
@@ -5273,7 +5285,6 @@ hns3_dev_stop(struct rte_eth_dev *dev)
hns3_stop_tqps(hw);
hns3_do_stop(hns);
hns3_unmap_rx_interrupt(dev);
- hns3_dev_release_mbufs(hns);
hw->adapter_state = HNS3_NIC_CONFIGURED;
}
hns3_rx_scattered_reset(dev);
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 90951df..12af105 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1941,6 +1941,17 @@ hns3vf_do_stop(struct hns3_adapter *hns)
hw->mac.link_status = ETH_LINK_DOWN;
+ /*
+ * The "hns3vf_do_stop" function will also be called by .stop_service to
+ * prepare reset. At the time of global or IMP reset, the command cannot
+ * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be
+ * accessed during the reset process. So the mbuf can not be released
+ * during reset and is required to be released after the reset is
+ * completed.
+ */
+ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ hns3_dev_release_mbufs(hns);
+
if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
hns3vf_configure_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
@@ -2010,7 +2021,6 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
hns3_stop_tqps(hw);
hns3vf_do_stop(hns);
hns3vf_unmap_rx_interrupt(dev);
- hns3_dev_release_mbufs(hns);
hw->adapter_state = HNS3_NIC_CONFIGURED;
}
hns3_rx_scattered_reset(dev);
@@ -2253,11 +2263,8 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
return ret;
}
ret = hns3vf_map_rx_interrupt(dev);
- if (ret) {
- hw->adapter_state = HNS3_NIC_CONFIGURED;
- rte_spinlock_unlock(&hw->lock);
- return ret;
- }
+ if (ret)
+ goto map_rx_inter_err;
/*
* There are three register used to control the status of a TQP
@@ -2271,19 +2278,12 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
* status of queue in the dpdk framework.
*/
ret = hns3_start_all_txqs(dev);
- if (ret) {
- hw->adapter_state = HNS3_NIC_CONFIGURED;
- rte_spinlock_unlock(&hw->lock);
- return ret;
- }
+ if (ret)
+ goto map_rx_inter_err;
ret = hns3_start_all_rxqs(dev);
- if (ret) {
- hns3_stop_all_txqs(dev);
- hw->adapter_state = HNS3_NIC_CONFIGURED;
- rte_spinlock_unlock(&hw->lock);
- return ret;
- }
+ if (ret)
+ goto start_all_rxqs_fail;
hw->adapter_state = HNS3_NIC_STARTED;
rte_spinlock_unlock(&hw->lock);
@@ -2305,6 +2305,15 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
hns3_start_tqps(hw);
return ret;
+
+start_all_rxqs_fail:
+ hns3_stop_all_txqs(dev);
+map_rx_inter_err:
+ (void)hns3vf_do_stop(hns);
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
}
static bool
--
2.7.4

View File

@ -0,0 +1,211 @@
From b9fbefb52b791730d5720946713e6cb187337652 Mon Sep 17 00:00:00 2001
From: Hongbo Zheng <zhenghongbo3@huawei.com>
Date: Thu, 4 Mar 2021 15:44:53 +0800
Subject: [PATCH 057/189] net/hns3: process MAC interrupt
TNL is the abbreviation of tunnel, which means port
here. MAC TNL interrupt indicates the MAC status
report of the network port, which will be generated
when the MAC status changes.
This patch enables MAC TNL interrupt reporting, and
queries and prints the corresponding MAC status when
the interrupt is received, then clear the MAC interrupt
status. Because this interrupt uses the same interrupt
as RAS, the interrupt log is adjusted.
Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_cmd.h | 3 +++
drivers/net/hns3/hns3_ethdev.c | 57 ++++++++++++++++++++++++++++++++++++------
drivers/net/hns3/hns3_intr.c | 20 +++++++++++++++
drivers/net/hns3/hns3_intr.h | 4 +++
4 files changed, 76 insertions(+), 8 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 6ceb655..094bf7e 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -116,6 +116,9 @@ enum hns3_opcode_type {
HNS3_OPC_QUERY_LINK_STATUS = 0x0307,
HNS3_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HNS3_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HNS3_OPC_QUERY_MAC_TNL_INT = 0x0310,
+ HNS3_OPC_MAC_TNL_INT_EN = 0x0311,
+ HNS3_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HNS3_OPC_CONFIG_FEC_MODE = 0x031A,
/* PFC/Pause commands */
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 1d56916..80f91a7 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -217,9 +217,6 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
goto out;
}
- if (clearval && (vector0_int_stats || cmdq_src_val || hw_err_src_reg))
- hns3_warn(hw, "vector0_int_stats:0x%x cmdq_src_val:0x%x hw_err_src_reg:0x%x",
- vector0_int_stats, cmdq_src_val, hw_err_src_reg);
val = vector0_int_stats;
ret = HNS3_VECTOR0_EVENT_OTHER;
out:
@@ -258,6 +255,34 @@ hns3_clear_all_event_cause(struct hns3_hw *hw)
}
static void
+hns3_handle_mac_tnl(struct hns3_hw *hw)
+{
+ struct hns3_cmd_desc desc;
+ uint32_t status;
+ int ret;
+
+ /* query and clear mac tnl interruptions */
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
+ return;
+ }
+
+ status = rte_le_to_cpu_32(desc.data[0]);
+ if (status) {
+ hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
+ false);
+ desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
+ ret);
+ }
+}
+
+static void
hns3_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -265,24 +290,36 @@ hns3_interrupt_handler(void *param)
struct hns3_hw *hw = &hns->hw;
enum hns3_evt_cause event_cause;
uint32_t clearval = 0;
+ uint32_t vector0_int;
+ uint32_t ras_int;
+ uint32_t cmdq_int;
/* Disable interrupt */
hns3_pf_disable_irq0(hw);
event_cause = hns3_check_event_cause(hns, &clearval);
+ vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
+ ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
+ cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
/* vector 0 interrupt is shared with reset and mailbox source events. */
if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
- hns3_warn(hw, "Received err interrupt");
+ hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
+ "ras_int_stat:0x%x cmdq_int_stat:0x%x",
+ vector0_int, ras_int, cmdq_int);
hns3_handle_msix_error(hns, &hw->reset.request);
hns3_handle_ras_error(hns, &hw->reset.request);
+ hns3_handle_mac_tnl(hw);
hns3_schedule_reset(hns);
} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
- hns3_warn(hw, "Received reset interrupt");
+ hns3_warn(hw, "received reset interrupt");
hns3_schedule_reset(hns);
- } else if (event_cause == HNS3_VECTOR0_EVENT_MBX)
+ } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
hns3_dev_handle_mbx_msg(hw);
- else
- hns3_err(hw, "Received unknown event");
+ } else {
+ hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
+ "ras_int_stat:0x%x cmdq_int_stat:0x%x",
+ vector0_int, ras_int, cmdq_int);
+ }
hns3_clear_event_cause(hw, event_cause, clearval);
/* Enable interrupt if it is not cause by reset */
@@ -4639,6 +4676,8 @@ hns3_update_link_status(struct hns3_hw *hw)
if (state != hw->mac.link_status) {
hw->mac.link_status = state;
hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
+ hns3_config_mac_tnl_int(hw,
+ state == ETH_LINK_UP ? true : false);
return true;
}
@@ -4957,6 +4996,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev)
(void)hns3_firmware_compat_config(hw, false);
hns3_uninit_umv_space(hw);
hns3_tqp_stats_uninit(hw);
+ hns3_config_mac_tnl_int(hw, false);
hns3_pf_disable_irq0(hw);
rte_intr_disable(&pci_dev->intr_handle);
hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
@@ -5282,6 +5322,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
rte_spinlock_lock(&hw->lock);
if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
hns3_tm_dev_stop_proc(hw);
+ hns3_config_mac_tnl_int(hw, false);
hns3_stop_tqps(hw);
hns3_do_stop(hns);
hns3_unmap_rx_interrupt(dev);
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 88ce4c6..2563504 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -1248,6 +1248,26 @@ enable_ssu_err_intr(struct hns3_adapter *hns, bool en)
return ret;
}
+void
+hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en)
+{
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_TNL_INT_EN, false);
+ if (en)
+ desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_EN);
+ else
+ desc.data[0] = 0;
+
+ desc.data[1] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_EN_MASK);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "fail to %s mac tnl intr, ret = %d",
+ en ? "enable" : "disable", ret);
+}
+
static int
config_ppu_err_intrs(struct hns3_adapter *hns, uint32_t cmd, bool en)
{
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index 19de1aa..c569a9d 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -22,6 +22,9 @@
#define HNS3_MAC_COMMON_ERR_INT_EN 0x107FF
#define HNS3_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
+#define HNS3_MAC_TNL_INT_EN GENMASK(9, 0)
+#define HNS3_MAC_TNL_INT_EN_MASK GENMASK(9, 0)
+#define HNS3_MAC_TNL_INT_CLR GENMASK(9, 0)
#define HNS3_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
#define HNS3_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000
@@ -99,6 +102,7 @@ struct hns3_hw_error_desc {
int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool state);
void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en);
void hns3_intr_unregister(const struct rte_intr_handle *hdl,
rte_intr_callback_fn cb_fn, void *cb_arg);
--
2.7.4

View File

@ -0,0 +1,376 @@
From 7905cce75947b36dc0d955234d0930367e86bc17 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Thu, 4 Mar 2021 15:44:54 +0800
Subject: [PATCH 058/189] net/hns3: fix imprecise statistics
Currently, the hns3 statistics may be inaccurate due to the
following two problems:
1. Queue-level statistics are read from the firmware, and only one Rx or
Tx can be read at a time. This results in a large time interval
between reading multiple queues statistics in a stress scenario, such
as 1280 queues used by a PF or 256 functions used at the same time.
Especially when the 256 functions are used at the same time, the
interval between every two firmware commands in a function can be
huge, because the scheduling mechanism of the firmware is similar to
RR.
2. The current statistics are read by type. The HW statistics are read
first, and then the software statistics are read. Due to preceding
reasons, HW reading may be time-consuming, which cause a
synchronization problem between SW and HW statistics of the same
queue.
In this patch, queue-level statistics are directly read from the bar
instead of the firmware, and all the statistics of a queue include HW
and SW are read at a time to reduce inconsistency.
Fixes: 8839c5e202f3 ("net/hns3: support device stats")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
drivers/net/hns3/hns3_stats.c | 221 ++++++++++++++----------------------------
1 file changed, 72 insertions(+), 149 deletions(-)
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index 87035e3..941c75f 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -367,7 +367,6 @@ static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
HNS3_NUM_RESET_XSTATS + HNS3_NUM_IMISSED_XSTATS)
static void hns3_tqp_stats_clear(struct hns3_hw *hw);
-static void hns3_tqp_basic_stats_clear(struct rte_eth_dev *dev);
/*
* Query all the MAC statistics data of Network ICL command ,opcode id: 0x0034.
@@ -481,49 +480,6 @@ hns3_query_update_mac_stats(struct rte_eth_dev *dev)
return ret;
}
-/* Get tqp stats from register */
-static int
-hns3_update_tqp_stats(struct hns3_hw *hw)
-{
- struct hns3_tqp_stats *stats = &hw->tqp_stats;
- struct hns3_cmd_desc desc;
- uint64_t cnt;
- uint16_t i;
- int ret;
-
- for (i = 0; i < hw->tqps_num; i++) {
- hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_RX_STATUS,
- true);
-
- desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
- ret = hns3_cmd_send(hw, &desc, 1);
- if (ret) {
- hns3_err(hw, "Failed to query RX No.%u queue stat: %d",
- i, ret);
- return ret;
- }
- cnt = rte_le_to_cpu_32(desc.data[1]);
- stats->rcb_rx_ring_pktnum_rcd += cnt;
- stats->rcb_rx_ring_pktnum[i] += cnt;
-
- hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_TX_STATUS,
- true);
-
- desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
- ret = hns3_cmd_send(hw, &desc, 1);
- if (ret) {
- hns3_err(hw, "Failed to query TX No.%u queue stat: %d",
- i, ret);
- return ret;
- }
- cnt = rte_le_to_cpu_32(desc.data[1]);
- stats->rcb_tx_ring_pktnum_rcd += cnt;
- stats->rcb_tx_ring_pktnum[i] += cnt;
- }
-
- return 0;
-}
-
static int
hns3_update_rpu_drop_stats(struct hns3_hw *hw)
{
@@ -589,17 +545,11 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
struct hns3_tqp_stats *stats = &hw->tqp_stats;
struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
uint64_t cnt;
uint16_t i;
int ret;
- /* Update tqp stats by read register */
- ret = hns3_update_tqp_stats(hw);
- if (ret) {
- hns3_err(hw, "Update tqp stats fail : %d", ret);
- return ret;
- }
-
if (!hns->is_vf) {
/* Update imissed stats */
ret = hns3_update_imissed_stats(hw, false);
@@ -612,24 +562,34 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt;
}
- /* Get the error stats and bytes of received packets */
+ /* Reads all the stats of a rxq in a loop to keep them synchronized */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
- if (rxq) {
- cnt = rxq->err_stats.l2_errors +
- rxq->err_stats.pkt_len_errors;
- rte_stats->ierrors += cnt;
+ if (rxq == NULL)
+ continue;
- rte_stats->ibytes += rxq->basic_stats.bytes;
- }
+ cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
+ /*
+ * Read hardware and software in adjacent positions to minumize
+ * the timing variance.
+ */
+ rte_stats->ierrors += rxq->err_stats.l2_errors +
+ rxq->err_stats.pkt_len_errors;
+ stats->rcb_rx_ring_pktnum_rcd += cnt;
+ stats->rcb_rx_ring_pktnum[i] += cnt;
+ rte_stats->ibytes += rxq->basic_stats.bytes;
}
- /* Get the bytes of received packets */
- struct hns3_tx_queue *txq;
+ /* Reads all the stats of a txq in a loop to keep them synchronized */
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
txq = eth_dev->data->tx_queues[i];
- if (txq)
- rte_stats->obytes += txq->basic_stats.bytes;
+ if (txq == NULL)
+ continue;
+
+ cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
+ stats->rcb_tx_ring_pktnum_rcd += cnt;
+ stats->rcb_tx_ring_pktnum[i] += cnt;
+ rte_stats->obytes += txq->basic_stats.bytes;
}
rte_stats->oerrors = 0;
@@ -653,37 +613,11 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- struct hns3_cmd_desc desc_reset;
struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
uint16_t i;
int ret;
- /*
- * Note: Reading hardware statistics of rx/tx queue packet number
- * will clear them.
- */
- for (i = 0; i < hw->tqps_num; i++) {
- hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_RX_STATUS,
- true);
- desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
- ret = hns3_cmd_send(hw, &desc_reset, 1);
- if (ret) {
- hns3_err(hw, "Failed to reset RX No.%u queue stat: %d",
- i, ret);
- return ret;
- }
-
- hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_TX_STATUS,
- true);
- desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
- ret = hns3_cmd_send(hw, &desc_reset, 1);
- if (ret) {
- hns3_err(hw, "Failed to reset TX No.%u queue stat: %d",
- i, ret);
- return ret;
- }
- }
-
if (!hns->is_vf) {
/*
* Note: Reading hardware statistics of imissed registers will
@@ -697,25 +631,44 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
}
}
- /*
- * Clear soft stats of rx error packet which will be dropped
- * in driver.
- */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
- if (rxq) {
- rxq->err_stats.pkt_len_errors = 0;
- rxq->err_stats.l2_errors = 0;
- }
+ if (rxq == NULL)
+ continue;
+
+ rxq->err_stats.pkt_len_errors = 0;
+ rxq->err_stats.l2_errors = 0;
+ }
+
+ /* Clear all the stats of a rxq in a loop to keep them synchronized */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+ if (rxq == NULL)
+ continue;
+
+ memset(&rxq->basic_stats, 0,
+ sizeof(struct hns3_rx_basic_stats));
+
+ /* This register is read-clear */
+ (void)hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
+ rxq->err_stats.pkt_len_errors = 0;
+ rxq->err_stats.l2_errors = 0;
+ }
+
+ /* Clear all the stats of a txq in a loop to keep them synchronized */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ txq = eth_dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+
+ memset(&txq->basic_stats, 0,
+ sizeof(struct hns3_tx_basic_stats));
+
+ /* This register is read-clear */
+ (void)hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
}
- /*
- * 'packets' in hns3_tx_basic_stats and hns3_rx_basic_stats come
- * from hw->tqp_stats. And clearing tqp stats is like clearing
- * their source.
- */
hns3_tqp_stats_clear(hw);
- hns3_tqp_basic_stats_clear(eth_dev);
return 0;
}
@@ -881,6 +834,7 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
struct hns3_rx_basic_stats *rxq_stats;
struct hns3_rx_queue *rxq;
uint16_t i, j;
+ uint32_t cnt;
char *val;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -888,9 +842,17 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
if (rxq == NULL)
continue;
+ cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
+ /*
+ * Read hardware and software in adjacent positions to minimize
+ * the time difference.
+ */
rxq_stats = &rxq->basic_stats;
rxq_stats->errors = rxq->err_stats.l2_errors +
rxq->err_stats.pkt_len_errors;
+ stats->rcb_rx_ring_pktnum_rcd += cnt;
+ stats->rcb_rx_ring_pktnum[i] += cnt;
+
/*
* If HW statistics are reset by stats_reset, but a lot of
* residual packets exist in the hardware queue and these
@@ -919,6 +881,7 @@ hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
struct hns3_tx_basic_stats *txq_stats;
struct hns3_tx_queue *txq;
uint16_t i, j;
+ uint32_t cnt;
char *val;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -926,6 +889,10 @@ hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
if (txq == NULL)
continue;
+ cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
+ stats->rcb_tx_ring_pktnum_rcd += cnt;
+ stats->rcb_tx_ring_pktnum[i] += cnt;
+
txq_stats = &txq->basic_stats;
txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
@@ -939,54 +906,12 @@ hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
}
}
-static int
+static void
hns3_tqp_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
int *count)
{
- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret;
-
- /* Update tqp stats by read register */
- ret = hns3_update_tqp_stats(hw);
- if (ret) {
- hns3_err(hw, "Update tqp stats fail, ret = %d.", ret);
- return ret;
- }
-
hns3_rxq_basic_stats_get(dev, xstats, count);
hns3_txq_basic_stats_get(dev, xstats, count);
-
- return 0;
-}
-
-/*
- * The function is only called by hns3_dev_xstats_reset to clear
- * basic stats of per-queue. TQP stats are all cleared in hns3_stats_reset
- * which is called before this function.
- *
- * @param dev
- * Pointer to Ethernet device.
- */
-static void
-hns3_tqp_basic_stats_clear(struct rte_eth_dev *dev)
-{
- struct hns3_tx_queue *txq;
- struct hns3_rx_queue *rxq;
- uint16_t i;
-
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq)
- memset(&rxq->basic_stats, 0,
- sizeof(struct hns3_rx_basic_stats));
- }
-
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq)
- memset(&txq->basic_stats, 0,
- sizeof(struct hns3_tx_basic_stats));
- }
}
/*
@@ -1028,9 +953,7 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count = 0;
- ret = hns3_tqp_basic_stats_get(dev, xstats, &count);
- if (ret < 0)
- return ret;
+ hns3_tqp_basic_stats_get(dev, xstats, &count);
if (!hns->is_vf) {
/* Update Mac stats */
--
2.7.4

View File

@ -0,0 +1,341 @@
From 20570791fbb46112707b5ddb21da9446da8a938d Mon Sep 17 00:00:00 2001
From: Chengwen Feng <fengchengwen@huawei.com>
Date: Tue, 23 Mar 2021 19:21:00 +0800
Subject: [PATCH 059/189] net/hns3: add runtime config to select IO burst
function
Currently, the driver supports multiple IO burst functions and auto
selection of the most appropriate function based on offload
configuration.
Most applications such as l2fwd/l3fwd don't provide the means to
change offload configuration, so they will use the auto-selected IO
burst function.
This patch supports runtime config to select the IO burst function. It
adds two configs, rx_func_hint and tx_func_hint; both accept
vec/sve/simple/common.
The driver will use the following rules to select io burst func:
a. if hint equal vec and meet the vec Rx/Tx usage condition then use the
neon function.
b. if hint equal sve and meet the sve Rx/Tx usage condition then use the
sve function.
c. if hint equal simple and meet the simple Rx/Tx usage condition then
use the simple function.
d. if hint equal common then use the common function.
e. if hint not set then:
e.1. if meet the vec Rx/Tx usage condition then use the neon function.
e.2. if meet the simple Rx/Tx usage condition then use the simple
function.
e.3. else use the common function.
Note: the sve Rx/Tx usage condition based on the vec Rx/Tx usage
condition and runtime environment (which must support SVE).
In the previous versions, driver will preferred use the sve function
when meet the sve Rx/Tx usage condition, but in this case driver could
get better performance if use the neon function.
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
doc/guides/nics/hns3.rst | 37 +++++++++++++++++++
drivers/net/hns3/hns3_ethdev.c | 77 +++++++++++++++++++++++++++++++++++++++
drivers/net/hns3/hns3_ethdev.h | 15 ++++++++
drivers/net/hns3/hns3_ethdev_vf.c | 4 ++
drivers/net/hns3/hns3_rxtx.c | 54 ++++++++++++++++++++-------
5 files changed, 173 insertions(+), 14 deletions(-)
diff --git a/doc/guides/nics/hns3.rst b/doc/guides/nics/hns3.rst
index 8db8867..e8abd07 100644
--- a/doc/guides/nics/hns3.rst
+++ b/doc/guides/nics/hns3.rst
@@ -46,6 +46,43 @@ Prerequisites
- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+Runtime Config Options
+----------------------
+
+- ``rx_func_hint`` (default ``none``)
+
+ Used to select Rx burst function, supported value are ``vec``, ``sve``,
+ ``simple``, ``common``.
+ ``vec``, if supported use the ``vec`` Rx function which indicates the
+ default vector algorithm, neon for Kunpeng Arm platform.
+ ``sve``, if supported use the ``sve`` Rx function which indicates the
+ sve algorithm.
+ ``simple``, if supported use the ``simple`` Rx function which indicates
+ the scalar algorithm.
+ ``common``, if supported use the ``common`` Rx function which indicates
+ the scalar scattered algorithm.
+
+ When provided parameter is not supported, ``vec`` usage condition will
+ be first checked, if meets, use the ``vec``. Then, ``simple``, at last
+ ``common``.
+
+- ``tx_func_hint`` (default ``none``)
+
+ Used to select Tx burst function, supported value are ``vec``, ``sve``,
+ ``simple``, ``common``.
+ ``vec``, if supported use the ``vec`` Tx function which indicates the
+ default vector algorithm, neon for Kunpeng Arm platform.
+ ``sve``, if supported use the ``sve`` Tx function which indicates the
+ sve algorithm.
+ ``simple``, if supported use the ``simple`` Tx function which indicates
+ the scalar simple algorithm.
+ ``common``, if supported use the ``common`` Tx function which indicates
+ the scalar algorithm.
+
+ When provided parameter is not supported, ``vec`` usage condition will
+ be first checked, if meets, use the ``vec``. Then, ``simple``, at last
+ ``common``.
+
Driver compilation and testing
------------------------------
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 80f91a7..f6ec8ac 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -6,6 +6,7 @@
#include <rte_bus_pci.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
+#include <rte_kvargs.h>
#include "hns3_ethdev.h"
#include "hns3_logs.h"
@@ -6505,6 +6506,78 @@ hns3_get_module_info(struct rte_eth_dev *dev,
return 0;
}
+static int
+hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args)
+{
+ uint32_t hint = HNS3_IO_FUNC_HINT_NONE;
+
+ RTE_SET_USED(key);
+
+ if (strcmp(value, "vec") == 0)
+ hint = HNS3_IO_FUNC_HINT_VEC;
+ else if (strcmp(value, "sve") == 0)
+ hint = HNS3_IO_FUNC_HINT_SVE;
+ else if (strcmp(value, "simple") == 0)
+ hint = HNS3_IO_FUNC_HINT_SIMPLE;
+ else if (strcmp(value, "common") == 0)
+ hint = HNS3_IO_FUNC_HINT_COMMON;
+
+ /* If the hint is valid then update output parameters */
+ if (hint != HNS3_IO_FUNC_HINT_NONE)
+ *(uint32_t *)extra_args = hint;
+
+ return 0;
+}
+
+static const char *
+hns3_get_io_hint_func_name(uint32_t hint)
+{
+ switch (hint) {
+ case HNS3_IO_FUNC_HINT_VEC:
+ return "vec";
+ case HNS3_IO_FUNC_HINT_SVE:
+ return "sve";
+ case HNS3_IO_FUNC_HINT_SIMPLE:
+ return "simple";
+ case HNS3_IO_FUNC_HINT_COMMON:
+ return "common";
+ default:
+ return "none";
+ }
+}
+
+void
+hns3_parse_devargs(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
+ uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_kvargs *kvlist;
+
+ if (dev->device->devargs == NULL)
+ return;
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL);
+ if (!kvlist)
+ return;
+
+ rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT,
+ &hns3_parse_io_hint_func, &rx_func_hint);
+ rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT,
+ &hns3_parse_io_hint_func, &tx_func_hint);
+ rte_kvargs_free(kvlist);
+
+ if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE)
+ hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT,
+ hns3_get_io_hint_func_name(rx_func_hint));
+ hns->rx_func_hint = rx_func_hint;
+ if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE)
+ hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT,
+ hns3_get_io_hint_func_name(tx_func_hint));
+ hns->tx_func_hint = tx_func_hint;
+}
+
static const struct eth_dev_ops hns3_eth_dev_ops = {
.dev_configure = hns3_dev_configure,
.dev_start = hns3_dev_start,
@@ -6625,6 +6698,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
hw->adapter_state = HNS3_NIC_UNINITIALIZED;
hns->is_vf = false;
hw->data = eth_dev->data;
+ hns3_parse_devargs(eth_dev);
/*
* Set default max packet size according to the mtu
@@ -6758,5 +6832,8 @@ static struct rte_pci_driver rte_hns3_pmd = {
RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_hns3,
+ HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
+ HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common ");
RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE);
RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 52e6c49..67a69ba 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -772,9 +772,23 @@ struct hns3_adapter {
bool tx_simple_allowed;
bool tx_vec_allowed;
+ uint32_t rx_func_hint;
+ uint32_t tx_func_hint;
+
struct hns3_ptype_table ptype_tbl __rte_cache_min_aligned;
};
+enum {
+ HNS3_IO_FUNC_HINT_NONE = 0,
+ HNS3_IO_FUNC_HINT_VEC,
+ HNS3_IO_FUNC_HINT_SVE,
+ HNS3_IO_FUNC_HINT_SIMPLE,
+ HNS3_IO_FUNC_HINT_COMMON
+};
+
+#define HNS3_DEVARG_RX_FUNC_HINT "rx_func_hint"
+#define HNS3_DEVARG_TX_FUNC_HINT "tx_func_hint"
+
#define HNS3_DEV_SUPPORT_DCB_B 0x0
#define HNS3_DEV_SUPPORT_COPPER_B 0x1
#define HNS3_DEV_SUPPORT_UDP_GSO_B 0x2
@@ -975,6 +989,7 @@ int hns3_dev_infos_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *info);
void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
uint32_t link_speed, uint8_t link_duplex);
+void hns3_parse_devargs(struct rte_eth_dev *dev);
static inline bool
is_reset_pending(struct hns3_adapter *hns)
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 12af105..a4fd8ca 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2834,6 +2834,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
hw->adapter_state = HNS3_NIC_UNINITIALIZED;
hns->is_vf = true;
hw->data = eth_dev->data;
+ hns3_parse_devargs(eth_dev);
ret = hns3_reset_init(hw);
if (ret)
@@ -2962,3 +2963,6 @@ static struct rte_pci_driver rte_hns3vf_pmd = {
RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
+ HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
+ HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common ");
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 09b38d4..8e927f1 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -2689,13 +2689,26 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
+ bool vec_allowed, sve_allowed, simple_allowed;
+
+ vec_allowed = hns->rx_vec_allowed &&
+ hns3_rx_check_vec_support(dev) == 0;
+ sve_allowed = vec_allowed && hns3_check_sve_support();
+ simple_allowed = hns->rx_simple_allowed && !dev->data->scattered_rx &&
+ (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
+
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
+ return hns3_recv_pkts_vec;
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
+ return hns3_recv_pkts_vec_sve;
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
+ return hns3_recv_pkts;
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
+ return hns3_recv_scattered_pkts;
- if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
- return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve :
- hns3_recv_pkts_vec;
-
- if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
- (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
+ if (vec_allowed)
+ return hns3_recv_pkts_vec;
+ if (simple_allowed)
return hns3_recv_pkts;
return hns3_recv_scattered_pkts;
@@ -3930,19 +3943,32 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
{
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
struct hns3_adapter *hns = dev->data->dev_private;
+ bool vec_allowed, sve_allowed, simple_allowed;
- if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
- *prep = NULL;
- return hns3_check_sve_support() ? hns3_xmit_pkts_vec_sve :
- hns3_xmit_pkts_vec;
- }
+ vec_allowed = hns->tx_vec_allowed &&
+ hns3_tx_check_vec_support(dev) == 0;
+ sve_allowed = vec_allowed && hns3_check_sve_support();
+ simple_allowed = hns->tx_simple_allowed &&
+ offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
- if (hns->tx_simple_allowed &&
- offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) {
- *prep = NULL;
+ *prep = NULL;
+
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
+ return hns3_xmit_pkts_vec;
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
+ return hns3_xmit_pkts_vec_sve;
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
return hns3_xmit_pkts_simple;
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
+ *prep = hns3_prep_pkts;
+ return hns3_xmit_pkts;
}
+ if (vec_allowed)
+ return hns3_xmit_pkts_vec;
+ if (simple_allowed)
+ return hns3_xmit_pkts_simple;
+
*prep = hns3_prep_pkts;
return hns3_xmit_pkts;
}
--
2.7.4

View File

@ -0,0 +1,292 @@
From a9ababcfe9b34d979359f023833fbaebbb04e9d0 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Tue, 23 Mar 2021 19:21:01 +0800
Subject: [PATCH 060/189] net/hns3: support outer UDP checksum
Kunpeng930 supports outer UDP checksum; this patch adds support for it.
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_cmd.c | 3 ++
drivers/net/hns3/hns3_ethdev.c | 3 ++
drivers/net/hns3/hns3_ethdev.h | 4 ++
drivers/net/hns3/hns3_ethdev_vf.c | 3 ++
drivers/net/hns3/hns3_rxtx.c | 85 +++++++++++++++++++++++++++++-------
drivers/net/hns3/hns3_rxtx.h | 4 +-
drivers/net/hns3/hns3_rxtx_vec_sve.c | 5 ++-
7 files changed, 88 insertions(+), 19 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 8a2cc2d..f8d8b0a 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -433,6 +433,9 @@ hns3_parse_capability(struct hns3_hw *hw,
if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
1);
+ if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
+ hns3_set_bit(hw->capability,
+ HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
}
static uint32_t
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index f6ec8ac..5da00b3 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2620,6 +2620,9 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
DEV_TX_OFFLOAD_MBUF_FAST_FREE |
hns3_txvlan_cap_get(hw));
+ if (hns3_dev_outer_udp_cksum_supported(hw))
+ info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+
if (hns3_dev_indep_txrx_supported(hw))
info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 67a69ba..dc27bb1 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -798,6 +798,7 @@ enum {
#define HNS3_DEV_SUPPORT_INDEP_TXRX_B 0x6
#define HNS3_DEV_SUPPORT_STASH_B 0x7
#define HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B 0x9
+#define HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B 0xA
#define hns3_dev_dcb_supported(hw) \
hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_DCB_B)
@@ -831,6 +832,9 @@ enum {
#define hns3_dev_rxd_adv_layout_supported(hw) \
hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B)
+#define hns3_dev_outer_udp_cksum_supported(hw) \
+ hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B)
+
#define HNS3_DEV_PRIVATE_TO_HW(adapter) \
(&((struct hns3_adapter *)adapter)->hw)
#define HNS3_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index a4fd8ca..35c42ca 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -988,6 +988,9 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
DEV_TX_OFFLOAD_MBUF_FAST_FREE |
hns3_txvlan_cap_get(hw));
+ if (hns3_dev_outer_udp_cksum_supported(hw))
+ info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+
if (hns3_dev_indep_txrx_supported(hw))
info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 8e927f1..404c403 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -2967,7 +2967,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
rxm->outer_l2_len + rxm->outer_l3_len : 0;
paylen = rxm->pkt_len - hdr_len;
- desc->tx.paylen = rte_cpu_to_le_32(paylen);
+ desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen);
hns3_set_tso(desc, paylen, rxm);
/*
@@ -3204,8 +3204,10 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
{
struct hns3_desc *tx_ring = txq->tx_ring;
struct hns3_desc *desc = &tx_ring[tx_desc_id];
+ uint64_t ol_flags = m->ol_flags;
uint32_t tmp_outer = 0;
uint32_t tmp_inner = 0;
+ uint32_t tmp_ol4cs;
int ret;
/*
@@ -3215,7 +3217,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
* calculations, the length of the L2 header include the outer and
* inner, will be filled during the parsing of tunnel packects.
*/
- if (!(m->ol_flags & PKT_TX_TUNNEL_MASK)) {
+ if (!(ol_flags & PKT_TX_TUNNEL_MASK)) {
/*
* For non tunnel type the tunnel type id is 0, so no need to
* assign a value to it. Only the inner(normal) L2 header length
@@ -3230,7 +3232,8 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
* inner l2_len. It would lead a cksum error. So driver has to
* calculate the header length.
*/
- if (unlikely(!(m->ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+ if (unlikely(!(ol_flags &
+ (PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_UDP_CKSUM)) &&
m->outer_l2_len == 0)) {
struct rte_net_hdr_lens hdr_len;
(void)rte_net_get_ptype(m, &hdr_len,
@@ -3247,6 +3250,9 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
+ tmp_ol4cs = ol_flags & PKT_TX_OUTER_UDP_CKSUM ?
+ BIT(HNS3_TXD_OL4CS_B) : 0;
+ desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs);
return 0;
}
@@ -3376,31 +3382,78 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
return false;
}
+static bool
+hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+ uint32_t *l4_proto)
+{
+ struct rte_ipv4_hdr *ipv4_hdr;
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ m->outer_l2_len);
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ ipv4_hdr->hdr_checksum = 0;
+ if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
+ struct rte_udp_hdr *udp_hdr;
+ /*
+		 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
+		 * header for TSO packets.
+ */
+ if (ol_flags & PKT_TX_TCP_SEG)
+ return true;
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ m->outer_l2_len + m->outer_l3_len);
+ udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+
+ return true;
+ }
+ *l4_proto = ipv4_hdr->next_proto_id;
+ return false;
+}
+
+static bool
+hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+ uint32_t *l4_proto)
+{
+ struct rte_ipv6_hdr *ipv6_hdr;
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+ m->outer_l2_len);
+ if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
+ struct rte_udp_hdr *udp_hdr;
+ /*
+		 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
+		 * header for TSO packets.
+ */
+ if (ol_flags & PKT_TX_TCP_SEG)
+ return true;
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ m->outer_l2_len + m->outer_l3_len);
+ udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+
+ return true;
+ }
+ *l4_proto = ipv6_hdr->proto;
+ return false;
+}
+
static void
hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
{
uint64_t ol_flags = m->ol_flags;
uint32_t paylen, hdr_len, l4_proto;
+ struct rte_udp_hdr *udp_hdr;
if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
return;
if (ol_flags & PKT_TX_OUTER_IPV4) {
- struct rte_ipv4_hdr *ipv4_hdr;
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
- m->outer_l2_len);
- l4_proto = ipv4_hdr->next_proto_id;
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- ipv4_hdr->hdr_checksum = 0;
+ if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto))
+ return;
} else {
- struct rte_ipv6_hdr *ipv6_hdr;
- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
- m->outer_l2_len);
- l4_proto = ipv6_hdr->proto;
+ if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto))
+ return;
}
+
/* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */
if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) {
- struct rte_udp_hdr *udp_hdr;
hdr_len = m->l2_len + m->l3_len + m->l4_len;
hdr_len += m->outer_l2_len + m->outer_l3_len;
paylen = m->pkt_len - hdr_len;
@@ -3686,7 +3739,7 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
dma_addr = rte_mbuf_data_iova(*pkts);
txdp->addr = rte_cpu_to_le_64(dma_addr);
txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
- txdp->tx.paylen = 0;
+ txdp->tx.paylen_fd_dop_ol4cs = 0;
txdp->tx.type_cs_vlan_tso_len = 0;
txdp->tx.ol_type_vlan_len_msec = 0;
txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
@@ -3702,7 +3755,7 @@ hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
dma_addr = rte_mbuf_data_iova(*pkts);
txdp->addr = rte_cpu_to_le_64(dma_addr);
txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
- txdp->tx.paylen = 0;
+ txdp->tx.paylen_fd_dop_ol4cs = 0;
txdp->tx.type_cs_vlan_tso_len = 0;
txdp->tx.ol_type_vlan_len_msec = 0;
txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 9adeb24..cd04200 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -149,6 +149,7 @@
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
+#define HNS3_TXD_OL4CS_B 22
#define HNS3_L2_LEN_UNIT 1UL
#define HNS3_L3_LEN_UNIT 2UL
#define HNS3_L4_LEN_UNIT 2UL
@@ -234,7 +235,7 @@ struct hns3_desc {
};
};
- uint32_t paylen;
+ uint32_t paylen_fd_dop_ol4cs;
uint16_t tp_fe_sc_vld_ra_ri;
uint16_t mss;
} tx;
@@ -503,6 +504,7 @@ struct hns3_queue_info {
};
#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM | \
PKT_TX_IP_CKSUM | \
PKT_TX_TCP_SEG | \
diff --git a/drivers/net/hns3/hns3_rxtx_vec_sve.c b/drivers/net/hns3/hns3_rxtx_vec_sve.c
index 947c19f..f6c6f52 100644
--- a/drivers/net/hns3/hns3_rxtx_vec_sve.c
+++ b/drivers/net/hns3/hns3_rxtx_vec_sve.c
@@ -408,8 +408,9 @@ hns3_tx_fill_hw_ring_sve(struct hns3_tx_queue *txq,
(uint64_t *)&txdp->tx.outer_vlan_tag,
offsets, svdup_n_u64(0));
/* save offset 24~31byte of every BD */
- svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.paylen,
- offsets, svdup_n_u64(valid_bit));
+ svst1_scatter_u64offset_u64(pg,
+ (uint64_t *)&txdp->tx.paylen_fd_dop_ol4cs,
+ offsets, svdup_n_u64(valid_bit));
/* Increment bytes counter */
uint32_t idx;
--
2.7.4

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,287 @@
From 95824ab5efc3996a682c57af118571fdccc5f677 Mon Sep 17 00:00:00 2001
From: Hongbo Zheng <zhenghongbo3@huawei.com>
Date: Tue, 23 Mar 2021 19:21:03 +0800
Subject: [PATCH 062/189] net/hns3: delete redundant xstats RAS statistics
The current RAS code stores the reported RAS statistics in xstats.
This part of statistics is of little use in practice, and because
of the change of RAS scheme on Kunpeng930, the driver cannot
obtain the RAS information any more, so this patch deletes these
redundant RAS statistics.
Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 2 -
drivers/net/hns3/hns3_ethdev.h | 35 ---------------
drivers/net/hns3/hns3_intr.c | 1 -
drivers/net/hns3/hns3_stats.c | 100 +----------------------------------------
drivers/net/hns3/hns3_stats.h | 1 -
5 files changed, 1 insertion(+), 138 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 5da00b3..7bdc6f7 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -5768,14 +5768,12 @@ hns3_record_imp_error(struct hns3_adapter *hns)
reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
hns3_warn(hw, "Detected IMP RD poison!");
- hns3_error_int_stats_add(hns, "IMP_RD_POISON_INT_STS");
hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
}
if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
hns3_warn(hw, "Detected IMP CMDQ error!");
- hns3_error_int_stats_add(hns, "CMDQ_MEM_ECC_INT_STS");
hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
}
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index dc27bb1..dfe0c59 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -561,38 +561,6 @@ struct hns3_hw {
#define HNS3_FLAG_TC_BASE_SCH_MODE 1
#define HNS3_FLAG_VNET_BASE_SCH_MODE 2
-struct hns3_err_msix_intr_stats {
- uint64_t mac_afifo_tnl_int_cnt;
- uint64_t ppu_mpf_abn_int_st2_msix_cnt;
- uint64_t ssu_port_based_pf_int_cnt;
- uint64_t ppp_pf_abnormal_int_cnt;
- uint64_t ppu_pf_abnormal_int_msix_cnt;
-
- uint64_t imp_tcm_ecc_int_cnt;
- uint64_t cmdq_mem_ecc_int_cnt;
- uint64_t imp_rd_poison_int_cnt;
- uint64_t tqp_int_ecc_int_cnt;
- uint64_t msix_ecc_int_cnt;
- uint64_t ssu_ecc_multi_bit_int_0_cnt;
- uint64_t ssu_ecc_multi_bit_int_1_cnt;
- uint64_t ssu_common_ecc_int_cnt;
- uint64_t igu_int_cnt;
- uint64_t ppp_mpf_abnormal_int_st1_cnt;
- uint64_t ppp_mpf_abnormal_int_st3_cnt;
- uint64_t ppu_mpf_abnormal_int_st1_cnt;
- uint64_t ppu_mpf_abn_int_st2_ras_cnt;
- uint64_t ppu_mpf_abnormal_int_st3_cnt;
- uint64_t tm_sch_int_cnt;
- uint64_t qcn_fifo_int_cnt;
- uint64_t qcn_ecc_int_cnt;
- uint64_t ncsi_ecc_int_cnt;
- uint64_t ssu_port_based_err_int_cnt;
- uint64_t ssu_fifo_overflow_int_cnt;
- uint64_t ssu_ets_tcg_int_cnt;
- uint64_t igu_egu_tnl_int_cnt;
- uint64_t ppu_pf_abnormal_int_ras_cnt;
-};
-
/* vlan entry information. */
struct hns3_user_vlan_table {
LIST_ENTRY(hns3_user_vlan_table) next;
@@ -738,9 +706,6 @@ struct hns3_pf {
uint16_t max_umv_size;
uint16_t used_umv_size;
- /* Statistics information for abnormal interrupt */
- struct hns3_err_msix_intr_stats abn_int_stats;
-
bool support_sfp_query;
uint32_t fec_mode; /* current FEC mode for ethdev */
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 265dae8..c259f2e 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -1838,7 +1838,6 @@ hns3_find_highest_level(struct hns3_adapter *hns, const char *reg,
reset_level = err->reset_level;
need_reset = true;
}
- hns3_error_int_stats_add(hns, reg);
}
err++;
}
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index 941c75f..7cda36c 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -201,65 +201,6 @@ static const struct hns3_xstats_name_offset hns3_mac_strings[] = {
HNS3_MAC_STATS_OFFSET(mac_rx_send_app_bad_pkt_num)}
};
-static const struct hns3_xstats_name_offset hns3_error_int_stats_strings[] = {
- {"MAC_AFIFO_TNL_INT_R",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(mac_afifo_tnl_int_cnt)},
- {"PPU_MPF_ABNORMAL_INT_ST2_MSIX",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abn_int_st2_msix_cnt)},
- {"SSU_PORT_BASED_ERR_INT_MSIX",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_pf_int_cnt)},
- {"PPP_PF_ABNORMAL_INT_ST0",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_pf_abnormal_int_cnt)},
- {"PPU_PF_ABNORMAL_INT_ST_MSIX",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_int_msix_cnt)},
- {"IMP_TCM_ECC_INT_STS",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(imp_tcm_ecc_int_cnt)},
- {"CMDQ_MEM_ECC_INT_STS",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(cmdq_mem_ecc_int_cnt)},
- {"IMP_RD_POISON_INT_STS",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(imp_rd_poison_int_cnt)},
- {"TQP_INT_ECC_INT_STS",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(tqp_int_ecc_int_cnt)},
- {"MSIX_ECC_INT_STS",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(msix_ecc_int_cnt)},
- {"SSU_ECC_MULTI_BIT_INT_0",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ecc_multi_bit_int_0_cnt)},
- {"SSU_ECC_MULTI_BIT_INT_1",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ecc_multi_bit_int_1_cnt)},
- {"SSU_COMMON_ERR_INT",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_common_ecc_int_cnt)},
- {"IGU_INT_STS",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(igu_int_cnt)},
- {"PPP_MPF_ABNORMAL_INT_ST1",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_mpf_abnormal_int_st1_cnt)},
- {"PPP_MPF_ABNORMAL_INT_ST3",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_mpf_abnormal_int_st3_cnt)},
- {"PPU_MPF_ABNORMAL_INT_ST1",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_int_st1_cnt)},
- {"PPU_MPF_ABNORMAL_INT_ST2_RAS",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abn_int_st2_ras_cnt)},
- {"PPU_MPF_ABNORMAL_INT_ST3",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_int_st3_cnt)},
- {"TM_SCH_RINT",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(tm_sch_int_cnt)},
- {"QCN_FIFO_RINT",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(qcn_fifo_int_cnt)},
- {"QCN_ECC_RINT",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(qcn_ecc_int_cnt)},
- {"NCSI_ECC_INT_RPT",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ncsi_ecc_int_cnt)},
- {"SSU_PORT_BASED_ERR_INT_RAS",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_err_int_cnt)},
- {"SSU_FIFO_OVERFLOW_INT",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_fifo_overflow_int_cnt)},
- {"SSU_ETS_TCG_INT",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ets_tcg_int_cnt)},
- {"IGU_EGU_TNL_INT_STS",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(igu_egu_tnl_int_cnt)},
- {"PPU_PF_ABNORMAL_INT_ST_RAS",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_int_ras_cnt)},
-};
-
/* The statistic of reset */
static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
{"REQ_RESET_CNT",
@@ -333,9 +274,6 @@ static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
#define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
sizeof(hns3_mac_strings[0]))
-#define HNS3_NUM_ERROR_INT_XSTATS (sizeof(hns3_error_int_stats_strings) / \
- sizeof(hns3_error_int_stats_strings[0]))
-
#define HNS3_NUM_RESET_XSTATS (sizeof(hns3_reset_stats_strings) / \
sizeof(hns3_reset_stats_strings[0]))
@@ -363,7 +301,7 @@ static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
#define HNS3_NUM_IMISSED_XSTATS (sizeof(hns3_imissed_stats_strings) / \
sizeof(hns3_imissed_stats_strings[0]))
-#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \
+#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + \
HNS3_NUM_RESET_XSTATS + HNS3_NUM_IMISSED_XSTATS)
static void hns3_tqp_stats_clear(struct hns3_hw *hw);
@@ -750,23 +688,6 @@ hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
}
}
-void
-hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err)
-{
- struct hns3_pf *pf = &hns->pf;
- uint16_t i;
- char *addr;
-
- for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
- if (strcmp(hns3_error_int_stats_strings[i].name, err) == 0) {
- addr = (char *)&pf->abn_int_stats +
- hns3_error_int_stats_strings[i].offset;
- *(uint64_t *)addr += 1;
- break;
- }
- }
-}
-
static void
hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
int *count)
@@ -932,7 +853,6 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned int n)
{
struct hns3_adapter *hns = dev->data->dev_private;
- struct hns3_pf *pf = &hns->pf;
struct hns3_hw *hw = &hns->hw;
struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
struct hns3_mac_stats *mac_stats = &hw->mac_stats;
@@ -986,13 +906,6 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count++;
}
- for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
- addr = (char *)&pf->abn_int_stats +
- hns3_error_int_stats_strings[i].offset;
- xstats[count].value = *(uint64_t *)addr;
- xstats[count].id = count;
- count++;
- }
}
/* Get the reset stat */
@@ -1134,13 +1047,6 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
"%s", hns3_imissed_stats_strings[i].name);
count++;
}
-
- for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "%s", hns3_error_int_stats_strings[i].name);
- count++;
- }
}
for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
snprintf(xstats_names[count].name,
@@ -1358,7 +1264,6 @@ int
hns3_dev_xstats_reset(struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
- struct hns3_pf *pf = &hns->pf;
int ret;
/* Clear tqp stats */
@@ -1379,9 +1284,6 @@ hns3_dev_xstats_reset(struct rte_eth_dev *dev)
if (ret)
return ret;
- /* Clear error stats */
- memset(&pf->abn_int_stats, 0, sizeof(struct hns3_err_msix_intr_stats));
-
return 0;
}
diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h
index 70a9c5b..8ea69b4 100644
--- a/drivers/net/hns3/hns3_stats.h
+++ b/drivers/net/hns3/hns3_stats.h
@@ -164,7 +164,6 @@ int hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
const uint64_t *ids,
uint32_t size);
int hns3_stats_reset(struct rte_eth_dev *dev);
-void hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err);
int hns3_tqp_stats_init(struct hns3_hw *hw);
void hns3_tqp_stats_uninit(struct hns3_hw *hw);
int hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear);
--
2.7.4

View File

@ -0,0 +1,516 @@
From e8d6fcbfdb76309172f36de8b046c5543a4f4cf1 Mon Sep 17 00:00:00 2001
From: "Min Hu (Connor)" <humin29@huawei.com>
Date: Tue, 23 Mar 2021 19:21:04 +0800
Subject: [PATCH 063/189] net/hns3: support imissed stats for PF/VF
This patch adds function-level imissed stats for PF and VF. In
Kunpeng920, imissed is supported, only including RPU drop stats in PF.
In Kunpeng930, imissed is supported, including RPU drop stats and SSU
drop stats in PF.
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_cmd.h | 13 +++
drivers/net/hns3/hns3_ethdev.c | 2 +
drivers/net/hns3/hns3_ethdev.h | 21 ++++
drivers/net/hns3/hns3_ethdev_vf.c | 9 ++
drivers/net/hns3/hns3_regs.h | 2 +
drivers/net/hns3/hns3_stats.c | 234 +++++++++++++++++++++++++++++---------
drivers/net/hns3/hns3_stats.h | 1 +
7 files changed, 230 insertions(+), 52 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 094bf7e..e704d0c 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -111,6 +111,8 @@ enum hns3_opcode_type {
HNS3_OPC_QUERY_DEV_SPECS = 0x0050,
+ HNS3_OPC_SSU_DROP_REG = 0x0065,
+
/* MAC command */
HNS3_OPC_CONFIG_MAC_MODE = 0x0301,
HNS3_OPC_QUERY_LINK_STATUS = 0x0307,
@@ -957,6 +959,17 @@ struct hns3_query_rpu_cmd {
uint32_t rsv2[2];
};
+#define HNS3_OPC_SSU_DROP_REG_NUM 2
+
+struct hns3_query_ssu_cmd {
+ uint8_t rxtx;
+ uint8_t rsv[3];
+ uint32_t full_drop_cnt;
+ uint32_t part_drop_cnt;
+ uint32_t oq_drop_cnt;
+ uint32_t rev1[2];
+};
+
#define HNS3_MAX_TQP_NUM_HIP08_PF 64
#define HNS3_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HNS3_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 7bdc6f7..b5057da 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -3122,6 +3122,7 @@ hns3_get_capability(struct hns3_hw *hw)
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
+ hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
hw->rss_info.ipv6_sctp_offload_supported = false;
@@ -3140,6 +3141,7 @@ hns3_get_capability(struct hns3_hw *hw)
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE;
+ hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
hw->rss_info.ipv6_sctp_offload_supported = true;
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index dfe0c59..01561cc 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -426,6 +426,9 @@ struct hns3_queue_intr {
#define HNS3_TSO_SW_CAL_PSEUDO_H_CSUM 0
#define HNS3_TSO_HW_CAL_PSEUDO_H_CSUM 1
+#define HNS3_PKTS_DROP_STATS_MODE1 0
+#define HNS3_PKTS_DROP_STATS_MODE2 1
+
struct hns3_hw {
struct rte_eth_dev_data *data;
void *io_base;
@@ -544,6 +547,24 @@ struct hns3_hw {
* port won't be copied to the function which has set promisc mode.
*/
uint8_t promisc_mode;
+
+ /*
+	 * drop_stats_mode.
+	 * value range:
+	 * HNS3_PKTS_DROP_STATS_MODE1/HNS3_PKTS_DROP_STATS_MODE2
+	 *
+	 * - HNS3_PKTS_DROP_STATS_MODE1
+	 *   This mode is for Kunpeng920. In this mode, port level imissed
+	 *   stats is supported. It only includes RPU drop stats.
+	 *
+	 * - HNS3_PKTS_DROP_STATS_MODE2
+	 *   This mode is for Kunpeng930. In this mode, imissed stats and
+	 *   oerrors stats are supported. Function level imissed stats is
+	 *   supported. It includes RPU drop stats in VF, and both RPU drop
+	 *   stats and SSU drop stats in PF. Oerror stats is also supported in PF.
+ */
+ uint8_t drop_stats_mode;
+
uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
struct hns3_port_base_vlan_config port_base_vlan_cfg;
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 35c42ca..c567dff 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1221,6 +1221,7 @@ hns3vf_get_capability(struct hns3_hw *hw)
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
+ hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
hw->rss_info.ipv6_sctp_offload_supported = false;
hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
@@ -1238,6 +1239,7 @@ hns3vf_get_capability(struct hns3_hw *hw)
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
+ hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
hw->rss_info.ipv6_sctp_offload_supported = true;
hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
@@ -1875,6 +1877,13 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
if (ret)
goto err_get_config;
+ /* Hardware statistics of imissed registers cleared. */
+ ret = hns3_update_imissed_stats(hw, true);
+ if (ret) {
+ hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
+ goto err_set_tc_queue;
+ }
+
ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num);
if (ret) {
PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index 0540554..e141fe1 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -36,6 +36,8 @@
#define HNS3_GLOBAL_RESET_REG 0x20A00
#define HNS3_FUN_RST_ING 0x20C00
#define HNS3_GRO_EN_REG 0x28000
+
+#define HNS3_RPU_DROP_CNT_REG 0x28004
#define HNS3_RXD_ADV_LAYOUT_EN_REG 0x28008
/* Vector0 register bits for reset */
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index 7cda36c..e802c0b 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -269,6 +269,8 @@ static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
{"RPU_DROP_CNT",
HNS3_IMISSED_STATS_FIELD_OFFSET(rpu_rx_drop_cnt)},
+ {"SSU_DROP_CNT",
+ HNS3_IMISSED_STATS_FIELD_OFFSET(ssu_rx_drop_cnt)},
};
#define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
@@ -301,8 +303,7 @@ static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
#define HNS3_NUM_IMISSED_XSTATS (sizeof(hns3_imissed_stats_strings) / \
sizeof(hns3_imissed_stats_strings[0]))
-#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + \
- HNS3_NUM_RESET_XSTATS + HNS3_NUM_IMISSED_XSTATS)
+#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_RESET_XSTATS)
static void hns3_tqp_stats_clear(struct hns3_hw *hw);
@@ -419,7 +420,7 @@ hns3_query_update_mac_stats(struct rte_eth_dev *dev)
}
static int
-hns3_update_rpu_drop_stats(struct hns3_hw *hw)
+hns3_update_port_rpu_drop_stats(struct hns3_hw *hw)
{
struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
struct hns3_query_rpu_cmd *req;
@@ -449,11 +450,90 @@ hns3_update_rpu_drop_stats(struct hns3_hw *hw)
return 0;
}
+static void
+hns3_update_function_rpu_drop_stats(struct hns3_hw *hw)
+{
+ struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
+
+ stats->rpu_rx_drop_cnt += hns3_read_dev(hw, HNS3_RPU_DROP_CNT_REG);
+}
+
+static int
+hns3_update_rpu_drop_stats(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ int ret = 0;
+
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && !hns->is_vf)
+ ret = hns3_update_port_rpu_drop_stats(hw);
+ else if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2)
+ hns3_update_function_rpu_drop_stats(hw);
+
+ return ret;
+}
+
+static int
+hns3_get_ssu_drop_stats(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
+ int bd_num, bool is_rx)
+{
+ struct hns3_query_ssu_cmd *req;
+ int ret;
+ int i;
+
+ for (i = 0; i < bd_num - 1; i++) {
+ hns3_cmd_setup_basic_desc(&desc[i],
+ HNS3_OPC_SSU_DROP_REG, true);
+ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+ }
+ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_SSU_DROP_REG, true);
+ req = (struct hns3_query_ssu_cmd *)desc[0].data;
+ req->rxtx = is_rx ? 0 : 1;
+ ret = hns3_cmd_send(hw, desc, bd_num);
+
+ return ret;
+}
+
+static int
+hns3_update_port_rx_ssu_drop_stats(struct hns3_hw *hw)
+{
+ struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
+ struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
+ struct hns3_query_ssu_cmd *req;
+ uint64_t cnt;
+ int ret;
+
+ ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
+ true);
+ if (ret) {
+ hns3_err(hw, "failed to get Rx SSU drop stats, ret = %d", ret);
+ return ret;
+ }
+
+ req = (struct hns3_query_ssu_cmd *)desc[0].data;
+ cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
+ rte_le_to_cpu_32(req->full_drop_cnt) +
+ rte_le_to_cpu_32(req->part_drop_cnt);
+
+ stats->ssu_rx_drop_cnt += cnt;
+
+ return 0;
+}
+
int
hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
int ret;
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
+ return 0;
+
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf) {
+ ret = hns3_update_port_rx_ssu_drop_stats(hw);
+ if (ret)
+ return ret;
+ }
+
ret = hns3_update_rpu_drop_stats(hw);
if (ret)
return ret;
@@ -488,19 +568,17 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
uint16_t i;
int ret;
- if (!hns->is_vf) {
- /* Update imissed stats */
- ret = hns3_update_imissed_stats(hw, false);
- if (ret) {
- hns3_err(hw, "update imissed stats failed, ret = %d",
- ret);
- return ret;
- }
-
- rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt;
+ /* Update imissed stats */
+ ret = hns3_update_imissed_stats(hw, false);
+ if (ret) {
+ hns3_err(hw, "update imissed stats failed, ret = %d",
+ ret);
+ return ret;
}
+ rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt +
+ imissed_stats->ssu_rx_drop_cnt;
- /* Reads all the stats of a rxq in a loop to keep them synchronized */
+ /* Get the error stats and bytes of received packets */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
if (rxq == NULL)
@@ -556,17 +634,14 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
uint16_t i;
int ret;
- if (!hns->is_vf) {
- /*
- * Note: Reading hardware statistics of imissed registers will
- * clear them.
- */
- ret = hns3_update_imissed_stats(hw, true);
- if (ret) {
- hns3_err(hw, "clear imissed stats failed, ret = %d",
- ret);
- return ret;
- }
+ /*
+ * Note: Reading hardware statistics of imissed registers will
+ * clear them.
+ */
+ ret = hns3_update_imissed_stats(hw, true);
+ if (ret) {
+ hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
+ return ret;
}
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -630,6 +705,22 @@ hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev)
return 0;
}
+static int
+hns3_get_imissed_stats_num(struct hns3_adapter *hns)
+{
+#define NO_IMISSED_STATS_NUM 0
+#define RPU_STATS_ITEM_NUM 1
+ struct hns3_hw *hw = &hns->hw;
+
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
+ return NO_IMISSED_STATS_NUM;
+
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf)
+ return HNS3_NUM_IMISSED_XSTATS;
+
+ return RPU_STATS_ITEM_NUM;
+}
+
/* This function calculates the number of xstats based on the current config */
static int
hns3_xstats_calc_num(struct rte_eth_dev *dev)
@@ -647,13 +738,17 @@ hns3_xstats_calc_num(struct rte_eth_dev *dev)
uint16_t nb_tx_q = dev->data->nb_tx_queues;
int rx_comm_stats_num = nb_rx_q * HNS3_PF_VF_RX_COMM_STATS_NUM;
int tx_comm_stats_num = nb_tx_q * HNS3_PF_VF_TX_COMM_STATS_NUM;
+ int stats_num;
+
+ stats_num = rx_comm_stats_num + tx_comm_stats_num;
+ stats_num += hns3_get_imissed_stats_num(hns);
if (hns->is_vf)
- return rx_comm_stats_num + tx_comm_stats_num +
- HNS3_NUM_RESET_XSTATS;
+ stats_num += HNS3_NUM_RESET_XSTATS;
else
- return rx_comm_stats_num + tx_comm_stats_num +
- HNS3_FIX_NUM_STATS;
+ stats_num += HNS3_FIX_NUM_STATS;
+
+ return stats_num;
}
static void
@@ -835,6 +930,31 @@ hns3_tqp_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
hns3_txq_basic_stats_get(dev, xstats, count);
}
+static void
+hns3_imissed_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
+ int imissed_stats_num;
+ int cnt = *count;
+ char *addr;
+ uint16_t i;
+
+ imissed_stats_num = hns3_get_imissed_stats_num(hns);
+
+ for (i = 0; i < imissed_stats_num; i++) {
+ addr = (char *)imissed_stats +
+ hns3_imissed_stats_strings[i].offset;
+ xstats[cnt].value = *(uint64_t *)addr;
+ xstats[cnt].id = cnt;
+ cnt++;
+ }
+
+ *count = cnt;
+}
+
/*
* Retrieve extended(tqp | Mac) statistics of an Ethernet device.
* @param dev
@@ -854,7 +974,6 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
{
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
struct hns3_mac_stats *mac_stats = &hw->mac_stats;
struct hns3_reset_stats *reset_stats = &hw->reset.stats;
struct hns3_rx_bd_errors_stats *rx_err_stats;
@@ -890,24 +1009,17 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
xstats[count].id = count;
count++;
}
+ }
- ret = hns3_update_imissed_stats(hw, false);
- if (ret) {
- hns3_err(hw, "update imissed stats failed, ret = %d",
- ret);
- return ret;
- }
-
- for (i = 0; i < HNS3_NUM_IMISSED_XSTATS; i++) {
- addr = (char *)imissed_stats +
- hns3_imissed_stats_strings[i].offset;
- xstats[count].value = *(uint64_t *)addr;
- xstats[count].id = count;
- count++;
- }
-
+ ret = hns3_update_imissed_stats(hw, false);
+ if (ret) {
+ hns3_err(hw, "update imissed stats failed, ret = %d",
+ ret);
+ return ret;
}
+ hns3_imissed_stats_get(dev, xstats, &count);
+
/* Get the reset stat */
for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
addr = (char *)reset_stats + hns3_reset_stats_strings[i].offset;
@@ -992,6 +1104,28 @@ hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
}
}
+static void
+hns3_imissed_stats_name_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ uint32_t *count)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ uint32_t cnt = *count;
+ int imissed_stats_num;
+ uint16_t i;
+
+ imissed_stats_num = hns3_get_imissed_stats_num(hns);
+
+ for (i = 0; i < imissed_stats_num; i++) {
+ snprintf(xstats_names[cnt].name,
+ sizeof(xstats_names[cnt].name),
+ "%s", hns3_imissed_stats_strings[i].name);
+ cnt++;
+ }
+
+ *count = cnt;
+}
+
/*
* Retrieve names of extended statistics of an Ethernet device.
*
@@ -1040,14 +1174,10 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
"%s", hns3_mac_strings[i].name);
count++;
}
-
- for (i = 0; i < HNS3_NUM_IMISSED_XSTATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "%s", hns3_imissed_stats_strings[i].name);
- count++;
- }
}
+
+ hns3_imissed_stats_name_get(dev, xstats_names, &count);
+
for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h
index 8ea69b4..273be42 100644
--- a/drivers/net/hns3/hns3_stats.h
+++ b/drivers/net/hns3/hns3_stats.h
@@ -112,6 +112,7 @@ struct hns3_mac_stats {
struct hns3_rx_missed_stats {
uint64_t rpu_rx_drop_cnt;
+ uint64_t ssu_rx_drop_cnt;
};
/* store statistics names and its offset in stats structure */
--
2.7.4

View File

@ -0,0 +1,124 @@
From afd493a7e66236c538bf9ab7feb332200cbd2e76 Mon Sep 17 00:00:00 2001
From: "Min Hu (Connor)" <humin29@huawei.com>
Date: Tue, 23 Mar 2021 19:21:05 +0800
Subject: [PATCH 064/189] net/hns3: support oerrors stats in PF
This patch added oerrors stats for PF in kunpeng930.
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_ethdev.h | 1 +
drivers/net/hns3/hns3_stats.c | 64 +++++++++++++++++++++++++++++++++++++++++-
2 files changed, 64 insertions(+), 1 deletion(-)
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 01561cc..6800ee0 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -443,6 +443,7 @@ struct hns3_hw {
/* Include Mac stats | Rx stats | Tx stats */
struct hns3_mac_stats mac_stats;
struct hns3_rx_missed_stats imissed_stats;
+ uint64_t oerror_stats;
uint32_t fw_version;
uint16_t num_msi;
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index e802c0b..1af689f 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -519,6 +519,31 @@ hns3_update_port_rx_ssu_drop_stats(struct hns3_hw *hw)
return 0;
}
+static int
+hns3_update_port_tx_ssu_drop_stats(struct hns3_hw *hw)
+{
+ struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
+ struct hns3_query_ssu_cmd *req;
+ uint64_t cnt;
+ int ret;
+
+ ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
+ false);
+ if (ret) {
+ hns3_err(hw, "failed to get Tx SSU drop stats, ret = %d", ret);
+ return ret;
+ }
+
+ req = (struct hns3_query_ssu_cmd *)desc[0].data;
+ cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
+ rte_le_to_cpu_32(req->full_drop_cnt) +
+ rte_le_to_cpu_32(req->part_drop_cnt);
+
+ hw->oerror_stats += cnt;
+
+ return 0;
+}
+
int
hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
{
@@ -544,6 +569,25 @@ hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
return 0;
}
+static int
+hns3_update_oerror_stats(struct hns3_hw *hw, bool is_clear)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ int ret;
+
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 || hns->is_vf)
+ return 0;
+
+ ret = hns3_update_port_tx_ssu_drop_stats(hw);
+ if (ret)
+ return ret;
+
+ if (is_clear)
+ hw->oerror_stats = 0;
+
+ return 0;
+}
+
/*
* Query tqp tx queue statistics ,opcode id: 0x0B03.
* Query tqp rx queue statistics ,opcode id: 0x0B13.
@@ -608,7 +652,14 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
rte_stats->obytes += txq->basic_stats.bytes;
}
- rte_stats->oerrors = 0;
+ ret = hns3_update_oerror_stats(hw, false);
+ if (ret) {
+ hns3_err(hw, "update oerror stats failed, ret = %d",
+ ret);
+ return ret;
+ }
+ rte_stats->oerrors = hw->oerror_stats;
+
/*
* If HW statistics are reset by stats_reset, but a lot of residual
* packets exist in the hardware queue and these packets are error
@@ -644,6 +695,17 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
return ret;
}
+ /*
+ * Note: Reading hardware statistics of oerror registers will
+ * clear them.
+ */
+ ret = hns3_update_oerror_stats(hw, true);
+ if (ret) {
+ hns3_err(hw, "clear oerror stats failed, ret = %d",
+ ret);
+ return ret;
+ }
+
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
if (rxq == NULL)
--
2.7.4

View File

@ -0,0 +1,128 @@
From 71c308ebc61180d6f7a65c9f4dfdfc5e6d1fb782 Mon Sep 17 00:00:00 2001
From: Hongbo Zheng <zhenghongbo3@huawei.com>
Date: Tue, 23 Mar 2021 19:21:06 +0800
Subject: [PATCH 065/189] net/hns3: support Tx descriptor status query
Add support for query Tx descriptor status in hns3 driver. Check the
descriptor specified and provide the status information of the
corresponding descriptor.
Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
doc/guides/nics/features/hns3.ini | 1 +
doc/guides/nics/features/hns3_vf.ini | 1 +
drivers/net/hns3/hns3_ethdev.c | 1 +
drivers/net/hns3/hns3_ethdev_vf.c | 1 +
drivers/net/hns3/hns3_rxtx.c | 28 ++++++++++++++++++++++++++++
drivers/net/hns3/hns3_rxtx.h | 1 +
6 files changed, 33 insertions(+)
diff --git a/doc/guides/nics/features/hns3.ini b/doc/guides/nics/features/hns3.ini
index 00a26cd..445d391 100644
--- a/doc/guides/nics/features/hns3.ini
+++ b/doc/guides/nics/features/hns3.ini
@@ -34,6 +34,7 @@ L4 checksum offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y
diff --git a/doc/guides/nics/features/hns3_vf.ini b/doc/guides/nics/features/hns3_vf.ini
index f3dd239..eb55b4f 100644
--- a/doc/guides/nics/features/hns3_vf.ini
+++ b/doc/guides/nics/features/hns3_vf.ini
@@ -32,6 +32,7 @@ L4 checksum offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index b5057da..5b07183 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -6774,6 +6774,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
+ eth_dev->tx_descriptor_status = NULL;
rte_free(eth_dev->process_private);
eth_dev->process_private = NULL;
return ret;
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index c567dff..2688c19 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2917,6 +2917,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
+ eth_dev->tx_descriptor_status = NULL;
rte_free(eth_dev->process_private);
eth_dev->process_private = NULL;
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 404c403..efdb49a 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4044,6 +4044,7 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
eth_dev->tx_pkt_prepare = prep;
+ eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
} else {
eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
@@ -4256,6 +4257,33 @@ hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
return -ENOTSUP;
}
+int
+hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ volatile struct hns3_desc *txdp;
+ struct hns3_tx_queue *txq;
+ struct rte_eth_dev *dev;
+ uint16_t desc_id;
+
+ txq = (struct hns3_tx_queue *)tx_queue;
+ if (offset >= txq->nb_tx_desc)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[txq->port_id];
+ if (dev->tx_pkt_burst != hns3_xmit_pkts_simple &&
+ dev->tx_pkt_burst != hns3_xmit_pkts &&
+ dev->tx_pkt_burst != hns3_xmit_pkts_vec_sve &&
+ dev->tx_pkt_burst != hns3_xmit_pkts_vec)
+ return RTE_ETH_TX_DESC_UNAVAIL;
+
+ desc_id = (txq->next_to_use + offset) % txq->nb_tx_desc;
+ txdp = &txq->tx_ring[desc_id];
+ if (txdp->tx.tp_fe_sc_vld_ra_ri & rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
+ return RTE_ETH_TX_DESC_FULL;
+ else
+ return RTE_ETH_TX_DESC_DONE;
+}
+
uint32_t
hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index cd04200..82d5aa0 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -720,5 +720,6 @@ void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
+int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
#endif /* _HNS3_RXTX_H_ */
--
2.7.4

View File

@ -0,0 +1,137 @@
From d29daf59b08c1314190c6d1cab0e22ee176b6c0f Mon Sep 17 00:00:00 2001
From: Hongbo Zheng <zhenghongbo3@huawei.com>
Date: Tue, 23 Mar 2021 19:21:07 +0800
Subject: [PATCH 066/189] net/hns3: support Rx descriptor status query
Add support for query Rx descriptor status in hns3 driver. Check the
descriptor specified and provide the status information of the
corresponding descriptor.
Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
doc/guides/nics/features/hns3.ini | 1 +
doc/guides/nics/features/hns3_vf.ini | 1 +
drivers/net/hns3/hns3_ethdev.c | 1 +
drivers/net/hns3/hns3_ethdev_vf.c | 1 +
drivers/net/hns3/hns3_rxtx.c | 36 ++++++++++++++++++++++++++++++++++++
drivers/net/hns3/hns3_rxtx.h | 1 +
6 files changed, 41 insertions(+)
diff --git a/doc/guides/nics/features/hns3.ini b/doc/guides/nics/features/hns3.ini
index 445d391..d407b2f 100644
--- a/doc/guides/nics/features/hns3.ini
+++ b/doc/guides/nics/features/hns3.ini
@@ -34,6 +34,7 @@ L4 checksum offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
+Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
diff --git a/doc/guides/nics/features/hns3_vf.ini b/doc/guides/nics/features/hns3_vf.ini
index eb55b4f..a0fd56d 100644
--- a/doc/guides/nics/features/hns3_vf.ini
+++ b/doc/guides/nics/features/hns3_vf.ini
@@ -33,6 +33,7 @@ Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
Tx descriptor status = Y
+Rx descriptor status = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 5b07183..12cc3ac 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -6772,6 +6772,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
err_mp_init_secondary:
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
+ eth_dev->rx_descriptor_status = NULL;
eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
eth_dev->tx_descriptor_status = NULL;
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 2688c19..6404264 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2915,6 +2915,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
err_mp_init_secondary:
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
+ eth_dev->rx_descriptor_status = NULL;
eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
eth_dev->tx_descriptor_status = NULL;
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index efdb49a..6a7c360 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4042,6 +4042,7 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
__atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
+ eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
eth_dev->tx_pkt_prepare = prep;
eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
@@ -4258,6 +4259,41 @@ hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
}
int
+hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ volatile struct hns3_desc *rxdp;
+ struct hns3_rx_queue *rxq;
+ struct rte_eth_dev *dev;
+ uint32_t bd_base_info;
+ uint16_t desc_id;
+
+ rxq = (struct hns3_rx_queue *)rx_queue;
+ if (offset >= rxq->nb_rx_desc)
+ return -EINVAL;
+
+ desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc;
+ rxdp = &rxq->rx_ring[desc_id];
+ bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
+ dev = &rte_eth_devices[rxq->port_id];
+ if (dev->rx_pkt_burst == hns3_recv_pkts ||
+ dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
+ if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ } else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve){
+ if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ } else {
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ }
+
+ if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
+ return RTE_ETH_RX_DESC_AVAIL;
+ else
+ return RTE_ETH_RX_DESC_DONE;
+}
+
+int
hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
volatile struct hns3_desc *txdp;
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 82d5aa0..f9b3048 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -720,6 +720,7 @@ void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
+int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
#endif /* _HNS3_RXTX_H_ */
--
2.7.4

View File

@ -0,0 +1,57 @@
From 3a77c6eecf9089843c3f4452139c07ffe5c6823d Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Tue, 23 Mar 2021 21:45:51 +0800
Subject: [PATCH 067/189] net/hns3: fix reporting undefined speed
There may be a case in the future where the speed obtained from the
firmware is undefined (such as 400G or another rate) while the link
status of the device is up. In this case, the PMD driver will report
100Mbps to the user in the "hns3_dev_link_update" API, which is
unreasonable. Besides, if the speed from the firmware is zero, the
driver should report zero instead of 100Mbps.
Fixes: 59fad0f32135 ("net/hns3: support link update operation")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 5 ++++-
drivers/net/hns3/hns3_ethdev_vf.c | 5 ++++-
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 12cc3ac..55e2f07 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2725,7 +2725,10 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev,
new_link.link_speed = mac->link_speed;
break;
default:
- new_link.link_speed = ETH_SPEED_NUM_100M;
+ if (mac->link_status)
+ new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ else
+ new_link.link_speed = ETH_SPEED_NUM_NONE;
break;
}
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 6404264..26f0698 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2123,7 +2123,10 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
new_link.link_speed = mac->link_speed;
break;
default:
- new_link.link_speed = ETH_SPEED_NUM_100M;
+ if (mac->link_status)
+ new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ else
+ new_link.link_speed = ETH_SPEED_NUM_NONE;
break;
}
--
2.7.4

View File

@ -0,0 +1,35 @@
From 2043f4bf46fe0403c6015da614d0bb3f9f9f4344 Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Tue, 23 Mar 2021 21:45:52 +0800
Subject: [PATCH 068/189] net/hns3: fix build for SVE path
The 'queue_full_cnt' stats have been encapsulated in 'dfx_stats'.
However, the modification in the SVE algorithm is omitted.
As a result, the driver fails to be compiled when the SVE
algorithm is used.
Fixes: 9b77f1fe303f ("net/hns3: encapsulate DFX stats in datapath")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_rxtx_vec_sve.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/hns3/hns3_rxtx_vec_sve.c b/drivers/net/hns3/hns3_rxtx_vec_sve.c
index f6c6f52..2700e6e 100644
--- a/drivers/net/hns3/hns3_rxtx_vec_sve.c
+++ b/drivers/net/hns3/hns3_rxtx_vec_sve.c
@@ -439,7 +439,7 @@ hns3_xmit_fixed_burst_vec_sve(void *__restrict tx_queue,
nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
if (unlikely(nb_pkts == 0)) {
- txq->queue_full_cnt++;
+ txq->dfx_stats.queue_full_cnt++;
return 0;
}
--
2.7.4

View File

@ -0,0 +1,34 @@
From 5233a6a3449ff374def3469b79d98a85e6cfd6f1 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Tue, 23 Mar 2021 21:45:53 +0800
Subject: [PATCH 069/189] net/hns3: fix processing Tx offload flags
Currently, if the PKT_TX_TCP_SEG and PKT_TX_TCP_CKSUM offload flags are
set at the same time, the hns3 PMD cannot process the descriptors
correctly. This patch fixes it by adding handling for this situation.
Fixes: fb6eb9009f41 ("net/hns3: fix Tx checksum with fixed header length")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_rxtx.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 6a7c360..62c56f6 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -3291,6 +3291,7 @@ hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
uint32_t tmp;
/* Enable L4 checksum offloads */
switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
+ case PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG:
case PKT_TX_TCP_CKSUM:
case PKT_TX_TCP_SEG:
tmp = *type_cs_vlan_tso_len;
--
2.7.4

View File

@ -0,0 +1,225 @@
From 77d2a11852ab27e6ac367cc3bf2146b9e1aa2d0c Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Tue, 23 Mar 2021 21:45:54 +0800
Subject: [PATCH 070/189] net/hns3: fix Tx checksum for UDP packets with
special port
For Kunpeng920 network engine, UDP packets with destination port 6081,
4789 or 4790 will be identified as tunnel packets. If the UDP CKSUM
offload is set in the mbuf, and the TX tunnel mask is not set, the
CKSUM of these packets will be wrong. In this case, the upper layer
user may not identify the packet as a tunnel packet, and processes it
as non-tunnel packet, and expect to offload the outer UDP CKSUM, so
they may not fill the outer L2/L3 length to mbuf. However, the HW
identifies these packet as tunnel packets and therefore offload the
inner UDP CKSUM. As a result, the inner and outer UDP CKSUM are
incorrect. A similar checksum error also exists for non-tunnel UDP
packets with the preceding special destination ports.
For the new generation Kunpeng930 network engine, the above errata
have been fixed. Therefore, the concept of udp_cksum_mode is
introduced. There are two udp_cksum_mode for hns3 PMD,
HNS3_SPECIAL_PORT_HW_CKSUM_MODE means HW could solve the above
problem. And in HNS3_SPECIAL_PORT_SW_CKSUM_MODE, hns3 PMD will check
packets in the Tx prepare and perform the UDP CKSUM for such packets
to avoid a checksum error.
Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 2 ++
drivers/net/hns3/hns3_ethdev.h | 19 ++++++++++++
drivers/net/hns3/hns3_rxtx.c | 68 ++++++++++++++++++++++++++++++++++++++++++
drivers/net/hns3/hns3_rxtx.h | 16 ++++++++++
4 files changed, 105 insertions(+)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 55e2f07..3e0b28a 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -3129,6 +3129,7 @@ hns3_get_capability(struct hns3_hw *hw)
hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
hw->rss_info.ipv6_sctp_offload_supported = false;
+ hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE;
return 0;
}
@@ -3148,6 +3149,7 @@ hns3_get_capability(struct hns3_hw *hw)
hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
hw->rss_info.ipv6_sctp_offload_supported = true;
+ hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE;
return 0;
}
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 6800ee0..eb2203c 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -47,6 +47,9 @@
#define HNS3_UNLIMIT_PROMISC_MODE 0
#define HNS3_LIMIT_PROMISC_MODE 1
+#define HNS3_SPECIAL_PORT_SW_CKSUM_MODE 0
+#define HNS3_SPECIAL_PORT_HW_CKSUM_MODE 1
+
#define HNS3_UC_MACADDR_NUM 128
#define HNS3_VF_UC_MACADDR_NUM 48
#define HNS3_MC_MACADDR_NUM 128
@@ -567,6 +570,22 @@ struct hns3_hw {
uint8_t drop_stats_mode;
uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
+ /*
+ * udp checksum mode.
+ * value range:
+ * HNS3_SPECIAL_PORT_HW_CKSUM_MODE/HNS3_SPECIAL_PORT_SW_CKSUM_MODE
+ *
+ * - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
+ * In this mode, HW can not do checksum for special UDP port like
+ * 4789, 4790, 6081 for non-tunnel UDP packets and UDP tunnel
+ * packets without the PKT_TX_TUNEL_MASK in the mbuf. So, PMD need
+ * do the checksum for these packets to avoid a checksum error.
+ *
+ * - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
+ * In this mode, HW does not have the preceding problems and can
+ * directly calculate the checksum of these UDP packets.
+ */
+ uint8_t udp_cksum_mode;
struct hns3_port_base_vlan_config port_base_vlan_cfg;
/*
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 62c56f6..626f91f 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -5,6 +5,7 @@
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_geneve.h>
#include <rte_vxlan.h>
#include <rte_ethdev_driver.h>
#include <rte_io.h>
@@ -2845,6 +2846,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
HNS3_RING_TX_TAIL_REG);
txq->min_tx_pkt_len = hw->min_tx_pkt_len;
txq->tso_mode = hw->tso_mode;
+ txq->udp_cksum_mode = hw->udp_cksum_mode;
memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
@@ -3548,6 +3550,69 @@ hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
}
#endif
+static uint16_t
+hns3_udp_cksum_help(struct rte_mbuf *m)
+{
+ uint64_t ol_flags = m->ol_flags;
+ uint16_t cksum = 0;
+ uint32_t l4_len;
+
+ if (ol_flags & PKT_TX_IPV4) {
+ struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv4_hdr *, m->l2_len);
+ l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len;
+ } else {
+ struct rte_ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv6_hdr *, m->l2_len);
+ l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);
+ }
+
+ rte_raw_cksum_mbuf(m, m->l2_len + m->l3_len, l4_len, &cksum);
+
+ cksum = ~cksum;
+ /*
+ * RFC 768:If the computed checksum is zero for UDP, it is transmitted
+ * as all ones
+ */
+ if (cksum == 0)
+ cksum = 0xffff;
+
+ return (uint16_t)cksum;
+}
+
+static bool
+hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
+{
+ uint64_t ol_flags = m->ol_flags;
+ struct rte_udp_hdr *udp_hdr;
+ uint16_t dst_port;
+
+ if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE ||
+ ol_flags & PKT_TX_TUNNEL_MASK ||
+ (ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM)
+ return true;
+ /*
+ * A UDP packet with the same dst_port as VXLAN\VXLAN_GPE\GENEVE will
+ * be recognized as a tunnel packet in HW. In this case, if UDP CKSUM
+ * offload is set and the tunnel mask has not been set, the CKSUM will
+ * be wrong since the header length is wrong and driver should complete
+ * the CKSUM to avoid CKSUM error.
+ */
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ m->l2_len + m->l3_len);
+ dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
+ switch (dst_port) {
+ case RTE_VXLAN_DEFAULT_PORT:
+ case RTE_VXLAN_GPE_DEFAULT_PORT:
+ case RTE_GENEVE_DEFAULT_PORT:
+ udp_hdr->dgram_cksum = hns3_udp_cksum_help(m);
+ m->ol_flags = ol_flags & ~PKT_TX_L4_MASK;
+ return false;
+ default:
+ return true;
+ }
+}
+
static int
hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
{
@@ -3592,6 +3657,9 @@ hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
return ret;
}
+ if (!hns3_validate_tunnel_cksum(tx_queue, m))
+ return 0;
+
hns3_outer_header_cksum_prepare(m);
return 0;
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index f9b3048..6689397 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -465,6 +465,22 @@ struct hns3_tx_queue {
*/
uint8_t tso_mode;
/*
+ * udp checksum mode.
+ * value range:
+ * HNS3_SPECIAL_PORT_HW_CKSUM_MODE/HNS3_SPECIAL_PORT_SW_CKSUM_MODE
+ *
+ * - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
+ * In this mode, HW can not do checksum for special UDP port like
+ * 4789, 4790, 6081 for non-tunnel UDP packets and UDP tunnel
+ * packets without the PKT_TX_TUNEL_MASK in the mbuf. So, PMD need
+ * do the checksum for these packets to avoid a checksum error.
+ *
+ * - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
+ * In this mode, HW does not have the preceding problems and can
+ * directly calculate the checksum of these UDP packets.
+ */
+ uint8_t udp_cksum_mode;
+ /*
* The minimum length of the packet supported by hardware in the Tx
* direction.
*/
--
2.7.4

View File

@ -0,0 +1,112 @@
From 46602854722b87761da2e56e6bd95461ddd7b6ea Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Tue, 23 Mar 2021 21:45:55 +0800
Subject: [PATCH 071/189] net/hns3: fix link update when failed to get link
info
In the "hns3_dev_link_update" API, the link information of the port is
obtained first, and then 'dev_link' in dev->data is updated. When the
driver is resetting or fails to obtain link info, the current driver
still reports the previous link info to the user. This may cause
dev->data->dev_link to be inconsistent with the hw link status.
Therefore, the link status consistency between the hardware, driver,
and framework can be ensured in this interface regardless of whether
the driver is normal or abnormal.
Fixes: 109e4dd1bd7a ("net/hns3: get link state change through mailbox")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 55 +++++++++++++++++++++++++++++-------------
1 file changed, 38 insertions(+), 17 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 3e0b28a..356c52a 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2698,20 +2698,22 @@ hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
}
static int
-hns3_dev_link_update(struct rte_eth_dev *eth_dev,
- __rte_unused int wait_to_complete)
+hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
{
- struct hns3_adapter *hns = eth_dev->data->dev_private;
- struct hns3_hw *hw = &hns->hw;
- struct hns3_mac *mac = &hw->mac;
- struct rte_eth_link new_link;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- if (!hns3_is_reset_pending(hns)) {
- hns3_update_link_status(hw);
- hns3_update_link_info(eth_dev);
- }
+ (void)hns3_update_link_status(hw);
+
+ return hns3_update_link_info(eth_dev);
+}
+
+static void
+hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
+ struct rte_eth_link *new_link)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct hns3_mac *mac = &hw->mac;
- memset(&new_link, 0, sizeof(new_link));
switch (mac->link_speed) {
case ETH_SPEED_NUM_10M:
case ETH_SPEED_NUM_100M:
@@ -2722,20 +2724,39 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev,
case ETH_SPEED_NUM_50G:
case ETH_SPEED_NUM_100G:
case ETH_SPEED_NUM_200G:
- new_link.link_speed = mac->link_speed;
+ new_link->link_speed = mac->link_speed;
break;
default:
if (mac->link_status)
- new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ new_link->link_speed = ETH_SPEED_NUM_UNKNOWN;
else
- new_link.link_speed = ETH_SPEED_NUM_NONE;
+ new_link->link_speed = ETH_SPEED_NUM_NONE;
break;
}
- new_link.link_duplex = mac->link_duplex;
- new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
- new_link.link_autoneg =
+ new_link->link_duplex = mac->link_duplex;
+ new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+ new_link->link_autoneg =
!(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+}
+
+static int
+hns3_dev_link_update(struct rte_eth_dev *eth_dev,
+ __rte_unused int wait_to_complete)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct hns3_mac *mac = &hw->mac;
+ struct rte_eth_link new_link;
+ int ret;
+
+ ret = hns3_update_port_link_info(eth_dev);
+ if (ret) {
+ mac->link_status = ETH_LINK_DOWN;
+ hns3_err(hw, "failed to get port link info, ret = %d.", ret);
+ }
+
+ memset(&new_link, 0, sizeof(new_link));
+ hns3_setup_linkstatus(eth_dev, &new_link);
return rte_eth_linkstatus_set(eth_dev, &new_link);
}
--
2.7.4

View File

@ -0,0 +1,211 @@
From 081a3335ccdc6cce70ab9cde9e5df87e2dcd591f Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Tue, 23 Mar 2021 21:45:56 +0800
Subject: [PATCH 072/189] net/hns3: fix long task queue pairs reset time
Currently, the queue reset process needs to be performed one by one,
which is inefficient. However, the queues reset in the same function
are almost at the same stage. To optimize the queue reset process, a new
function has been added to the firmware command HNS3_OPC_CFG_RST_TRIGGER
to reset all queues in the same function at a time. And the related
queue reset MBX message is adjusted in the same way too.
Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_cmd.h | 8 ++-
drivers/net/hns3/hns3_rxtx.c | 125 ++++++++++++++++++++++++++++++++++++-------
2 files changed, 114 insertions(+), 19 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index e704d0c..30aca82 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -933,10 +933,16 @@ struct hns3_reset_tqp_queue_cmd {
#define HNS3_CFG_RESET_MAC_B 3
#define HNS3_CFG_RESET_FUNC_B 7
+#define HNS3_CFG_RESET_RCB_B 1
struct hns3_reset_cmd {
uint8_t mac_func_reset;
uint8_t fun_reset_vfid;
- uint8_t rsv[22];
+ uint8_t fun_reset_rcb;
+ uint8_t rsv1;
+ uint16_t fun_reset_rcb_vqid_start;
+ uint16_t fun_reset_rcb_vqid_num;
+ uint8_t fun_reset_rcb_return_status;
+ uint8_t rsv2[15];
};
#define HNS3_QUERY_DEV_SPECS_BD_NUM 4
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 626f91f..0596c9c 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -629,10 +629,6 @@ hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
uint64_t end;
int ret;
- ret = hns3_tqp_enable(hw, queue_id, false);
- if (ret)
- return ret;
-
/*
* In current version VF is not supported when PF is driven by DPDK
* driver, all task queue pairs are mapped to PF function, so PF's queue
@@ -679,11 +675,6 @@ hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
uint8_t msg_data[2];
int ret;
- /* Disable VF's queue before send queue reset msg to PF */
- ret = hns3_tqp_enable(hw, queue_id, false);
- if (ret)
- return ret;
-
memcpy(msg_data, &queue_id, sizeof(uint16_t));
ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
@@ -695,14 +686,105 @@ hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
}
static int
-hns3_reset_tqp(struct hns3_adapter *hns, uint16_t queue_id)
+hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status)
{
- struct hns3_hw *hw = &hns->hw;
+ struct hns3_reset_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
- if (hns->is_vf)
- return hns3vf_reset_tqp(hw, queue_id);
- else
- return hns3pf_reset_tqp(hw, queue_id);
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
+ req = (struct hns3_reset_cmd *)desc.data;
+ hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_RCB_B, 1);
+
+ /*
+ * The start qid should be the global qid of the first tqp of the
+ * function which should be reset in this port. Since our PF not
+ * support take over of VFs, so we only need to reset function 0,
+ * and its start qid is always 0.
+ */
+ req->fun_reset_rcb_vqid_start = rte_cpu_to_le_16(0);
+ req->fun_reset_rcb_vqid_num = rte_cpu_to_le_16(hw->cfg_max_queues);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "fail to send rcb reset cmd, ret = %d.", ret);
+ return ret;
+ }
+
+ *reset_status = req->fun_reset_rcb_return_status;
+ return 0;
+}
+
+static int
+hns3pf_reset_all_tqps(struct hns3_hw *hw)
+{
+#define HNS3_RESET_RCB_NOT_SUPPORT 0U
+#define HNS3_RESET_ALL_TQP_SUCCESS 1U
+ uint8_t reset_status;
+ int ret;
+ int i;
+
+ ret = hns3_reset_rcb_cmd(hw, &reset_status);
+ if (ret)
+ return ret;
+
+ /*
+ * If the firmware version is low, it may not support the rcb reset
+ * which means reset all the tqps at a time. In this case, we should
+ * reset tqps one by one.
+ */
+ if (reset_status == HNS3_RESET_RCB_NOT_SUPPORT) {
+ for (i = 0; i < hw->cfg_max_queues; i++) {
+ ret = hns3pf_reset_tqp(hw, i);
+ if (ret) {
+ hns3_err(hw,
+ "fail to reset tqp, queue_id = %d, ret = %d.",
+ i, ret);
+ return ret;
+ }
+ }
+ } else if (reset_status != HNS3_RESET_ALL_TQP_SUCCESS) {
+ hns3_err(hw, "fail to reset all tqps, reset_status = %u.",
+ reset_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+hns3vf_reset_all_tqps(struct hns3_hw *hw)
+{
+#define HNS3VF_RESET_ALL_TQP_DONE 1U
+ uint8_t reset_status;
+ uint8_t msg_data[2];
+ int ret;
+ int i;
+
+ memset(msg_data, 0, sizeof(uint16_t));
+ ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
+ sizeof(msg_data), true, &reset_status,
+ sizeof(reset_status));
+ if (ret) {
+ hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret);
+ return ret;
+ }
+
+ if (reset_status == HNS3VF_RESET_ALL_TQP_DONE)
+ return 0;
+
+ /*
+ * If the firmware version or kernel PF version is low, it may not
+ * support the rcb reset which means reset all the tqps at a time.
+ * In this case, we should reset tqps one by one.
+ */
+ for (i = 1; i < hw->cfg_max_queues; i++) {
+ ret = hns3vf_reset_tqp(hw, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int
@@ -711,14 +793,21 @@ hns3_reset_all_tqps(struct hns3_adapter *hns)
struct hns3_hw *hw = &hns->hw;
int ret, i;
+ /* Disable all queues before reset all queues */
for (i = 0; i < hw->cfg_max_queues; i++) {
- ret = hns3_reset_tqp(hns, i);
+ ret = hns3_tqp_enable(hw, i, false);
if (ret) {
- hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
+ hns3_err(hw,
+ "fail to disable tqps before tqps reset, ret = %d.",
+ ret);
return ret;
}
}
- return 0;
+
+ if (hns->is_vf)
+ return hns3vf_reset_all_tqps(hw);
+ else
+ return hns3pf_reset_all_tqps(hw);
}
static int
--
2.7.4

View File

@ -0,0 +1,106 @@
From 85c88060616e97d475df4b2321843a57218b80d9 Mon Sep 17 00:00:00 2001
From: "Min Hu (Connor)" <humin29@huawei.com>
Date: Thu, 1 Apr 2021 21:38:03 +0800
Subject: [PATCH 073/189] net/hns3: fix MTU config complexity
This patch fixes the excessive cyclomatic complexity of the MTU
handling in the device configure process.
Fixes: 1f5ca0b460cd ("net/hns3: support some device operations")
Cc: stable@dpdk.org
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 62 ++++++++++++++++++++++++++----------------
1 file changed, 38 insertions(+), 24 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 356c52a..ffdf019 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2373,6 +2373,41 @@ hns3_init_ring_with_vector(struct hns3_hw *hw)
}
static int
+hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ uint32_t max_rx_pkt_len;
+ uint16_t mtu;
+ int ret;
+
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME))
+ return 0;
+
+ /*
+ * If jumbo frames are enabled, MTU needs to be refreshed
+ * according to the maximum RX packet length.
+ */
+ max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
+ if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
+ max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
+ hns3_err(hw, "maximum Rx packet length must be greater than %u "
+ "and no more than %u when jumbo frame enabled.",
+ (uint16_t)HNS3_DEFAULT_FRAME_LEN,
+ (uint16_t)HNS3_MAX_FRAME_LEN);
+ return -EINVAL;
+ }
+
+ mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
+ ret = hns3_dev_mtu_set(dev, mtu);
+ if (ret)
+ return ret;
+ dev->data->mtu = mtu;
+
+ return 0;
+}
+
+static int
hns3_dev_configure(struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
@@ -2382,8 +2417,6 @@ hns3_dev_configure(struct rte_eth_dev *dev)
uint16_t nb_rx_q = dev->data->nb_rx_queues;
uint16_t nb_tx_q = dev->data->nb_tx_queues;
struct rte_eth_rss_conf rss_conf;
- uint32_t max_rx_pkt_len;
- uint16_t mtu;
bool gro_en;
int ret;
@@ -2431,28 +2464,9 @@ hns3_dev_configure(struct rte_eth_dev *dev)
goto cfg_err;
}
- /*
- * If jumbo frames are enabled, MTU needs to be refreshed
- * according to the maximum RX packet length.
- */
- if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
- if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
- max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
- hns3_err(hw, "maximum Rx packet length must be greater "
- "than %u and less than %u when jumbo frame enabled.",
- (uint16_t)HNS3_DEFAULT_FRAME_LEN,
- (uint16_t)HNS3_MAX_FRAME_LEN);
- ret = -EINVAL;
- goto cfg_err;
- }
-
- mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
- ret = hns3_dev_mtu_set(dev, mtu);
- if (ret)
- goto cfg_err;
- dev->data->mtu = mtu;
- }
+ ret = hns3_refresh_mtu(dev, conf);
+ if (ret)
+ goto cfg_err;
ret = hns3_dev_configure_vlan(dev);
if (ret)
--
2.7.4

View File

@ -0,0 +1,793 @@
From 95a068d7af10549d1a084b94b86fbffd03c0e3bd Mon Sep 17 00:00:00 2001
From: "Min Hu (Connor)" <humin29@huawei.com>
Date: Thu, 1 Apr 2021 21:38:04 +0800
Subject: [PATCH 074/189] net/hns3: support IEEE 1588 PTP
Add hns3 support for new ethdev APIs to enable and read IEEE1588/
802.1AS PTP timestamps.
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
doc/guides/nics/features/hns3.ini | 2 +
doc/guides/nics/hns3.rst | 1 +
drivers/net/hns3/hns3_cmd.h | 30 ++++
drivers/net/hns3/hns3_ethdev.c | 41 +++++-
drivers/net/hns3/hns3_ethdev.h | 20 +++
drivers/net/hns3/hns3_ptp.c | 292 ++++++++++++++++++++++++++++++++++++++
drivers/net/hns3/hns3_regs.h | 23 +++
drivers/net/hns3/hns3_rxtx.c | 47 +++++-
drivers/net/hns3/hns3_rxtx.h | 7 +
drivers/net/hns3/hns3_rxtx_vec.c | 15 +-
drivers/net/hns3/meson.build | 3 +-
11 files changed, 468 insertions(+), 13 deletions(-)
create mode 100644 drivers/net/hns3/hns3_ptp.c
diff --git a/doc/guides/nics/features/hns3.ini b/doc/guides/nics/features/hns3.ini
index d407b2f..cc1ad0f 100644
--- a/doc/guides/nics/features/hns3.ini
+++ b/doc/guides/nics/features/hns3.ini
@@ -42,6 +42,8 @@ Stats per queue = Y
FW version = Y
Registers dump = Y
Module EEPROM dump = Y
+Timesync = Y
+Timestamp offload = Y
Multiprocess aware = Y
Linux UIO = Y
Linux VFIO = Y
diff --git a/doc/guides/nics/hns3.rst b/doc/guides/nics/hns3.rst
index e8abd07..d722509 100644
--- a/doc/guides/nics/hns3.rst
+++ b/doc/guides/nics/hns3.rst
@@ -37,6 +37,7 @@ Features of the HNS3 PMD are:
- MTU update
- NUMA support
- Generic flow API
+- IEEE1588/802.1AS timestamping
Prerequisites
-------------
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 30aca82..5d1fb67 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -123,6 +123,10 @@ enum hns3_opcode_type {
HNS3_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HNS3_OPC_CONFIG_FEC_MODE = 0x031A,
+ /* PTP command */
+ HNS3_OPC_PTP_INT_EN = 0x0501,
+ HNS3_OPC_CFG_PTP_MODE = 0x0507,
+
/* PFC/Pause commands */
HNS3_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HNS3_OPC_CFG_PFC_PAUSE_EN = 0x0702,
@@ -976,6 +980,32 @@ struct hns3_query_ssu_cmd {
uint32_t rev1[2];
};
+#define HNS3_PTP_ENABLE_B 0
+#define HNS3_PTP_TX_ENABLE_B 1
+#define HNS3_PTP_RX_ENABLE_B 2
+
+#define HNS3_PTP_TYPE_S 0
+#define HNS3_PTP_TYPE_M (0x3 << HNS3_PTP_TYPE_S)
+
+#define ALL_PTP_V2_TYPE 0xF
+#define HNS3_PTP_MESSAGE_TYPE_S 0
+#define HNS3_PTP_MESSAGE_TYPE_M (0xF << HNS3_PTP_MESSAGE_TYPE_S)
+
+#define PTP_TYPE_L2_V2_TYPE 0
+
+struct hns3_ptp_mode_cfg_cmd {
+ uint8_t enable;
+ uint8_t ptp_type;
+ uint8_t v2_message_type_1;
+ uint8_t v2_message_type_0;
+ uint8_t rsv[20];
+};
+
+struct hns3_ptp_int_cmd {
+ uint8_t int_en;
+ uint8_t rsvd[23];
+};
+
#define HNS3_MAX_TQP_NUM_HIP08_PF 64
#define HNS3_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HNS3_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index ffdf019..aef1ebf 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -58,6 +58,7 @@ enum hns3_evt_cause {
HNS3_VECTOR0_EVENT_RST,
HNS3_VECTOR0_EVENT_MBX,
HNS3_VECTOR0_EVENT_ERR,
+ HNS3_VECTOR0_EVENT_PTP,
HNS3_VECTOR0_EVENT_OTHER,
};
@@ -202,6 +203,13 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
goto out;
}
+ /* Check for vector0 1588 event source */
+ if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
+ val = BIT(HNS3_VECTOR0_1588_INT_B);
+ ret = HNS3_VECTOR0_EVENT_PTP;
+ goto out;
+ }
+
/* check for vector0 msix event source */
if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
@@ -227,10 +235,17 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
return ret;
}
+static bool
+hns3_is_1588_event_type(uint32_t event_type)
+{
+ return (event_type == HNS3_VECTOR0_EVENT_PTP);
+}
+
static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
- if (event_type == HNS3_VECTOR0_EVENT_RST)
+ if (event_type == HNS3_VECTOR0_EVENT_RST ||
+ hns3_is_1588_event_type(event_type))
hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
else if (event_type == HNS3_VECTOR0_EVENT_MBX)
hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
@@ -253,6 +268,8 @@ hns3_clear_all_event_cause(struct hns3_hw *hw)
BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
BIT(HNS3_VECTOR0_CORERESET_INT_B));
hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
+ hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
+ BIT(HNS3_VECTOR0_1588_INT_B));
}
static void
@@ -2468,6 +2485,10 @@ hns3_dev_configure(struct rte_eth_dev *dev)
if (ret)
goto cfg_err;
+ ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
+ if (ret)
+ goto cfg_err;
+
ret = hns3_dev_configure_vlan(dev);
if (ret)
goto cfg_err;
@@ -2641,6 +2662,9 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+ if (hns3_dev_ptp_supported(hw))
+ info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+
info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = HNS3_MAX_RING_DESC,
.nb_min = HNS3_MIN_RING_DESC,
@@ -4960,6 +4984,10 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
goto err_intr_callback_register;
}
+ ret = hns3_ptp_init(hw);
+ if (ret)
+ goto err_get_config;
+
/* Enable interrupt */
rte_intr_enable(&pci_dev->intr_handle);
hns3_pf_enable_irq0(hw);
@@ -5977,6 +6005,10 @@ hns3_restore_conf(struct hns3_adapter *hns)
if (ret)
goto err_promisc;
+ ret = hns3_restore_ptp(hns);
+ if (ret)
+ goto err_promisc;
+
ret = hns3_restore_rx_interrupt(hw);
if (ret)
goto err_promisc;
@@ -6681,6 +6713,13 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
.fec_set = hns3_fec_set,
.tm_ops_get = hns3_tm_ops_get,
.tx_done_cleanup = hns3_tx_done_cleanup,
+ .timesync_enable = hns3_timesync_enable,
+ .timesync_disable = hns3_timesync_disable,
+ .timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp,
+ .timesync_adjust_time = hns3_timesync_adjust_time,
+ .timesync_read_time = hns3_timesync_read_time,
+ .timesync_write_time = hns3_timesync_write_time,
};
static const struct hns3_reset_ops hns3_reset_ops = {
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index eb2203c..25cb5e2 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -750,6 +750,11 @@ struct hns3_pf {
bool support_sfp_query;
uint32_t fec_mode; /* current FEC mode for ethdev */
+ bool ptp_enable;
+
+ /* Stores timestamp of last received packet on dev */
+ uint64_t rx_timestamp;
+
struct hns3_vtag_cfg vtag_config;
LIST_HEAD(vlan_tbl, hns3_user_vlan_table) vlan_list;
@@ -1000,6 +1005,21 @@ int hns3_dev_infos_get(struct rte_eth_dev *eth_dev,
void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
uint32_t link_speed, uint8_t link_duplex);
void hns3_parse_devargs(struct rte_eth_dev *dev);
+int hns3_restore_ptp(struct hns3_adapter *hns);
+int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
+ struct rte_eth_conf *conf);
+int hns3_ptp_init(struct hns3_hw *hw);
+int hns3_timesync_enable(struct rte_eth_dev *dev);
+int hns3_timesync_disable(struct rte_eth_dev *dev);
+int hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused);
+int hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+int hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts);
+int hns3_timesync_write_time(struct rte_eth_dev *dev,
+ const struct timespec *ts);
+int hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static inline bool
is_reset_pending(struct hns3_adapter *hns)
diff --git a/drivers/net/hns3/hns3_ptp.c b/drivers/net/hns3/hns3_ptp.c
new file mode 100644
index 0000000..146b69d
--- /dev/null
+++ b/drivers/net/hns3/hns3_ptp.c
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021-2021 Hisilicon Limited.
+ */
+
+#include <rte_ethdev_pci.h>
+#include <rte_io.h>
+#include <rte_time.h>
+
+#include "hns3_ethdev.h"
+#include "hns3_regs.h"
+#include "hns3_logs.h"
+
+uint64_t hns3_timestamp_rx_dynflag;
+int hns3_timestamp_dynfield_offset = -1;
+
+int
+hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
+ struct rte_eth_conf *conf)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ return 0;
+
+ ret = rte_mbuf_dyn_rx_timestamp_register
+ (&hns3_timestamp_dynfield_offset,
+ &hns3_timestamp_rx_dynflag);
+ if (ret) {
+ hns3_err(hw,
+ "failed to register Rx timestamp field/flag");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hns3_ptp_int_en(struct hns3_hw *hw, bool en)
+{
+ struct hns3_ptp_int_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ req = (struct hns3_ptp_int_cmd *)desc.data;
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PTP_INT_EN, false);
+ req->int_en = en ? 1 : 0;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw,
+ "failed to %s ptp interrupt, ret = %d\n",
+ en ? "enable" : "disable", ret);
+
+ return ret;
+}
+
+int
+hns3_ptp_init(struct hns3_hw *hw)
+{
+ int ret;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return 0;
+
+ ret = hns3_ptp_int_en(hw, true);
+ if (ret)
+ return ret;
+
+ /* Start PTP timer */
+ hns3_write_dev(hw, HNS3_CFG_TIME_CYC_EN, 1);
+
+ return 0;
+}
+
+static int
+hns3_timesync_configure(struct hns3_adapter *hns, bool en)
+{
+ struct hns3_ptp_mode_cfg_cmd *req;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_cmd_desc desc;
+ int val;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PTP_MODE, false);
+
+ req = (struct hns3_ptp_mode_cfg_cmd *)desc.data;
+
+ val = en ? 1 : 0;
+ hns3_set_bit(req->enable, HNS3_PTP_ENABLE_B, val);
+ hns3_set_bit(req->enable, HNS3_PTP_TX_ENABLE_B, val);
+ hns3_set_bit(req->enable, HNS3_PTP_RX_ENABLE_B, val);
+
+ if (en) {
+ hns3_set_field(req->ptp_type, HNS3_PTP_TYPE_M, HNS3_PTP_TYPE_S,
+ PTP_TYPE_L2_V2_TYPE);
+ hns3_set_field(req->v2_message_type_1, HNS3_PTP_MESSAGE_TYPE_M,
+ HNS3_PTP_MESSAGE_TYPE_S, ALL_PTP_V2_TYPE);
+ }
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "configure PTP time failed, en = %d, ret = %d",
+ en, ret);
+ return ret;
+ }
+
+ pf->ptp_enable = en;
+
+ return 0;
+}
+
+int
+hns3_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+ int ret;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ if (pf->ptp_enable)
+ return 0;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_timesync_configure(hns, true);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+}
+
+int
+hns3_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+ int ret;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ if (!pf->ptp_enable)
+ return 0;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_timesync_configure(hns, false);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+int
+hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
+{
+#define TIME_RX_STAMP_NS_MASK 0x3FFFFFFF
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+ uint64_t ns, sec;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ ns = pf->rx_timestamp & TIME_RX_STAMP_NS_MASK;
+ sec = upper_32_bits(pf->rx_timestamp);
+
+ ns += sec * NSEC_PER_SEC;
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+int
+hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+#define TIME_TX_STAMP_NS_MASK 0x3FFFFFFF
+#define TIME_TX_STAMP_VALID 24
+#define TIME_TX_STAMP_CNT_MASK 0x7
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ uint64_t sec;
+ uint64_t tmp;
+ uint64_t ns;
+ int ts_cnt;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ ts_cnt = hns3_read_dev(hw, HNS3_TX_1588_BACK_TSP_CNT) &
+ TIME_TX_STAMP_CNT_MASK;
+ if (ts_cnt == 0)
+ return -EINVAL;
+
+ ns = hns3_read_dev(hw, HNS3_TX_1588_TSP_BACK_0) & TIME_TX_STAMP_NS_MASK;
+ sec = hns3_read_dev(hw, HNS3_TX_1588_TSP_BACK_1);
+ tmp = hns3_read_dev(hw, HNS3_TX_1588_TSP_BACK_2) & 0xFFFF;
+ sec = (tmp << 32) | sec;
+
+ ns += sec * NSEC_PER_SEC;
+
+ *timestamp = rte_ns_to_timespec(ns);
+
+ /* Clear current timestamp hardware stores */
+ hns3_read_dev(hw, HNS3_TX_1588_SEQID_BACK);
+
+ return 0;
+}
+
+int
+hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t ns, sec;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ sec = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_L);
+ sec |= (uint64_t)(hns3_read_dev(hw, HNS3_CURR_TIME_OUT_H) & 0xFFFF)
+ << 32;
+
+ ns = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_NS);
+ ns += sec * NSEC_PER_SEC;
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+int
+hns3_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t sec = ts->tv_sec;
+ uint64_t ns = ts->tv_nsec;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ /* Set the timecounters to a new value. */
+ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_H, upper_32_bits(sec));
+ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_M, lower_32_bits(sec));
+ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_L, lower_32_bits(ns));
+ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_RDY, 1);
+
+ return 0;
+}
+
+int
+hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+#define TIME_SYNC_L_MASK 0x7FFFFFFF
+#define SYMBOL_BIT_OFFSET 31
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct timespec cur_time;
+ uint64_t ns;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ (void)hns3_timesync_read_time(dev, &cur_time);
+ ns = rte_timespec_to_ns((const struct timespec *)&cur_time);
+ cur_time = rte_ns_to_timespec(ns + delta);
+ (void)hns3_timesync_write_time(dev, (const struct timespec *)&cur_time);
+
+ return 0;
+}
+
+int
+hns3_restore_ptp(struct hns3_adapter *hns)
+{
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_hw *hw = &hns->hw;
+ bool en = pf->ptp_enable;
+ int ret;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return 0;
+
+ ret = hns3_timesync_configure(hns, en);
+ if (ret)
+ hns3_err(hw, "restore PTP enable state(%d) failed, ret = %d",
+ en, ret);
+
+ return ret;
+}
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index e141fe1..c9e10be 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -121,6 +121,29 @@
#define HNS3_TQP_INTR_RL_DEFAULT 0
#define HNS3_TQP_INTR_QL_DEFAULT 0
+/* Register bit for 1588 event */
+#define HNS3_VECTOR0_1588_INT_B 0
+
+#define HNS3_PTP_BASE_ADDRESS 0x29000
+
+#define HNS3_TX_1588_SEQID_BACK (HNS3_PTP_BASE_ADDRESS + 0x0)
+#define HNS3_TX_1588_TSP_BACK_0 (HNS3_PTP_BASE_ADDRESS + 0x4)
+#define HNS3_TX_1588_TSP_BACK_1 (HNS3_PTP_BASE_ADDRESS + 0x8)
+#define HNS3_TX_1588_TSP_BACK_2 (HNS3_PTP_BASE_ADDRESS + 0xc)
+
+#define HNS3_TX_1588_BACK_TSP_CNT (HNS3_PTP_BASE_ADDRESS + 0x30)
+
+#define HNS3_CFG_TIME_SYNC_H (HNS3_PTP_BASE_ADDRESS + 0x50)
+#define HNS3_CFG_TIME_SYNC_M (HNS3_PTP_BASE_ADDRESS + 0x54)
+#define HNS3_CFG_TIME_SYNC_L (HNS3_PTP_BASE_ADDRESS + 0x58)
+#define HNS3_CFG_TIME_SYNC_RDY (HNS3_PTP_BASE_ADDRESS + 0x5c)
+
+#define HNS3_CFG_TIME_CYC_EN (HNS3_PTP_BASE_ADDRESS + 0x70)
+
+#define HNS3_CURR_TIME_OUT_H (HNS3_PTP_BASE_ADDRESS + 0x74)
+#define HNS3_CURR_TIME_OUT_L (HNS3_PTP_BASE_ADDRESS + 0x78)
+#define HNS3_CURR_TIME_OUT_NS (HNS3_PTP_BASE_ADDRESS + 0x7c)
+
/* gl_usec convert to hardware count, as writing each 1 represents 2us */
#define HNS3_GL_USEC_TO_REG(gl_usec) ((gl_usec) >> 1)
/* rl_usec convert to hardware count, as writing each 1 represents 4us */
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 0596c9c..c41cccb 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -2365,6 +2365,23 @@ hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
return rte_mbuf_raw_alloc(rxq->mb_pool);
}
+static inline void
+hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
+ volatile struct hns3_desc *rxd)
+{
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
+ uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp);
+
+ mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+ if (hns3_timestamp_rx_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = timestamp;
+ mbuf->ol_flags |= hns3_timestamp_rx_dynflag;
+ }
+
+ pf->rx_timestamp = timestamp;
+}
+
uint16_t
hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
@@ -2424,8 +2441,12 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
rxm = rxe->mbuf;
+ rxm->ol_flags = 0;
rxe->mbuf = nmb;
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
+ hns3_rx_ptp_timestamp_handle(rxq, rxm, rxdp);
+
dma_addr = rte_mbuf_data_iova_default(nmb);
rxdp->addr = rte_cpu_to_le_64(dma_addr);
rxdp->rx.bd_base_info = 0;
@@ -2436,7 +2457,7 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->data_len = rxm->pkt_len;
rxm->port = rxq->port_id;
rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
- rxm->ol_flags = PKT_RX_RSS_HASH;
+ rxm->ol_flags |= PKT_RX_RSS_HASH;
if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
rxm->hash.fdir.hi =
rte_le_to_cpu_16(rxd.rx.fd_id);
@@ -2455,6 +2476,9 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
+ if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
+ rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
+
if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
cksum_err);
@@ -3043,7 +3067,7 @@ hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
{
desc->addr = rte_mbuf_data_iova(rxm);
desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
- desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
+ desc->tx.tp_fe_sc_vld_ra_ri |= rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
}
static void
@@ -3091,6 +3115,10 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
}
+
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ desc->tx.tp_fe_sc_vld_ra_ri |=
+ rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B));
}
static inline int
@@ -4149,10 +4177,21 @@ hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
return 0;
}
+static bool
+hns3_tx_check_simple_support(struct rte_eth_dev *dev)
+{
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (hns3_dev_ptp_supported(hw))
+ return false;
+
+ return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
+}
+
static eth_tx_burst_t
hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
{
- uint64_t offloads = dev->data->dev_conf.txmode.offloads;
struct hns3_adapter *hns = dev->data->dev_private;
bool vec_allowed, sve_allowed, simple_allowed;
@@ -4160,7 +4199,7 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
hns3_tx_check_vec_support(dev) == 0;
sve_allowed = vec_allowed && hns3_check_sve_support();
simple_allowed = hns->tx_simple_allowed &&
- offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+ hns3_tx_check_simple_support(dev);
*prep = NULL;
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 6689397..eebbebf 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -106,6 +106,8 @@
#define HNS3_RXD_L3L4P_B 11
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
+
+#define HNS3_RXD_TS_VLD_B 14
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
@@ -200,6 +202,8 @@ enum hns3_pkt_tun_type {
struct hns3_desc {
union {
uint64_t addr;
+ uint64_t timestamp;
+
struct {
uint32_t addr0;
uint32_t addr1;
@@ -534,6 +538,9 @@ enum hns3_cksum_status {
HNS3_OUTER_L4_CKSUM_ERR = 8
};
+extern uint64_t hns3_timestamp_rx_dynflag;
+extern int hns3_timestamp_dynfield_offset;
+
static inline int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
uint32_t bd_base_info, uint32_t l234_info,
diff --git a/drivers/net/hns3/hns3_rxtx_vec.c b/drivers/net/hns3/hns3_rxtx_vec.c
index a26c83d..fd7b272 100644
--- a/drivers/net/hns3/hns3_rxtx_vec.c
+++ b/drivers/net/hns3/hns3_rxtx_vec.c
@@ -18,6 +18,10 @@ hns3_tx_check_vec_support(struct rte_eth_dev *dev)
{
struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
/* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
if (txmode->offloads != DEV_TX_OFFLOAD_MBUF_FAST_FREE)
return -ENOTSUP;
@@ -167,7 +171,6 @@ hns3_rxq_vec_setup(struct hns3_rx_queue *rxq)
memset(rxq->offset_table, 0, sizeof(rxq->offset_table));
}
-#ifndef RTE_LIBRTE_IEEE1588
static int
hns3_rxq_vec_check(struct hns3_rx_queue *rxq, void *arg)
{
@@ -183,17 +186,19 @@ hns3_rxq_vec_check(struct hns3_rx_queue *rxq, void *arg)
RTE_SET_USED(arg);
return 0;
}
-#endif
int
hns3_rx_check_vec_support(struct rte_eth_dev *dev)
{
-#ifndef RTE_LIBRTE_IEEE1588
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
uint64_t offloads_mask = DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_VLAN;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
if (dev->data->scattered_rx)
return -ENOTSUP;
@@ -207,8 +212,4 @@ hns3_rx_check_vec_support(struct rte_eth_dev *dev)
return -ENOTSUP;
return 0;
-#else
- RTE_SET_USED(dev);
- return -ENOTSUP;
-#endif
}
diff --git a/drivers/net/hns3/meson.build b/drivers/net/hns3/meson.build
index f6aac69..6d78c33 100644
--- a/drivers/net/hns3/meson.build
+++ b/drivers/net/hns3/meson.build
@@ -26,7 +26,8 @@ sources = files('hns3_cmd.c',
'hns3_rxtx.c',
'hns3_stats.c',
'hns3_mp.c',
- 'hns3_tm.c')
+ 'hns3_tm.c',
+ 'hns3_ptp.c')
deps += ['hash']
--
2.7.4

View File

@ -0,0 +1,64 @@
From 94240428064fc641a78c2189329094acb4059eb8 Mon Sep 17 00:00:00 2001
From: Chengchang Tang <tangchengchang@huawei.com>
Date: Fri, 2 Apr 2021 10:58:49 +0800
Subject: [PATCH 075/189] ethdev: validate input in register info
This patch adds validity check of input pointer in regs dump API.
Fixes: 7a3f27cbf59b ("ethdev: add access to specific device info")
Fixes: 936eda25e8da ("net/hns3: support dump register")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
drivers/net/hns3/hns3_regs.c | 5 -----
lib/librte_ethdev/rte_ethdev.c | 2 ++
lib/librte_ethdev/rte_ethdev.h | 1 +
3 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c
index 5b14727..93055a4 100644
--- a/drivers/net/hns3/hns3_regs.c
+++ b/drivers/net/hns3/hns3_regs.c
@@ -484,11 +484,6 @@ hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
uint32_t *data;
int ret;
- if (regs == NULL) {
- hns3_err(hw, "the input parameter regs is NULL!");
- return -EINVAL;
- }
-
ret = hns3_get_regs_length(hw, &length);
if (ret)
return ret;
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index 17ddacc..f311868 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -5239,6 +5239,8 @@ rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ if (info == NULL)
+ return -EINVAL;
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index f5f8919..e89fc50 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -4395,6 +4395,7 @@ int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
* @return
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
* - (-ENODEV) if *port_id* invalid.
* - (-EIO) if device is removed.
* - others depends on the specific operations implementation.
--
2.7.4

View File

@ -0,0 +1,64 @@
From ee9d7f7372170853e7538256c5e764bf154f3132 Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Tue, 30 Mar 2021 20:53:25 +0800
Subject: [PATCH 076/189] net/hns3: support wait in link update
There are two APIs in ethdev layer to get link status of device, namely,
"rte_eth_link_get" and "rte_eth_link_get_nowait". When the device link
is unstable or auto-negotiation is in progress, the first API supports
the function of waiting for the NIC to link up, and the maximum waiting
time is 9 seconds based on DPDK Documentation. For the hns3 PMD driver,
the link can be established within 2 seconds.
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 26 +++++++++++++++++++-------
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index aef1ebf..05f199e 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2779,19 +2779,31 @@ hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
}
static int
-hns3_dev_link_update(struct rte_eth_dev *eth_dev,
- __rte_unused int wait_to_complete)
+hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
+#define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */
+#define HNS3_MAX_LINK_CHECK_TIMES 20 /* 2s (100 * 20ms) in total */
+
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES;
struct hns3_mac *mac = &hw->mac;
struct rte_eth_link new_link;
int ret;
- ret = hns3_update_port_link_info(eth_dev);
- if (ret) {
- mac->link_status = ETH_LINK_DOWN;
- hns3_err(hw, "failed to get port link info, ret = %d.", ret);
- }
+ do {
+ ret = hns3_update_port_link_info(eth_dev);
+ if (ret) {
+ mac->link_status = ETH_LINK_DOWN;
+ hns3_err(hw, "failed to get port link info, ret = %d.",
+ ret);
+ break;
+ }
+
+ if (!wait_to_complete || mac->link_status == ETH_LINK_UP)
+ break;
+
+ rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
+ } while (retry_cnt--);
memset(&new_link, 0, sizeof(new_link));
hns3_setup_linkstatus(eth_dev, &new_link);
--
2.7.4

View File

@ -0,0 +1,76 @@
From 45da720c44c7a7758cc811bea609116088168f25 Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Wed, 31 Mar 2021 18:01:35 +0800
Subject: [PATCH 077/189] net/hns3: fix some function names for copper media
type
PHY is a common concept for the copper and optical media type interface.
There are some inappropriate function names for copper ports, which
needs to be adjusted.
Fixes: 2e4859f3b362 ("net/hns3: support PF device with copper PHYs")
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/hns3/hns3_ethdev.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 05f199e..4dff016 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4597,7 +4597,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
}
static void
-hns3_parse_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
+hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
{
struct hns3_phy_params_bd0_cmd *req;
@@ -4615,7 +4615,7 @@ hns3_parse_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
}
static int
-hns3_get_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
+hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
{
struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
uint16_t i;
@@ -4634,20 +4634,20 @@ hns3_get_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
return ret;
}
- hns3_parse_phy_params(desc, mac);
+ hns3_parse_copper_phy_params(desc, mac);
return 0;
}
static int
-hns3_update_phy_link_info(struct hns3_hw *hw)
+hns3_update_copper_link_info(struct hns3_hw *hw)
{
struct hns3_mac *mac = &hw->mac;
struct hns3_mac mac_info;
int ret;
memset(&mac_info, 0, sizeof(struct hns3_mac));
- ret = hns3_get_phy_params(hw, &mac_info);
+ ret = hns3_get_copper_phy_params(hw, &mac_info);
if (ret)
return ret;
@@ -4676,7 +4676,7 @@ hns3_update_link_info(struct rte_eth_dev *eth_dev)
int ret = 0;
if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
- ret = hns3_update_phy_link_info(hw);
+ ret = hns3_update_copper_link_info(hw);
else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
ret = hns3_update_fiber_link_info(hw);
--
2.7.4

Some files were not shown because too many files have changed in this diff Show More