The patches are as follows:
- net/hns3: fix burst mode query with dummy function
- net/hns3: add debug info for Rx/Tx dummy function
- net/hns3: remove debug condition for Tx prepare
- net/hns3: separate Tx prepare from getting Tx function
- net/hns3: make getting Tx function static
- net/hns3: extract common functions to set Rx/Tx
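The key change in the "net/hns3: separate Tx prepare from getting Tx function" patch below is that selecting the Tx prepare callback becomes an independent helper instead of an output parameter of hns3_get_tx_function(). A minimal sketch of the resulting call pattern, reusing the symbols from the diff (driver context omitted; see the full diff for the real code):

    /* New helper: return hns3_prep_pkts only when the configured
     * Tx offloads actually need a prepare step. */
    static eth_tx_prep_t
    hns3_get_tx_prepare(struct rte_eth_dev *dev)
    {
    	return hns3_get_tx_prep_needed(dev) ? hns3_prep_pkts : NULL;
    }

    /* Callers now pick the burst and prepare functions independently: */
    dev->tx_pkt_burst = hns3_get_tx_function(dev);
    dev->tx_pkt_prepare = hns3_get_tx_prepare(dev);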
From 9e0cd6d469351131e473edc8a9dbbcd70890519f Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Sat, 11 Feb 2023 17:18:28 +0800
Subject: net/hns3: separate Tx prepare from getting Tx function

[ upstream commit 6a934ba4c6c48691b119a878981a4e3748766518 ]

Separate getting tx prepare from hns3_get_tx_function by extracting
an independent function.

Fixes: d7ec2c076579 ("net/hns3: select Tx prepare based on Tx offload")
Cc: stable@dpdk.org

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Dongdong Liu <liudongdong3@huawei.com>
---
 drivers/net/hns3/hns3_rxtx.c | 32 ++++++++++++++------------------
 drivers/net/hns3/hns3_rxtx.h |  3 +--
 2 files changed, 15 insertions(+), 20 deletions(-)

diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 9fc54d50f1..2dba4d8120 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4324,26 +4324,30 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
 		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
 
 	uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
+
 	if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
 		return true;
 
 	return false;
 }
 
+static eth_tx_prep_t
+hns3_get_tx_prepare(struct rte_eth_dev *dev)
+{
+	return hns3_get_tx_prep_needed(dev) ? hns3_prep_pkts : NULL;
+}
+
 eth_tx_burst_t
-hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
+hns3_get_tx_function(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	bool vec_allowed, sve_allowed, simple_allowed;
-	bool vec_support, tx_prepare_needed;
+	bool vec_support;
 
 	vec_support = hns3_tx_check_vec_support(dev) == 0;
 	vec_allowed = vec_support && hns3_get_default_vec_support();
 	sve_allowed = vec_support && hns3_get_sve_support();
 	simple_allowed = hns3_tx_check_simple_support(dev);
-	tx_prepare_needed = hns3_get_tx_prep_needed(dev);
-
-	*prep = NULL;
 
 	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
 		return hns3_xmit_pkts_vec;
@@ -4351,19 +4355,14 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
 		return hns3_xmit_pkts_vec_sve;
 	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
 		return hns3_xmit_pkts_simple;
-	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
-		if (tx_prepare_needed)
-			*prep = hns3_prep_pkts;
+	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
 		return hns3_xmit_pkts;
-	}
 
 	if (vec_allowed)
 		return hns3_xmit_pkts_vec;
 	if (simple_allowed)
 		return hns3_xmit_pkts_simple;
 
-	if (tx_prepare_needed)
-		*prep = hns3_prep_pkts;
 	return hns3_xmit_pkts;
 }
 
@@ -4403,7 +4402,6 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
-	eth_tx_prep_t prep = NULL;
 
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
 	    __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
@@ -4411,8 +4409,8 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
 		eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
 		eth_dev->tx_pkt_burst = hw->set_link_down ?
 					rte_eth_pkt_burst_dummy :
-					hns3_get_tx_function(eth_dev, &prep);
-		eth_dev->tx_pkt_prepare = prep;
+					hns3_get_tx_function(eth_dev);
+		eth_dev->tx_pkt_prepare = hns3_get_tx_prepare(eth_dev);
 		eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
 	} else {
 		eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
@@ -4758,10 +4756,8 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev)
 void
 hns3_start_tx_datapath(struct rte_eth_dev *dev)
 {
-	eth_tx_prep_t prep = NULL;
-
-	dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep);
-	dev->tx_pkt_prepare = prep;
+	dev->tx_pkt_burst = hns3_get_tx_function(dev);
+	dev->tx_pkt_prepare = hns3_get_tx_prepare(dev);
 	hns3_eth_dev_fp_ops_config(dev);
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index ea1a805491..38c3581312 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -740,8 +740,7 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
 const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
-eth_tx_burst_t hns3_get_tx_function(struct rte_eth_dev *dev,
-				    eth_tx_prep_t *prep);
+eth_tx_burst_t hns3_get_tx_function(struct rte_eth_dev *dev);
 
 uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
 void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
-- 
2.23.0