Sync patches 9 to 17 (version range) from the master branch.

Signed-off-by: speech_white <humin29@huawei.com>
From 11bcfb49be7f092d8d20d88dfdc5358196d3ecca Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Mon, 25 Oct 2021 14:39:22 +0800
Subject: [PATCH 33/33] app/testpmd: remove unused header file

This patch removes unused "rte_eth_bond.h" header file.

Fixes: 2950a769315e ("bond: testpmd support")
Cc: stable@dpdk.org

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test-pmd/parameters.c      |   3 -
 drivers/net/hns3/hns3_common.c | 101 +++++++++++++++++----------------
 drivers/net/hns3/hns3_flow.h   |   5 +-
 3 files changed, 55 insertions(+), 54 deletions(-)

diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index c464c42f6..2a69df5b7 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -39,9 +39,6 @@
 #include <rte_ether.h>
 #include <rte_ethdev.h>
 #include <rte_string_fns.h>
-#ifdef RTE_NET_BOND
-#include <rte_eth_bond.h>
-#endif
 #include <rte_flow.h>
 
 #include "testpmd.h"
diff --git a/drivers/net/hns3/hns3_common.c b/drivers/net/hns3/hns3_common.c
index eac2aa104..0328f2beb 100644
--- a/drivers/net/hns3/hns3_common.c
+++ b/drivers/net/hns3/hns3_common.c
@@ -4,7 +4,7 @@
 
 #include <rte_kvargs.h>
 #include <rte_bus_pci.h>
-#include <ethdev_pci.h>
+#include <rte_ethdev_pci.h>
 #include <rte_pci.h>
 
 #include "hns3_common.h"
@@ -60,43 +60,42 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
-	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
-				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
-				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
-				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
-				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 RTE_ETH_RX_OFFLOAD_SCATTER |
-				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
-				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
-				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
-				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
-				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
-				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
-				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
-				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
-				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
-				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
-				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
-				 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
+	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
+				 DEV_RX_OFFLOAD_TCP_CKSUM |
+				 DEV_RX_OFFLOAD_UDP_CKSUM |
+				 DEV_RX_OFFLOAD_SCTP_CKSUM |
+				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 DEV_RX_OFFLOAD_SCATTER |
+				 DEV_RX_OFFLOAD_VLAN_STRIP |
+				 DEV_RX_OFFLOAD_VLAN_FILTER |
+				 DEV_RX_OFFLOAD_RSS_HASH |
+				 DEV_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 DEV_TX_OFFLOAD_IPV4_CKSUM |
+				 DEV_TX_OFFLOAD_TCP_CKSUM |
+				 DEV_TX_OFFLOAD_UDP_CKSUM |
+				 DEV_TX_OFFLOAD_SCTP_CKSUM |
+				 DEV_TX_OFFLOAD_MULTI_SEGS |
+				 DEV_TX_OFFLOAD_TCP_TSO |
+				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
+				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+				 DEV_TX_OFFLOAD_VLAN_INSERT);
 
 	if (!hw->port_base_vlan_cfg.state)
-		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
+		info->tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
 
 	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
-		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
-	info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	if (hns3_dev_get_support(hw, PTP))
-		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+		info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = HNS3_MAX_RING_DESC,
@@ -143,7 +142,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	 */
 	if (!hns->is_vf) {
 		info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
-		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+		info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
 		info->speed_capa = hns3_get_speed_capa(hw);
 	} else {
 		info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
@@ -641,7 +640,7 @@ int
 hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
 	uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
@@ -664,13 +663,16 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	if (rte_intr_efd_enable(intr_handle, intr_vector))
 		return -EINVAL;
 
-	/* Allocate vector list */
-	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
-				    hw->used_rx_queues)) {
-		hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
-			 hw->used_rx_queues);
-		ret = -ENOMEM;
-		goto alloc_intr_vec_error;
+	if (intr_handle->intr_vec == NULL) {
+		intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    hw->used_rx_queues * sizeof(int), 0);
+		if (intr_handle->intr_vec == NULL) {
+			hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
+				 hw->used_rx_queues);
+			ret = -ENOMEM;
+			goto alloc_intr_vec_error;
+		}
 	}
 
 	if (rte_intr_allow_others(intr_handle)) {
@@ -683,21 +685,20 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 						   HNS3_RING_TYPE_RX, q_id);
 			if (ret)
 				goto bind_vector_error;
-
-			if (rte_intr_vec_list_index_set(intr_handle, q_id, vec))
-				goto bind_vector_error;
+			intr_handle->intr_vec[q_id] = vec;
 			/*
 			 * If there are not enough efds (e.g. not enough interrupt),
 			 * remaining queues will be bond to the last interrupt.
 			 */
-			if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
+			if (vec < base + intr_handle->nb_efd - 1)
 				vec++;
 		}
 	rte_intr_enable(intr_handle);
 	return 0;
 
 bind_vector_error:
-	rte_intr_vec_list_free(intr_handle);
+	rte_free(intr_handle->intr_vec);
+	intr_handle->intr_vec = NULL;
 alloc_intr_vec_error:
 	rte_intr_efd_disable(intr_handle);
 	return ret;
@@ -707,7 +708,7 @@ void
 hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
@@ -727,13 +728,16 @@ hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 			(void)hw->ops.bind_ring_with_vector(hw, vec, false,
 							    HNS3_RING_TYPE_RX,
 							    q_id);
-			if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
+			if (vec < base + intr_handle->nb_efd - 1)
 				vec++;
 		}
 	}
 	/* Clean datapath event and queue/vec mapping */
 	rte_intr_efd_disable(intr_handle);
-	rte_intr_vec_list_free(intr_handle);
+	if (intr_handle->intr_vec) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
 }
 
 int
@@ -741,7 +745,7 @@ hns3_restore_rx_interrupt(struct hns3_hw *hw)
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint16_t q_id;
 	int ret;
 
@@ -751,9 +755,8 @@ hns3_restore_rx_interrupt(struct hns3_hw *hw)
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
 			ret = hw->ops.bind_ring_with_vector(hw,
-					rte_intr_vec_list_index_get(intr_handle,
-								    q_id),
-					true, HNS3_RING_TYPE_RX, q_id);
+					intr_handle->intr_vec[q_id], true,
+					HNS3_RING_TYPE_RX, q_id);
 			if (ret)
 				return ret;
 		}
diff --git a/drivers/net/hns3/hns3_flow.h b/drivers/net/hns3/hns3_flow.h
index 2eb451b72..d679e5928 100644
--- a/drivers/net/hns3/hns3_flow.h
+++ b/drivers/net/hns3/hns3_flow.h
@@ -36,8 +36,9 @@ struct hns3_flow_mem {
 TAILQ_HEAD(hns3_rss_filter_list, hns3_rss_conf_ele);
 TAILQ_HEAD(hns3_flow_mem_list, hns3_flow_mem);
 
-int hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
-			  const struct rte_flow_ops **ops);
+int hns3_dev_filter_ctrl(struct rte_eth_dev *dev,
+			 enum rte_filter_type filter_type,
+			 enum rte_filter_op filter_op, void *arg);
 void hns3_flow_init(struct rte_eth_dev *dev);
 void hns3_flow_uninit(struct rte_eth_dev *dev);
 
--
2.33.0