Sync some patches for the bonding PMD and testpmd. The patches are as follows:
- net/bonding: fix Tx hash for TCP
- net/bonding: add link speeds configuration
- net/bonding: call Tx prepare before Tx burst
- net/bonding: fix MTU set for slaves
- app/testpmd: remove jumbo offload related code

(cherry picked from commit 4b9b49876b4ace72c09cc28416aa5e5d851c3ad5)
parent d1f313b77e
commit 6773fbb43a
0190-net-bonding-fix-Tx-hash-for-TCP.patch (new file, 42 lines)
@@ -0,0 +1,42 @@
From fbe9bd4deab755855a4ef2d88e559da6ae4b76c2 Mon Sep 17 00:00:00 2001
From: Jun Qiu <jun.qiu@jaguarmicro.com>
Date: Fri, 28 Oct 2022 15:32:42 +0800
Subject: net/bonding: fix Tx hash for TCP

In the following two cases, tcp_hdr + sizeof(*tcp_hdr) == pkt_end,
and the TCP port is not taken into account in calculating the HASH
value of TCP packets. TCP connections with the same source and
destination IP addresses will be hashed to the same slave port,
which may cause load imbalance.
1. TCP Pure ACK packets with no options, The header length is 20
and there is no data.
2. A TCP packet contains data, but the first seg of the mbuf
contains only the header information (ETH, IP, TCP), and the
data is in subsequent segs, which is usually the case in the
indirect mbuf used for zero-copy.

Fixes: 726158060d55 ("net/bonding: fix potential out of bounds read")
Cc: stable@dpdk.org

Signed-off-by: Jun Qiu <jun.qiu@jaguarmicro.com>
Acked-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/bonding/rte_eth_bond_pmd.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 3be2b08128..18754e3299 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -768,7 +768,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
((char *)ipv4_hdr +
ip_hdr_offset);
if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
- < pkt_end)
+ <= pkt_end)
l4hash = HASH_L4_PORTS(tcp_hdr);
} else if (ipv4_hdr->next_proto_id ==
IPPROTO_UDP) {
--
2.23.0

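The one-line change above relaxes the boundary check from "<" to "<=". A minimal, self-contained sketch of why the inclusive comparison is needed (illustrative only, not taken from the patch; the helper name and parameters are assumptions):

#include <stddef.h>
#include <stdint.h>

/* Illustrative helper: the L4 ports of a TCP header are safe to read when
 * the header fits inside the first segment, including the case where it
 * ends exactly at the segment end (pure ACKs, or a header-only first seg
 * of a multi-segment mbuf). A strict "<" wrongly rejects that case. */
static int
tcp_ports_readable(uintptr_t tcp_hdr, size_t tcp_hdr_len, uintptr_t pkt_end)
{
	return tcp_hdr + tcp_hdr_len <= pkt_end;
}
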
0191-net-bonding-add-link-speeds-configuration.patch (new file, 112 lines)
@@ -0,0 +1,112 @@
From b92c505e9506f38e76dcf094fbbb2e765e5452a8 Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong@huawei.com>
Date: Fri, 28 Oct 2022 15:32:43 +0800
Subject: net/bonding: add link speeds configuration

This patch adds link speeds configuration.

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Chas Williams <3chas3@gmail.com>
---
drivers/net/bonding/eth_bond_private.h | 3 +++
drivers/net/bonding/rte_eth_bond_api.c | 3 +++
drivers/net/bonding/rte_eth_bond_pmd.c | 27 ++++++++++++++++++++++++++
3 files changed, 33 insertions(+)

diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index 9626b26d67..c338e11d4f 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -131,6 +131,9 @@ struct bond_dev_private {
uint32_t link_down_delay_ms;
uint32_t link_up_delay_ms;

+ uint32_t speed_capa;
+ /**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
+
uint16_t nb_rx_queues; /**< Total number of rx queues */
uint16_t nb_tx_queues; /**< Total number of tx queues*/

diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 2d5cac6c51..b74477128a 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -513,6 +513,8 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
internals->primary_port = slave_port_id;
internals->current_primary_port = slave_port_id;

+ internals->speed_capa = dev_info.speed_capa;
+
/* Inherit queues settings from first slave */
internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
@@ -527,6 +529,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
} else {
int ret;

+ internals->speed_capa &= dev_info.speed_capa;
eth_bond_slave_inherit_dev_info_rx_next(internals, &dev_info);
eth_bond_slave_inherit_dev_info_tx_next(internals, &dev_info);

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 18754e3299..b5b706901a 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1721,6 +1721,8 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,

slave_eth_dev->data->dev_conf.rxmode.mtu =
bonded_eth_dev->data->dev_conf.rxmode.mtu;
+ slave_eth_dev->data->dev_conf.link_speeds =
+ bonded_eth_dev->data->dev_conf.link_speeds;

slave_eth_dev->data->dev_conf.txmode.offloads |=
bonded_eth_dev->data->dev_conf.txmode.offloads;
@@ -2257,6 +2259,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)

dev_info->reta_size = internals->reta_size;
dev_info->hash_key_size = internals->rss_key_len;
+ dev_info->speed_capa = internals->speed_capa;

return 0;
}
@@ -3571,6 +3574,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
uint64_t offloads;
int arg_count;
uint16_t port_id = dev - rte_eth_devices;
+ uint32_t link_speeds;
uint8_t agg_mode;

static const uint8_t default_rss_key[40] = {
@@ -3629,6 +3633,29 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
dev->data->dev_conf.txmode.offloads = offloads;
}

+ link_speeds = dev->data->dev_conf.link_speeds;
+ /*
+ * The default value of 'link_speeds' is zero. From its definition,
+ * this value actually means auto-negotiation. But not all PMDs support
+ * auto-negotiation. So ignore the check for the auto-negotiation and
+ * only consider fixed speed to reduce the impact on PMDs.
+ */
+ if (link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+ if ((link_speeds &
+ (internals->speed_capa & ~RTE_ETH_LINK_SPEED_FIXED)) == 0) {
+ RTE_BOND_LOG(ERR, "the fixed speed is not supported by all slave devices.");
+ return -EINVAL;
+ }
+ /*
+ * Two '1' in binary of 'link_speeds': bit0 and a unique
+ * speed bit.
+ */
+ if (__builtin_popcountl(link_speeds) != 2) {
+ RTE_BOND_LOG(ERR, "please set a unique speed.");
+ return -EINVAL;
+ }
+ }
+
/* set the max_rx_pktlen */
internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;

--
2.23.0

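A hedged usage sketch of what the patch enables: the application can request a fixed speed on the bonding port, and the PMD validates it against the intersected speed_capa of all slaves. The port/queue counts and the 10G value are illustrative assumptions, not taken from the patch:

#include <string.h>
#include <rte_ethdev.h>

/* Assumed example: ask the bonding port for a fixed 10G link. The bonding
 * PMD accepts this only if every slave reports 10G in its speed_capa and
 * exactly one speed bit is set in addition to RTE_ETH_LINK_SPEED_FIXED. */
static int
bond_request_fixed_10g(uint16_t bond_port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;

	return rte_eth_dev_configure(bond_port_id, nb_rxq, nb_txq, &conf);
}
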
0192-net-bonding-call-Tx-prepare-before-Tx-burst.patch (new file, 211 lines)
@@ -0,0 +1,211 @@
From 2606fe3bfdbe544819a08f27cd5ed6b5432c96a7 Mon Sep 17 00:00:00 2001
From: Chengwen Feng <fengchengwen@huawei.com>
Date: Fri, 28 Oct 2022 15:32:44 +0800
Subject: net/bonding: call Tx prepare before Tx burst

Normally, to use the HW offloads capability (e.g. checksum and TSO) in
the Tx direction, the application needs to call rte_eth_tx_prepare() to
do some adjustment with the packets before sending them. But the
tx_prepare callback of the bonding driver is not implemented. Therefore,
the sent packets may have errors (e.g. checksum errors).

However, it is difficult to design the tx_prepare callback for bonding
driver. Because when a bonded device sends packets, the bonded device
allocates the packets to different slave devices based on the real-time
link status and bonding mode. That is, it is very difficult for the
bonded device to determine which slave device's prepare function should
be invoked.

So in this patch, the tx_prepare callback of bonding driver is not
implemented. Instead, the rte_eth_tx_prepare() will be called before
rte_eth_tx_burst(). In this way, all tx_offloads can be processed
correctly for all NIC devices.

Note: because it is rare that bond different PMDs together, so just
call tx-prepare once in broadcast bonding mode.

Also the following description was added to the rte_eth_tx_burst()
function:
"@note This function must not modify mbufs (including packets data)
unless the refcnt is 1. The exception is the bonding PMD, which does not
have tx-prepare function, in this case, mbufs maybe modified."

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Reviewed-by: Min Hu (Connor) <humin29@huawei.com>
Acked-by: Chas Williams <3chas3@gmail.com>
---
drivers/net/bonding/rte_eth_bond_8023ad.c | 10 ++++--
drivers/net/bonding/rte_eth_bond_pmd.c | 37 ++++++++++++++++++-----
lib/ethdev/rte_ethdev.h | 4 +++
3 files changed, 41 insertions(+), 10 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index b3cddd8a20..29a71ae0bf 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -636,9 +636,12 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
return;
}
} else {
- uint16_t pkts_sent = rte_eth_tx_burst(slave_id,
+ uint16_t pkts_sent = rte_eth_tx_prepare(slave_id,
internals->mode4.dedicated_queues.tx_qid,
&lacp_pkt, 1);
+ pkts_sent = rte_eth_tx_burst(slave_id,
+ internals->mode4.dedicated_queues.tx_qid,
+ &lacp_pkt, pkts_sent);
if (pkts_sent != 1) {
rte_pktmbuf_free(lacp_pkt);
set_warning_flags(port, WRN_TX_QUEUE_FULL);
@@ -1371,9 +1374,12 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
}
} else {
/* Send packet directly to the slow queue */
- uint16_t tx_count = rte_eth_tx_burst(slave_id,
+ uint16_t tx_count = rte_eth_tx_prepare(slave_id,
internals->mode4.dedicated_queues.tx_qid,
&pkt, 1);
+ tx_count = rte_eth_tx_burst(slave_id,
+ internals->mode4.dedicated_queues.tx_qid,
+ &pkt, tx_count);
if (tx_count != 1) {
/* reset timer */
port->rx_marker_timer = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index b5b706901a..4e82f7b145 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -602,8 +602,11 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
/* Send packet burst on each slave device */
for (i = 0; i < num_of_slaves; i++) {
if (slave_nb_pkts[i] > 0) {
+ num_tx_slave = rte_eth_tx_prepare(slaves[i],
+ bd_tx_q->queue_id, slave_bufs[i],
+ slave_nb_pkts[i]);
num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
- slave_bufs[i], slave_nb_pkts[i]);
+ slave_bufs[i], num_tx_slave);

/* if tx burst fails move packets to end of bufs */
if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
@@ -628,6 +631,7 @@ bond_ethdev_tx_burst_active_backup(void *queue,
{
struct bond_dev_private *internals;
struct bond_tx_queue *bd_tx_q;
+ uint16_t nb_prep_pkts;

bd_tx_q = (struct bond_tx_queue *)queue;
internals = bd_tx_q->dev_private;
@@ -635,8 +639,11 @@ bond_ethdev_tx_burst_active_backup(void *queue,
if (internals->active_slave_count < 1)
return 0;

+ nb_prep_pkts = rte_eth_tx_prepare(internals->current_primary_port,
+ bd_tx_q->queue_id, bufs, nb_pkts);
+
return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
- bufs, nb_pkts);
+ bufs, nb_prep_pkts);
}

static inline uint16_t
@@ -910,7 +917,7 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)

struct rte_eth_dev *primary_port =
&rte_eth_devices[internals->primary_port];
- uint16_t num_tx_total = 0;
+ uint16_t num_tx_total = 0, num_tx_prep;
uint16_t i, j;

uint16_t num_of_slaves = internals->active_slave_count;
@@ -951,8 +958,10 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
#endif
}

- num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ num_tx_prep = rte_eth_tx_prepare(slaves[i], bd_tx_q->queue_id,
bufs + num_tx_total, nb_pkts - num_tx_total);
+ num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ bufs + num_tx_total, num_tx_prep);

if (num_tx_total == nb_pkts)
break;
@@ -1064,8 +1073,10 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* Send ARP packets on proper slaves */
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
if (slave_bufs_pkts[i] > 0) {
- num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
+ num_send = rte_eth_tx_prepare(i, bd_tx_q->queue_id,
slave_bufs[i], slave_bufs_pkts[i]);
+ num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
+ slave_bufs[i], num_send);
for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
bufs[nb_pkts - 1 - num_not_send - j] =
slave_bufs[i][nb_pkts - 1 - j];
@@ -1088,8 +1099,10 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* Send update packets on proper slaves */
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
if (update_bufs_pkts[i] > 0) {
+ num_send = rte_eth_tx_prepare(i, bd_tx_q->queue_id,
+ update_bufs[i], update_bufs_pkts[i]);
num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
- update_bufs_pkts[i]);
+ num_send);
for (j = num_send; j < update_bufs_pkts[i]; j++) {
rte_pktmbuf_free(update_bufs[i][j]);
}
@@ -1158,9 +1171,12 @@ tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
if (slave_nb_bufs[i] == 0)
continue;

- slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ slave_tx_count = rte_eth_tx_prepare(slave_port_ids[i],
bd_tx_q->queue_id, slave_bufs[i],
slave_nb_bufs[i]);
+ slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ bd_tx_q->queue_id, slave_bufs[i],
+ slave_tx_count);

total_tx_count += slave_tx_count;

@@ -1243,8 +1259,10 @@ tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,

if (rte_ring_dequeue(port->tx_ring,
(void **)&ctrl_pkt) != -ENOENT) {
- slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ slave_tx_count = rte_eth_tx_prepare(slave_port_ids[i],
bd_tx_q->queue_id, &ctrl_pkt, 1);
+ slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ bd_tx_q->queue_id, &ctrl_pkt, slave_tx_count);
/*
* re-enqueue LAG control plane packets to buffering
* ring if transmission fails so the packet isn't lost.
@@ -1316,6 +1334,9 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
if (num_of_slaves < 1)
return 0;

+ /* It is rare that bond different PMDs together, so just call tx-prepare once */
+ nb_pkts = rte_eth_tx_prepare(slaves[0], bd_tx_q->queue_id, bufs, nb_pkts);
+
/* Increment reference count on mbufs */
for (i = 0; i < nb_pkts; i++)
rte_pktmbuf_refcnt_update(bufs[i], num_of_slaves - 1);
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 8c894e090d..b262939a33 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -5691,6 +5691,10 @@ uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
* @see rte_eth_tx_prepare to perform some prior checks or adjustments
* for offloads.
*
+ * @note This function must not modify mbufs (including packets data) unless
+ * the refcnt is 1. The exception is the bonding PMD, which does not have
+ * tx-prepare function, in this case, mbufs maybe modified.
+ *
* @param port_id
* The port identifier of the Ethernet device.
* @param queue_id
--
2.23.0

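Every Tx path touched above follows the same prepare-then-burst sequence. A minimal sketch of that pattern at the application level (port and queue identifiers are illustrative assumptions):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Run Tx offload preparation on a burst, then transmit only the packets
 * that passed preparation; any remainder failed rte_eth_tx_prepare(). */
static uint16_t
prepare_and_send(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}
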
0193-net-bonding-fix-MTU-set-for-slaves.patch (new file, 62 lines)
@@ -0,0 +1,62 @@
From f099709983c155337a14340da3d9607a2a08a7f9 Mon Sep 17 00:00:00 2001
From: Ferruh Yigit <ferruh.yigit@intel.com>
Date: Fri, 28 Oct 2022 15:32:45 +0800
Subject: net/bonding: fix MTU set for slaves

ethdev requires device to be configured before setting MTU.

In bonding PMD, while configuring slaves, bonding first sets MTU later
configures them, which causes failure if slaves are not configured in
advance.

Fixing by changing the order in slave configure as requested in ethdev
layer, configure first and set MTU later.

Bugzilla ID: 864
Fixes: b26bee10ee37 ("ethdev: forbid MTU set before device configure")
Cc: stable@dpdk.org

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Tested-by: Yu Jiang <yux.jiang@intel.com>
Acked-by: Min Hu (Connor) <humin29@huawei.com>
---
drivers/net/bonding/rte_eth_bond_pmd.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 4e82f7b145..ab1196e505 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1770,14 +1770,6 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
}

- errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
- bonded_eth_dev->data->mtu);
- if (errval != 0 && errval != -ENOTSUP) {
- RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
- slave_eth_dev->data->port_id, errval);
- return errval;
- }
-
/* Configure device */
errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
nb_rx_queues, nb_tx_queues,
@@ -1788,6 +1780,14 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
return errval;
}

+ errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
+ bonded_eth_dev->data->mtu);
+ if (errval != 0 && errval != -ENOTSUP) {
+ RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
+ slave_eth_dev->data->port_id, errval);
+ return errval;
+ }
+
/* Setup Rx Queues */
for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
--
2.23.0

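The fix only reorders two existing calls. A hedged sketch of the ordering that ethdev now requires for any port (all values illustrative):

#include <errno.h>
#include <rte_ethdev.h>

/* Configure first, set the MTU afterwards: ethdev rejects
 * rte_eth_dev_set_mtu() on a device that has not been configured yet. */
static int
configure_then_set_mtu(uint16_t port_id, const struct rte_eth_conf *conf,
		uint16_t nb_rxq, uint16_t nb_txq, uint16_t mtu)
{
	int ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, conf);

	if (ret != 0)
		return ret;

	ret = rte_eth_dev_set_mtu(port_id, mtu);
	/* As in the patch, -ENOTSUP is tolerated for PMDs without MTU set. */
	return ret == -ENOTSUP ? 0 : ret;
}
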
0194-app-testpmd-remove-jumbo-offload-related-code.patch (new file, 121 lines)
@@ -0,0 +1,121 @@
From 20204b1f3811015975a5dac2012ca770be174acb Mon Sep 17 00:00:00 2001
From: Chengwen Feng <fengchengwen@huawei.com>
Date: Fri, 28 Oct 2022 15:32:46 +0800
Subject: app/testpmd: remove jumbo offload related code

The jumbo offload was removed from patch [1], but testpmd still exist
jumbo offload related code, this patch removes it, and also updates
the rst file.

[1] ethdev: remove jumbo offload flag

Fixes: b563c1421282 ("ethdev: remove jumbo offload flag")
Cc: stable@dpdk.org

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
---
app/test-pmd/cmdline.c | 12 ++++++------
app/test-pmd/testpmd.h | 1 -
doc/guides/testpmd_app_ug/testpmd_funcs.rst | 8 ++++----
3 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 6cb095f965..8d4a88bb85 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -861,7 +861,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"port config <port_id> rx_offload vlan_strip|"
"ipv4_cksum|udp_cksum|tcp_cksum|tcp_lro|qinq_strip|"
"outer_ipv4_cksum|macsec_strip|header_split|"
- "vlan_filter|vlan_extend|jumbo_frame|scatter|"
+ "vlan_filter|vlan_extend|scatter|"
"buffer_split|timestamp|security|keep_crc on|off\n"
" Enable or disable a per port Rx offloading"
" on all Rx queues of a port\n\n"
@@ -869,7 +869,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"port (port_id) rxq (queue_id) rx_offload vlan_strip|"
"ipv4_cksum|udp_cksum|tcp_cksum|tcp_lro|qinq_strip|"
"outer_ipv4_cksum|macsec_strip|header_split|"
- "vlan_filter|vlan_extend|jumbo_frame|scatter|"
+ "vlan_filter|vlan_extend|scatter|"
"buffer_split|timestamp|security|keep_crc on|off\n"
" Enable or disable a per queue Rx offloading"
" only on a specific Rx queue\n\n"
@@ -16080,7 +16080,7 @@ cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_offload =
(struct cmd_config_per_port_rx_offload_result,
offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#"
"qinq_strip#outer_ipv4_cksum#macsec_strip#"
- "header_split#vlan_filter#vlan_extend#jumbo_frame#"
+ "header_split#vlan_filter#vlan_extend#"
"scatter#buffer_split#timestamp#security#"
"keep_crc#rss_hash");
cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_on_off =
@@ -16163,7 +16163,7 @@ cmdline_parse_inst_t cmd_config_per_port_rx_offload = {
.help_str = "port config <port_id> rx_offload vlan_strip|ipv4_cksum|"
"udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|"
"macsec_strip|header_split|vlan_filter|vlan_extend|"
- "jumbo_frame|scatter|buffer_split|timestamp|security|"
+ "scatter|buffer_split|timestamp|security|"
"keep_crc|rss_hash on|off",
.tokens = {
(void *)&cmd_config_per_port_rx_offload_result_port,
@@ -16212,7 +16212,7 @@ cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_offload =
(struct cmd_config_per_queue_rx_offload_result,
offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#"
"qinq_strip#outer_ipv4_cksum#macsec_strip#"
- "header_split#vlan_filter#vlan_extend#jumbo_frame#"
+ "header_split#vlan_filter#vlan_extend#"
"scatter#buffer_split#timestamp#security#keep_crc");
cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_on_off =
TOKEN_STRING_INITIALIZER
@@ -16271,7 +16271,7 @@ cmdline_parse_inst_t cmd_config_per_queue_rx_offload = {
"vlan_strip|ipv4_cksum|"
"udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|"
"macsec_strip|header_split|vlan_filter|vlan_extend|"
- "jumbo_frame|scatter|buffer_split|timestamp|security|"
+ "scatter|buffer_split|timestamp|security|"
"keep_crc on|off",
.tokens = {
(void *)&cmd_config_per_queue_rx_offload_result_port,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 9c3a5d9bc5..ab6642585e 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -1097,7 +1097,6 @@ uint16_t tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue,
void add_tx_dynf_callback(portid_t portid);
void remove_tx_dynf_callback(portid_t portid);
int update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen);
-int update_jumbo_frame_offload(portid_t portid);
void flex_item_create(portid_t port_id, uint16_t flex_id, const char *filename);
void flex_item_destroy(portid_t port_id, uint16_t flex_id);
void port_flex_item_flush(portid_t port_id);
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index e15dc0c4c4..e0edd349bc 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -1767,8 +1767,8 @@ Enable or disable a per port Rx offloading on all Rx queues of a port::
* ``offloading``: can be any of these offloading capability:
vlan_strip, ipv4_cksum, udp_cksum, tcp_cksum, tcp_lro,
qinq_strip, outer_ipv4_cksum, macsec_strip,
- header_split, vlan_filter, vlan_extend, jumbo_frame,
- scatter, timestamp, security, keep_crc, rss_hash
+ header_split, vlan_filter, vlan_extend, scatter, timestamp, security,
+ keep_crc, rss_hash

This command should be run when the port is stopped, or else it will fail.

@@ -1782,8 +1782,8 @@ Enable or disable a per queue Rx offloading only on a specific Rx queue::
* ``offloading``: can be any of these offloading capability:
vlan_strip, ipv4_cksum, udp_cksum, tcp_cksum, tcp_lro,
qinq_strip, outer_ipv4_cksum, macsec_strip,
- header_split, vlan_filter, vlan_extend, jumbo_frame,
- scatter, timestamp, security, keep_crc
+ header_split, vlan_filter, vlan_extend, scatter, timestamp, security,
+ keep_crc

This command should be run when the port is stopped, or else it will fail.

--
2.23.0

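With the jumbo_frame offload token removed, large frames are expressed purely through the MTU. A hedged sketch (the 9000-byte value is an assumed example) of configuring a port for jumbo frames on DPDK 21.11:

#include <string.h>
#include <rte_ethdev.h>

/* Jumbo support now comes from rxmode.mtu rather than the removed Rx
 * jumbo offload flag; 9000 bytes is an illustrative value only. */
static int
configure_jumbo_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.mtu = 9000;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
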
dpdk.spec (17 lines changed)
@@ -1,6 +1,6 @@
Name: dpdk
Version: 21.11
-Release: 20
+Release: 21
Packager: packaging@6wind.com
URL: http://dpdk.org
%global source_version 21.11
@@ -207,6 +207,12 @@ Patch6006: backport-gro-fix-chain-index-for-more-than-2-packets.patch
Patch6007: backport-gro-trim-tail-padding-bytes.patch
Patch6008: backport-gro-check-payload-length-after-trim.patch

+Patch9190: 0190-net-bonding-fix-Tx-hash-for-TCP.patch
+Patch9191: 0191-net-bonding-add-link-speeds-configuration.patch
+Patch9192: 0192-net-bonding-call-Tx-prepare-before-Tx-burst.patch
+Patch9193: 0193-net-bonding-fix-MTU-set-for-slaves.patch
+Patch9194: 0194-app-testpmd-remove-jumbo-offload-related-code.patch
+
Summary: Data Plane Development Kit core
Group: System Environment/Libraries
License: BSD and LGPLv2 and GPLv2
@@ -334,6 +340,15 @@ strip -g $RPM_BUILD_ROOT/lib/modules/%{kern_devel_ver}/extra/dpdk/igb_uio.ko
/usr/sbin/depmod

%changelog
+* Sat Oct 29 2022 chenjiji <chenjiji09@163.com> - 21.11-21
+Sync some patches for bonding PMD and testpmd. And patchs
+are as follows:
+- net/bonding: fix Tx hash for TCP
+- net/bonding: add link speeds configuration
+- net/bonding: call Tx prepare before Tx burst
+- net/bonding: fix MTU set for slaves
+- app/testpmd: remove jumbo offload related code
+
* Fri Oct 28 2022 jiangheng <jiangheng14@huawei.com> - 21.11-20
- gro: trim tail padding bytes
- gro: check payload length after trim
