From 25c01bd32374b0c3cbc260f3e3872408d749cb45 Mon Sep 17 00:00:00 2001
From: Matan Azrad <matan@nvidia.com>
Date: Thu, 11 Aug 2022 19:59:18 +0300
Subject: [PATCH] net/mlx5: fix Rx queue recovery mechanism

[ upstream commit 60b254e3923d007bcadbb8d410f95ad89a2f13fa ]

The local variables become inconsistent in the data receiving routines
after queue error recovery.
The receive queue consumer index becomes wrong and needs to be reset to
the size of the queue (as the RQ was fully replenished in the recovery
procedure).

In the MPRQ case, the local consumed_strd variable should also be
reset; a self-contained sketch of this resynchronization follows the
patch trailer.

CVE-2022-28199
Fixes: 88c0733535d6 ("net/mlx5: extend Rx completion with error handling")

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
Signed-off-by: Matan Azrad <matan@nvidia.com>
Conflict: NA
Reference: https://git.dpdk.org/dpdk-stable/commit/?id=25c01bd32374
---
 drivers/net/mlx5/mlx5_rx.c | 34 ++++++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c
index f388fcc313..9fcd039c22 100644
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -390,6 +390,11 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
 	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 }
 
+/* Must be negative. */
+#define MLX5_ERROR_CQE_RET (-1)
+/* Must not be negative. */
+#define MLX5_RECOVERY_ERROR_RET 0
+
 /**
  * Handle a Rx error.
  * The function inserts the RQ state to reset when the first error CQE is
@@ -404,7 +409,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
  *   0 when called from non-vectorized Rx burst.
  *
  * @return
- *   -1 in case of recovery error, otherwise the CQE status.
+ *   MLX5_RECOVERY_ERROR_RET in case of recovery error, otherwise the CQE status.
  */
 int
 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
@@ -433,7 +438,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 		sm.queue_id = rxq->idx;
 		sm.state = IBV_WQS_RESET;
 		if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
-			return -1;
+			return MLX5_RECOVERY_ERROR_RET;
 		if (rxq_ctrl->dump_file_n <
 		    RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) {
 			MKSTR(err_str, "Unexpected CQE error syndrome "
@@ -473,7 +478,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 			sm.queue_id = rxq->idx;
 			sm.state = IBV_WQS_RDY;
 			if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
-				return -1;
+				return MLX5_RECOVERY_ERROR_RET;
 			if (vec) {
 				const uint32_t elts_n =
 					mlx5_rxq_mprq_enabled(rxq) ?
@@ -501,7 +506,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 							rte_pktmbuf_free_seg
 								(*elt);
 						}
-						return -1;
+						return MLX5_RECOVERY_ERROR_RET;
 					}
 				}
 				for (i = 0; i < (int)elts_n; ++i) {
@@ -520,7 +525,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 		}
 		return ret;
 	default:
-		return -1;
+		return MLX5_RECOVERY_ERROR_RET;
 	}
 }
 
@@ -538,7 +543,9 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 *   written.
  *
  * @return
- *   0 in case of empty CQE, otherwise the packet size in bytes.
+ *   0 in case of empty CQE, MLX5_ERROR_CQE_RET in case of error CQE,
+ *   otherwise the packet size in regular RxQ, and striding byte
+ *   count format in mprq case.
  */
 static inline int
 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -605,8 +612,8 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 				     rxq->err_state)) {
 				ret = mlx5_rx_err_handle(rxq, 0);
 				if (ret == MLX5_CQE_STATUS_HW_OWN ||
-				    ret == -1)
-					return 0;
+				    ret == MLX5_RECOVERY_ERROR_RET)
+					return MLX5_ERROR_CQE_RET;
 			} else {
 				return 0;
 			}
@@ -851,8 +858,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		if (!pkt) {
 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
 			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
-			if (!len) {
+			if (len <= 0) {
 				rte_mbuf_raw_free(rep);
+				if (unlikely(len == MLX5_ERROR_CQE_RET))
+					rq_ci = rxq->rq_ci << sges_n;
 				break;
 			}
 			pkt = seg;
@@ -1075,8 +1084,13 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 		cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
 		ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
-		if (!ret)
+		if (ret == 0)
+			break;
+		if (unlikely(ret == MLX5_ERROR_CQE_RET)) {
+			rq_ci = rxq->rq_ci;
+			consumed_strd = rxq->consumed_strd;
 			break;
+		}
 		byte_cnt = ret;
 		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
 		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
--
2.23.0
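
Below is a self-contained miniature of the pattern this patch applies,
not the mlx5 code itself: names such as rxq_state and poll_len are
hypothetical stand-ins. The poll routine reports a handled error CQE
through a negative sentinel, and the burst loop then discards its
cached consumer index and stride count, reloading them from the queue
structure that the recovery procedure has already reset.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ERROR_CQE_RET (-1) /* Negative sentinel, like MLX5_ERROR_CQE_RET. */

/* Illustrative queue state; the driver keeps these in mlx5_rxq_data. */
struct rxq_state {
	uint32_t rq_ci;         /* Authoritative RQ consumer index. */
	uint32_t consumed_strd; /* Authoritative stride count (MPRQ). */
	int error_pending;      /* Test hook standing in for an error CQE. */
};

/* Stand-in for mlx5_rx_poll_len(): returns 0 on an empty CQE, the
 * negative sentinel after error recovery, otherwise a byte count. */
static int poll_len(struct rxq_state *q)
{
	if (q->error_pending) {
		q->error_pending = 0;
		/* Recovery replenished the RQ, so the queue-side state
		 * was reset; locals cached by the caller are now stale. */
		q->rq_ci = 0;
		q->consumed_strd = 0;
		return ERROR_CQE_RET;
	}
	return 64; /* Pretend a 64-byte packet arrived. */
}

int main(void)
{
	struct rxq_state q = { .rq_ci = 7, .consumed_strd = 3,
			       .error_pending = 1 };
	/* The burst routines cache these locally for speed. */
	uint32_t rq_ci = q.rq_ci;
	uint32_t consumed_strd = q.consumed_strd;
	int ret = poll_len(&q);

	if (ret == ERROR_CQE_RET) {
		/* The essence of the fix: resynchronize the stale local
		 * copies from the queue structure before continuing. */
		rq_ci = q.rq_ci;
		consumed_strd = q.consumed_strd;
	}
	printf("rq_ci=%" PRIu32 " consumed_strd=%" PRIu32 "\n",
	       rq_ci, consumed_strd);
	return 0;
}

Built with any C99 compiler, this prints rq_ci=0 consumed_strd=0: the
locals track the replenished queue rather than their stale pre-error
values, which is exactly what the two hunks in the burst routines do.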