gazelle/0231-kni-down-not-stop-nic.patch
kircher ec652d9528 sync add-udp-multicast-support-in-gazelle
(cherry picked from commit 39a8ed6beb5633bb927b1dec1a4444ad0c321874)
2023-05-16 20:05:58 +08:00

219 lines
8.3 KiB
Diff

From 59c658fa0f75748fa7b49170b9fc34267ef1797c Mon Sep 17 00:00:00 2001
From: jiangheng12 <jiangheng14@huawei.com>
Date: Mon, 3 Apr 2023 14:42:53 +0800
Subject: [PATCH] kni down not stop nic
---
src/common/dpdk_common.c | 37 +++++++++++--------------
src/common/dpdk_common.h | 2 +-
src/lstack/core/lstack_cfg.c | 8 +++---
src/lstack/core/lstack_protocol_stack.c | 10 ++++---
src/lstack/netif/lstack_ethdev.c | 8 ++++--
src/ltran/ltran_forward.c | 5 ++--
6 files changed, 34 insertions(+), 36 deletions(-)
diff --git a/src/common/dpdk_common.c b/src/common/dpdk_common.c
index f5a20dd..23c96d6 100644
--- a/src/common/dpdk_common.c
+++ b/src/common/dpdk_common.c
@@ -34,16 +34,12 @@
#define COMMON_INFO(fmt, ...) LSTACK_LOG(INFO, LSTACK, fmt, ##__VA_ARGS__)
#endif
-static pthread_mutex_t g_kni_mutex = PTHREAD_MUTEX_INITIALIZER;
struct rte_kni *g_pkni = NULL;
+static volatile bool g_kni_started = false;
-/*
- * lock for preventing data race between tx thread and down operation.
- * Don't need to add lock on rx because down operation and rx are in the same thread
- */
-pthread_mutex_t *get_kni_mutex(void)
+bool get_kni_started(void)
{
- return &g_kni_mutex;
+ return g_kni_started;
}
struct rte_kni* get_gazelle_kni(void)
@@ -62,23 +58,18 @@ static int32_t kni_config_network_interface(uint16_t port_id, uint8_t if_up)
}
if (if_up != 0) { /* Configure network interface up */
- if (!g_bond_dev_started) {
- pthread_mutex_lock(&g_kni_mutex);
- ret = rte_eth_dev_start(port_id);
- pthread_mutex_unlock(&g_kni_mutex);
- if (ret < 0) {
- COMMON_ERR("Failed to start port %hu ret=%d\n", port_id, ret);
+ if (!g_kni_started) {
+ g_kni_started = true;
+ if (!g_bond_dev_started) {
+ rte_eth_dev_start(port_id);
+ g_bond_dev_started = true;
}
- g_bond_dev_started = true;
} else {
COMMON_INFO("trying to start a started dev. \n");
}
} else { /* Configure network interface down */
- if (g_bond_dev_started) {
- pthread_mutex_lock(&g_kni_mutex);
- rte_eth_dev_stop(port_id);
- pthread_mutex_unlock(&g_kni_mutex);
- g_bond_dev_started = false;
+ if (g_kni_started) {
+ g_kni_started = false;
} else {
COMMON_INFO("trying to stop a stopped dev. \n");
}
@@ -201,6 +192,12 @@ void dpdk_kni_release(void)
int32_t kni_process_tx(struct rte_mbuf **pkts_burst, uint32_t count)
{
uint32_t i;
+ if (!g_kni_started) {
+ for (i = 0; i < count; i++) {
+ rte_pktmbuf_free(pkts_burst[i]);
+ }
+ return 0;
+ }
for (i = 0; i < count; ++i) {
struct rte_ipv4_hdr * ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkts_burst[i], char*)
@@ -227,9 +224,7 @@ void kni_process_rx(uint16_t port)
nb_kni_rx = rte_kni_rx_burst(g_pkni, pkts_burst, GAZELLE_KNI_READ_SIZE);
if (nb_kni_rx > 0) {
- pthread_mutex_lock(&g_kni_mutex);
nb_rx = rte_eth_tx_burst(port, 0, pkts_burst, nb_kni_rx);
- pthread_mutex_unlock(&g_kni_mutex);
for (i = nb_rx; i < nb_kni_rx; ++i) {
rte_pktmbuf_free(pkts_burst[i]);
diff --git a/src/common/dpdk_common.h b/src/common/dpdk_common.h
index 6b107ae..2f0e8d1 100644
--- a/src/common/dpdk_common.h
+++ b/src/common/dpdk_common.h
@@ -94,7 +94,7 @@ static __rte_always_inline void time_stamp_into_mbuf(uint32_t rx_count, struct r
}
}
-pthread_mutex_t *get_kni_mutex(void);
+bool get_kni_started(void);
struct rte_kni* get_gazelle_kni(void);
int32_t dpdk_kni_init(uint16_t port, struct rte_mempool *pool);
int32_t kni_process_tx(struct rte_mbuf **pkts_burst, uint32_t count);
diff --git a/src/lstack/core/lstack_cfg.c b/src/lstack/core/lstack_cfg.c
index 168aa49..8558121 100644
--- a/src/lstack/core/lstack_cfg.c
+++ b/src/lstack/core/lstack_cfg.c
@@ -1026,6 +1026,10 @@ static int parse_process_index(void)
}
} else {
g_config_params.process_idx = (uint8_t)config_setting_get_int(process_idx);
+ if ((g_config_params.is_primary && g_config_params.process_idx != 0) ||
+ (!g_config_params.is_primary && g_config_params.process_idx == 0)) {
+ return -EINVAL;
+ }
}
return 0;
@@ -1045,9 +1049,5 @@ static int parse_tuple_filter(void)
return -EINVAL;
}
- // check primary process_idx
- if (g_config_params.is_primary && g_config_params.process_idx != 0) {
- return -EINVAL;
- }
return 0;
}
diff --git a/src/lstack/core/lstack_protocol_stack.c b/src/lstack/core/lstack_protocol_stack.c
index 76914f8..a858b37 100644
--- a/src/lstack/core/lstack_protocol_stack.c
+++ b/src/lstack/core/lstack_protocol_stack.c
@@ -483,7 +483,9 @@ static void* gazelle_stack_thread(void *arg)
* so processing KNI requests only in the thread with queue_id No.0 is sufficient. */
if (kni_switch && !queue_id && !(wakeup_tick & 0xfff)) {
rte_kni_handle_request(get_gazelle_kni());
- kni_handle_rx(get_port_id());
+ if (get_kni_started()) {
+ kni_handle_rx(get_port_id());
+ }
}
wakeup_tick++;
@@ -557,9 +559,9 @@ int32_t init_protocol_stack(void)
if (get_global_cfg_params()->is_primary) {
for (uint16_t idx = 0; idx < get_global_cfg_params()->tot_queue_num; idx++) {
- struct rte_mempool* rxtx_mbuf = create_pktmbuf_mempool("rxtx_mbuf",
- get_global_cfg_params()->mbuf_count_per_conn * get_global_cfg_params()->tcp_conn_count / stack_group->stack_num, RXTX_CACHE_SZ, idx);
- get_protocol_stack_group()->total_rxtx_pktmbuf_pool[idx] = rxtx_mbuf;
+ struct rte_mempool* rxtx_mbuf = create_pktmbuf_mempool("rxtx_mbuf",
+ get_global_cfg_params()->mbuf_count_per_conn * get_global_cfg_params()->tcp_conn_count / stack_group->stack_num, RXTX_CACHE_SZ, idx);
+ get_protocol_stack_group()->total_rxtx_pktmbuf_pool[idx] = rxtx_mbuf;
}
}
diff --git a/src/lstack/netif/lstack_ethdev.c b/src/lstack/netif/lstack_ethdev.c
index e26fe30..4103f22 100644
--- a/src/lstack/netif/lstack_ethdev.c
+++ b/src/lstack/netif/lstack_ethdev.c
@@ -719,7 +719,9 @@ void kni_handle_rx(uint16_t port_id)
void kni_handle_tx(struct rte_mbuf *mbuf)
{
- if (!get_global_cfg_params()->kni_switch) {
+ if (!get_global_cfg_params()->kni_switch ||
+ !get_kni_started()) {
+ rte_pktmbuf_free(mbuf);
return;
}
struct rte_ipv4_hdr *ipv4_hdr;
@@ -776,9 +778,9 @@ int32_t gazelle_eth_dev_poll(struct protocol_stack *stack, uint8_t use_ltran_fla
if (likely(transfer_type == TRANSFER_CURRENT_THREAD)) {
eth_dev_recv(stack->pkts[i], stack);
- }else if (transfer_type == TRANSFER_KERNEL) {
+ } else if (transfer_type == TRANSFER_KERNEL) {
kni_handle_tx(stack->pkts[i]);
- }else {
+ } else {
/*transfer to other thread*/
}
}
diff --git a/src/ltran/ltran_forward.c b/src/ltran/ltran_forward.c
index 8629acb..b41e1e2 100644
--- a/src/ltran/ltran_forward.c
+++ b/src/ltran/ltran_forward.c
@@ -690,7 +690,6 @@ static __rte_always_inline void downstream_forward_one(struct gazelle_stack *sta
/* send packets anyway. */
tx_pkts = 0;
- pthread_mutex_lock(get_kni_mutex());
while (tx_pkts < used_cnt) {
tx_pkts += rte_eth_tx_burst(port_id, queue_id,
(struct rte_mbuf **)(&dst_bufs[tx_pkts]),
@@ -702,7 +701,6 @@ static __rte_always_inline void downstream_forward_one(struct gazelle_stack *sta
}
}
}
- pthread_mutex_unlock(get_kni_mutex());
get_statistics()->port_stats[g_port_index].tx_bytes += tx_bytes;
get_statistics()->port_stats[g_port_index].tx += tx_pkts;
@@ -737,7 +735,8 @@ int32_t downstream_forward(uint16_t *port)
while (get_ltran_stop_flag() != GAZELLE_TRUE) {
/* kni rx means read from kni and send to nic */
- if (get_ltran_config()->dpdk.kni_switch == GAZELLE_ON) {
+ if (get_ltran_config()->dpdk.kni_switch == GAZELLE_ON &&
+ get_kni_started()) {
kni_process_rx(g_port_index);
}
--
2.33.0