rdma-core/0033-libhns-Use-new-interfaces-hr-reg-to-operate-the-WQE-.patch
Commit 123f94ea3f by zhengfeng luo: Backport batch of bugfix and refactor patches for hns from rdma-core v39
These patches are mainly related to IO operations.

Signed-off-by: zhengfeng luo <luozhengfeng@h-partners.com>
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
2022-08-29 21:05:49 +08:00

From 532c4b6babe97e3023a049f1c6bd8a8e3ad95140 Mon Sep 17 00:00:00 2001
From: Wenpeng Liang <liangwenpeng@huawei.com>
Date: Sat, 25 Dec 2021 17:42:55 +0800
Subject: libhns: Use new interfaces hr_reg_*() to operate the WQE field

Use hr_reg_xxx() to simplify the code that fills the WQE fields.

Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
---
providers/hns/hns_roce_u_hw_v2.c | 170 ++++++++++------------------
providers/hns/hns_roce_u_hw_v2.h | 184 ++++++++++++++-----------------
2 files changed, 144 insertions(+), 210 deletions(-)
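
For readers unfamiliar with the hr_reg_*() helpers used below: each WQE field
is described by a FIELD_LOC() entry that records the field's absolute high and
low bit positions within the structure (see the RCWQE_FIELD_LOC()/
UDWQE_FIELD_LOC() definitions added by this patch), and hr_reg_write()/
hr_reg_enable()/hr_reg_clear()/hr_reg_write_bool() mask the value into the
matching little-endian 32-bit word. The following is only a rough sketch of
that idea, not the actual rdma-core implementation (which lives in the hns
provider headers); GENMASK and the __le32 typedef are stand-ins for what the
real code pulls in from its own headers:

    #include <stdint.h>
    #include <endian.h>                     /* htole32() */

    typedef uint32_t __le32;                /* stand-in for <linux/types.h> */

    /* Bits h..l set, e.g. GENMASK(30, 15) covers the WQE index field. */
    #define GENMASK(h, l)  (((~0U) >> (31 - (h))) & ((~0U) << (l)))

    /* A field location expands to "struct type, high bit, low bit"; the
     * struct type is only used for build-time checks in the real code. */
    #define FIELD_LOC(field_type, h, l)  field_type, h, l

    #define hr_reg_write(ptr, field, val)  _hr_reg_write(ptr, field, val)
    #define _hr_reg_write(ptr, field_type, h, l, val)                       \
        ({                                                                   \
            __le32 *_dw = (__le32 *)(ptr) + (h) / 32;                        \
            uint32_t _mask = GENMASK((h) % 32, (l) % 32);                    \
                                                                             \
            /* clear the old bits, then OR in the shifted new value */       \
            *_dw &= htole32(~_mask);                                         \
            *_dw |= htole32(((uint32_t)(val) << ((l) % 32)) & _mask);        \
        })

    #define hr_reg_enable(ptr, field)           hr_reg_write(ptr, field, 1)
    #define hr_reg_clear(ptr, field)            hr_reg_write(ptr, field, 0)
    #define hr_reg_write_bool(ptr, field, val)  hr_reg_write(ptr, field, !!(val))

With this scheme a call such as hr_reg_write(rc_sq_wqe, RCWQE_WQE_IDX,
qp->sq.head) replaces a roce_set_field() call together with its separate *_S
shift and *_M mask defines, which is what the hunks below do throughout.
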
diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
index cf871ab..0cff12b 100644
--- a/providers/hns/hns_roce_u_hw_v2.c
+++ b/providers/hns/hns_roce_u_hw_v2.c
@@ -323,13 +323,10 @@ static void hns_roce_write_dwqe(struct hns_roce_qp *qp, void *wqe)
struct hns_roce_rc_sq_wqe *rc_sq_wqe = wqe;
/* All kinds of DirectWQE have the same header field layout */
- roce_set_bit(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_FLAG_S, 1);
- roce_set_field(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_DB_SL_L_M,
- RC_SQ_WQE_BYTE_4_DB_SL_L_S, qp->sl);
- roce_set_field(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_DB_SL_H_M,
- RC_SQ_WQE_BYTE_4_DB_SL_H_S, qp->sl >> HNS_ROCE_SL_SHIFT);
- roce_set_field(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_WQE_INDEX_M,
- RC_SQ_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
+ hr_reg_enable(rc_sq_wqe, RCWQE_FLAG);
+ hr_reg_write(rc_sq_wqe, RCWQE_DB_SL_L, qp->sl);
+ hr_reg_write(rc_sq_wqe, RCWQE_DB_SL_H, qp->sl >> HNS_ROCE_SL_SHIFT);
+ hr_reg_write(rc_sq_wqe, RCWQE_WQE_IDX, qp->sq.head);
hns_roce_write512(qp->sq.db_reg, wqe);
}
@@ -834,29 +831,15 @@ static void fill_ud_inn_inl_data(const struct ibv_send_wr *wr,
tmp += wr->sg_list[i].length;
}
- roce_set_field(ud_sq_wqe->msg_len,
- UD_SQ_WQE_BYTE_8_INL_DATE_15_0_M,
- UD_SQ_WQE_BYTE_8_INL_DATE_15_0_S,
- *loc & 0xffff);
-
- roce_set_field(ud_sq_wqe->sge_num_pd,
- UD_SQ_WQE_BYTE_16_INL_DATA_23_16_M,
- UD_SQ_WQE_BYTE_16_INL_DATA_23_16_S,
- (*loc >> 16) & 0xff);
+ hr_reg_write(ud_sq_wqe, UDWQE_INLINE_DATA_15_0, *loc & 0xffff);
+ hr_reg_write(ud_sq_wqe, UDWQE_INLINE_DATA_23_16, (*loc >> 16) & 0xff);
tmp_data = *loc >> 24;
loc++;
tmp_data |= ((*loc & 0xffff) << 8);
- roce_set_field(ud_sq_wqe->rsv_msg_start_sge_idx,
- UD_SQ_WQE_BYTE_20_INL_DATA_47_24_M,
- UD_SQ_WQE_BYTE_20_INL_DATA_47_24_S,
- tmp_data);
-
- roce_set_field(ud_sq_wqe->udpspn_rsv,
- UD_SQ_WQE_BYTE_24_INL_DATA_63_48_M,
- UD_SQ_WQE_BYTE_24_INL_DATA_63_48_S,
- *loc >> 16);
+ hr_reg_write(ud_sq_wqe, UDWQE_INLINE_DATA_47_24, tmp_data);
+ hr_reg_write(ud_sq_wqe, UDWQE_INLINE_DATA_63_48, *loc >> 16);
}
static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
@@ -877,13 +860,11 @@ static int set_ud_inl(struct hns_roce_qp *qp, const struct ibv_send_wr *wr,
return -EINVAL;
if (sge_info->total_len <= HNS_ROCE_MAX_UD_INL_INN_SZ) {
- roce_set_bit(ud_sq_wqe->rsv_msg_start_sge_idx,
- UD_SQ_WQE_BYTE_20_INL_TYPE_S, 0);
+ hr_reg_clear(ud_sq_wqe, UDWQE_INLINE_TYPE);
fill_ud_inn_inl_data(wr, ud_sq_wqe);
} else {
- roce_set_bit(ud_sq_wqe->rsv_msg_start_sge_idx,
- UD_SQ_WQE_BYTE_20_INL_TYPE_S, 1);
+ hr_reg_enable(ud_sq_wqe, UDWQE_INLINE_TYPE);
ret = fill_ext_sge_inl_data(qp, wr, sge_info);
if (ret)
@@ -891,8 +872,7 @@ static int set_ud_inl(struct hns_roce_qp *qp, const struct ibv_send_wr *wr,
sge_info->valid_num = sge_info->start_idx - sge_idx;
- roce_set_field(ud_sq_wqe->sge_num_pd, UD_SQ_WQE_SGE_NUM_M,
- UD_SQ_WQE_SGE_NUM_S, sge_info->valid_num);
+ hr_reg_write(ud_sq_wqe, UDWQE_SGE_NUM, sge_info->valid_num);
}
return 0;
@@ -919,8 +899,7 @@ static int check_ud_opcode(struct hns_roce_ud_sq_wqe *ud_sq_wqe,
ud_sq_wqe->immtdata = get_immtdata(ib_op, wr);
- roce_set_field(ud_sq_wqe->rsv_opcode, UD_SQ_WQE_OPCODE_M,
- UD_SQ_WQE_OPCODE_S, to_hr_opcode(ib_op));
+ hr_reg_write(ud_sq_wqe, UDWQE_OPCODE, to_hr_opcode(ib_op));
return 0;
}
@@ -931,24 +910,12 @@ static int fill_ud_av(struct hns_roce_ud_sq_wqe *ud_sq_wqe,
if (unlikely(ah->av.sl > MAX_SERVICE_LEVEL))
return EINVAL;
- roce_set_field(ud_sq_wqe->lbi_flow_label, UD_SQ_WQE_SL_M,
- UD_SQ_WQE_SL_S, ah->av.sl);
-
- roce_set_field(ud_sq_wqe->sge_num_pd, UD_SQ_WQE_PD_M,
- UD_SQ_WQE_PD_S, to_hr_pd(ah->ibv_ah.pd)->pdn);
-
- roce_set_field(ud_sq_wqe->tclass_vlan, UD_SQ_WQE_TCLASS_M,
- UD_SQ_WQE_TCLASS_S, ah->av.tclass);
-
- roce_set_field(ud_sq_wqe->tclass_vlan, UD_SQ_WQE_HOPLIMIT_M,
- UD_SQ_WQE_HOPLIMIT_S, ah->av.hop_limit);
-
- roce_set_field(ud_sq_wqe->lbi_flow_label, UD_SQ_WQE_FLOW_LABEL_M,
- UD_SQ_WQE_FLOW_LABEL_S, ah->av.flowlabel);
-
- roce_set_field(ud_sq_wqe->udpspn_rsv, UD_SQ_WQE_UDP_SPN_M,
- UD_SQ_WQE_UDP_SPN_S, ah->av.udp_sport);
-
+ hr_reg_write(ud_sq_wqe, UDWQE_SL, ah->av.sl);
+ hr_reg_write(ud_sq_wqe, UDWQE_PD, to_hr_pd(ah->ibv_ah.pd)->pdn);
+ hr_reg_write(ud_sq_wqe, UDWQE_TCLASS, ah->av.tclass);
+ hr_reg_write(ud_sq_wqe, UDWQE_HOPLIMIT, ah->av.hop_limit);
+ hr_reg_write(ud_sq_wqe, UDWQE_FLOW_LABEL, ah->av.flowlabel);
+ hr_reg_write(ud_sq_wqe, UDWQE_UDPSPN, ah->av.udp_sport);
memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
ud_sq_wqe->sgid_index = ah->av.gid_index;
memcpy(ud_sq_wqe->dgid, ah->av.dgid, HNS_ROCE_GID_SIZE);
@@ -962,17 +929,14 @@ static int fill_ud_data_seg(struct hns_roce_ud_sq_wqe *ud_sq_wqe,
{
int ret = 0;
- roce_set_field(ud_sq_wqe->rsv_msg_start_sge_idx,
- UD_SQ_WQE_MSG_START_SGE_IDX_M,
- UD_SQ_WQE_MSG_START_SGE_IDX_S,
- sge_info->start_idx & (qp->ex_sge.sge_cnt - 1));
+ hr_reg_write(ud_sq_wqe, UDWQE_MSG_START_SGE_IDX,
+ sge_info->start_idx & (qp->ex_sge.sge_cnt - 1));
set_ud_sge((struct hns_roce_v2_wqe_data_seg *)ud_sq_wqe, qp, wr, sge_info);
ud_sq_wqe->msg_len = htole32(sge_info->total_len);
- roce_set_field(ud_sq_wqe->sge_num_pd, UD_SQ_WQE_SGE_NUM_M,
- UD_SQ_WQE_SGE_NUM_S, sge_info->valid_num);
+ hr_reg_write(ud_sq_wqe, UDWQE_SGE_NUM, sge_info->valid_num);
if (wr->send_flags & IBV_SEND_INLINE)
ret = set_ud_inl(qp, wr, ud_sq_wqe, sge_info);
@@ -987,12 +951,12 @@ static int set_ud_wqe(void *wqe, struct hns_roce_qp *qp, struct ibv_send_wr *wr,
struct hns_roce_ud_sq_wqe *ud_sq_wqe = wqe;
int ret = 0;
- roce_set_bit(ud_sq_wqe->rsv_opcode, UD_SQ_WQE_CQE_S,
- !!(wr->send_flags & IBV_SEND_SIGNALED));
- roce_set_bit(ud_sq_wqe->rsv_opcode, UD_SQ_WQE_SE_S,
- !!(wr->send_flags & IBV_SEND_SOLICITED));
- roce_set_bit(ud_sq_wqe->rsv_opcode, UD_SQ_WQE_BYTE_4_INL_S,
- !!(wr->send_flags & IBV_SEND_INLINE));
+ hr_reg_write_bool(ud_sq_wqe, UDWQE_CQE,
+ !!(wr->send_flags & IBV_SEND_SIGNALED));
+ hr_reg_write_bool(ud_sq_wqe, UDWQE_SE,
+ !!(wr->send_flags & IBV_SEND_SOLICITED));
+ hr_reg_write_bool(ud_sq_wqe, UDWQE_INLINE,
+ !!(wr->send_flags & IBV_SEND_INLINE));
ret = check_ud_opcode(ud_sq_wqe, wr);
if (ret)
@@ -1001,8 +965,7 @@ static int set_ud_wqe(void *wqe, struct hns_roce_qp *qp, struct ibv_send_wr *wr,
ud_sq_wqe->qkey = htole32(wr->wr.ud.remote_qkey & 0x80000000 ?
qp->qkey : wr->wr.ud.remote_qkey);
- roce_set_field(ud_sq_wqe->rsv_dqpn, UD_SQ_WQE_DQPN_M,
- UD_SQ_WQE_DQPN_S, wr->wr.ud.remote_qpn);
+ hr_reg_write(ud_sq_wqe, UDWQE_DQPN, wr->wr.ud.remote_qpn);
ret = fill_ud_av(ud_sq_wqe, ah);
if (ret)
@@ -1021,8 +984,8 @@ static int set_ud_wqe(void *wqe, struct hns_roce_qp *qp, struct ibv_send_wr *wr,
if (qp->flags & HNS_ROCE_QP_CAP_OWNER_DB)
udma_to_device_barrier();
- roce_set_bit(ud_sq_wqe->rsv_opcode, UD_SQ_WQE_OWNER_S,
- ~((qp->sq.head + nreq) >> qp->sq.shift));
+ hr_reg_write_bool(wqe, RCWQE_OWNER,
+ !((qp->sq.head + nreq) & BIT(qp->sq.shift)));
return ret;
}
@@ -1045,8 +1008,7 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ibv_send_wr *wr,
dseg += sizeof(struct hns_roce_rc_sq_wqe);
if (sge_info->total_len <= HNS_ROCE_MAX_RC_INL_INN_SZ) {
- roce_set_bit(rc_sq_wqe->byte_20, RC_SQ_WQE_BYTE_20_INL_TYPE_S,
- 0);
+ hr_reg_clear(rc_sq_wqe, RCWQE_INLINE_TYPE);
for (i = 0; i < wr->num_sge; i++) {
memcpy(dseg, (void *)(uintptr_t)(wr->sg_list[i].addr),
@@ -1054,8 +1016,7 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ibv_send_wr *wr,
dseg += wr->sg_list[i].length;
}
} else {
- roce_set_bit(rc_sq_wqe->byte_20, RC_SQ_WQE_BYTE_20_INL_TYPE_S,
- 1);
+ hr_reg_enable(rc_sq_wqe, RCWQE_INLINE_TYPE);
ret = fill_ext_sge_inl_data(qp, wr, sge_info);
if (ret)
@@ -1063,9 +1024,7 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ibv_send_wr *wr,
sge_info->valid_num = sge_info->start_idx - sge_idx;
- roce_set_field(rc_sq_wqe->byte_16, RC_SQ_WQE_BYTE_16_SGE_NUM_M,
- RC_SQ_WQE_BYTE_16_SGE_NUM_S,
- sge_info->valid_num);
+ hr_reg_write(rc_sq_wqe, RCWQE_SGE_NUM, sge_info->valid_num);
}
return 0;
@@ -1074,17 +1033,16 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ibv_send_wr *wr,
static void set_bind_mw_seg(struct hns_roce_rc_sq_wqe *wqe,
const struct ibv_send_wr *wr)
{
- roce_set_bit(wqe->byte_4, RC_SQ_WQE_BYTE_4_MW_TYPE_S,
- wr->bind_mw.mw->type - 1);
- roce_set_bit(wqe->byte_4, RC_SQ_WQE_BYTE_4_ATOMIC_S,
- (wr->bind_mw.bind_info.mw_access_flags &
- IBV_ACCESS_REMOTE_ATOMIC) ? 1 : 0);
- roce_set_bit(wqe->byte_4, RC_SQ_WQE_BYTE_4_RDMA_READ_S,
- (wr->bind_mw.bind_info.mw_access_flags &
- IBV_ACCESS_REMOTE_READ) ? 1 : 0);
- roce_set_bit(wqe->byte_4, RC_SQ_WQE_BYTE_4_RDMA_WRITE_S,
- (wr->bind_mw.bind_info.mw_access_flags &
- IBV_ACCESS_REMOTE_WRITE) ? 1 : 0);
+ unsigned int access = wr->bind_mw.bind_info.mw_access_flags;
+
+ hr_reg_write_bool(wqe, RCWQE_MW_TYPE, wr->bind_mw.mw->type - 1);
+ hr_reg_write_bool(wqe, RCWQE_MW_RA_EN,
+ !!(access & IBV_ACCESS_REMOTE_ATOMIC));
+ hr_reg_write_bool(wqe, RCWQE_MW_RR_EN,
+ !!(access & IBV_ACCESS_REMOTE_READ));
+ hr_reg_write_bool(wqe, RCWQE_MW_RW_EN,
+ !!(access & IBV_ACCESS_REMOTE_WRITE));
+
wqe->new_rkey = htole32(wr->bind_mw.rkey);
wqe->byte_16 = htole32(wr->bind_mw.bind_info.length &
HNS_ROCE_ADDRESS_MASK);
@@ -1117,7 +1075,7 @@ static int check_rc_opcode(struct hns_roce_rc_sq_wqe *wqe,
wqe->va = htole64(wr->wr.atomic.remote_addr);
break;
case IBV_WR_LOCAL_INV:
- roce_set_bit(wqe->byte_4, RC_SQ_WQE_BYTE_4_SO_S, 1);
+ hr_reg_enable(wqe, RCWQE_SO);
/* fallthrough */
case IBV_WR_SEND_WITH_INV:
wqe->inv_key = htole32(wr->invalidate_rkey);
@@ -1130,8 +1088,7 @@ static int check_rc_opcode(struct hns_roce_rc_sq_wqe *wqe,
break;
}
- roce_set_field(wqe->byte_4, RC_SQ_WQE_BYTE_4_OPCODE_M,
- RC_SQ_WQE_BYTE_4_OPCODE_S, to_hr_opcode(wr->opcode));
+ hr_reg_write(wqe, RCWQE_OPCODE, to_hr_opcode(wr->opcode));
return ret;
}
@@ -1143,24 +1100,22 @@ static int set_rc_wqe(void *wqe, struct hns_roce_qp *qp, struct ibv_send_wr *wr,
struct hns_roce_v2_wqe_data_seg *dseg;
int ret;
- roce_set_bit(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_CQE_S,
- !!(wr->send_flags & IBV_SEND_SIGNALED));
- roce_set_bit(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_FENCE_S,
- !!(wr->send_flags & IBV_SEND_FENCE));
- roce_set_bit(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_SE_S,
- !!(wr->send_flags & IBV_SEND_SOLICITED));
- roce_set_bit(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_INLINE_S,
- !!(wr->send_flags & IBV_SEND_INLINE));
- roce_set_bit(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_SO_S, 0);
+ hr_reg_write_bool(wqe, RCWQE_CQE,
+ !!(wr->send_flags & IBV_SEND_SIGNALED));
+ hr_reg_write_bool(wqe, RCWQE_FENCE,
+ !!(wr->send_flags & IBV_SEND_FENCE));
+ hr_reg_write_bool(wqe, RCWQE_SE,
+ !!(wr->send_flags & IBV_SEND_SOLICITED));
+ hr_reg_write_bool(wqe, RCWQE_INLINE,
+ !!(wr->send_flags & IBV_SEND_INLINE));
+ hr_reg_clear(wqe, RCWQE_SO);
ret = check_rc_opcode(rc_sq_wqe, wr);
if (ret)
return ret;
- roce_set_field(rc_sq_wqe->byte_20,
- RC_SQ_WQE_BYTE_20_MSG_START_SGE_IDX_M,
- RC_SQ_WQE_BYTE_20_MSG_START_SGE_IDX_S,
- sge_info->start_idx & (qp->ex_sge.sge_cnt - 1));
+ hr_reg_write(rc_sq_wqe, RCWQE_MSG_START_SGE_IDX,
+ sge_info->start_idx & (qp->ex_sge.sge_cnt - 1));
if (wr->opcode == IBV_WR_BIND_MW)
goto wqe_valid;
@@ -1172,8 +1127,7 @@ static int set_rc_wqe(void *wqe, struct hns_roce_qp *qp, struct ibv_send_wr *wr,
rc_sq_wqe->msg_len = htole32(sge_info->total_len);
- roce_set_field(rc_sq_wqe->byte_16, RC_SQ_WQE_BYTE_16_SGE_NUM_M,
- RC_SQ_WQE_BYTE_16_SGE_NUM_S, sge_info->valid_num);
+ hr_reg_write(rc_sq_wqe, RCWQE_SGE_NUM, sge_info->valid_num);
if (wr->opcode == IBV_WR_ATOMIC_FETCH_AND_ADD ||
wr->opcode == IBV_WR_ATOMIC_CMP_AND_SWP) {
@@ -1196,8 +1150,8 @@ wqe_valid:
if (qp->flags & HNS_ROCE_QP_CAP_OWNER_DB)
udma_to_device_barrier();
- roce_set_bit(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_OWNER_S,
- ~((qp->sq.head + nreq) >> qp->sq.shift));
+ hr_reg_write_bool(wqe, RCWQE_OWNER,
+ !((qp->sq.head + nreq) & BIT(qp->sq.shift)));
return 0;
}
@@ -1243,10 +1197,8 @@ int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
switch (ibvqp->qp_type) {
case IBV_QPT_XRC_SEND:
- roce_set_field(wqe->byte_16,
- RC_SQ_WQE_BYTE_16_XRC_SRQN_M,
- RC_SQ_WQE_BYTE_16_XRC_SRQN_S,
- wr->qp_type.xrc.remote_srqn);
+ hr_reg_write(wqe, RCWQE_XRC_SRQN,
+ wr->qp_type.xrc.remote_srqn);
SWITCH_FALLTHROUGH;
case IBV_QPT_RC:
ret = set_rc_wqe(wqe, qp, wr, nreq, &sge_info);
diff --git a/providers/hns/hns_roce_u_hw_v2.h b/providers/hns/hns_roce_u_hw_v2.h
index 014cb8c..4330b7d 100644
--- a/providers/hns/hns_roce_u_hw_v2.h
+++ b/providers/hns/hns_roce_u_hw_v2.h
@@ -220,53 +220,44 @@ struct hns_roce_rc_sq_wqe {
__le64 va;
};
-#define RC_SQ_WQE_BYTE_4_OPCODE_S 0
-#define RC_SQ_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
-
-#define RC_SQ_WQE_BYTE_4_DB_SL_L_S 5
-#define RC_SQ_WQE_BYTE_4_DB_SL_L_M GENMASK(6, 5)
-
-#define RC_SQ_WQE_BYTE_4_DB_SL_H_S 13
-#define RC_SQ_WQE_BYTE_4_DB_SL_H_M GENMASK(14, 13)
-
-#define RC_SQ_WQE_BYTE_4_WQE_INDEX_S 15
-#define RC_SQ_WQE_BYTE_4_WQE_INDEX_M GENMASK(30, 15)
-
-#define RC_SQ_WQE_BYTE_4_OWNER_S 7
-
-#define RC_SQ_WQE_BYTE_4_CQE_S 8
-
-#define RC_SQ_WQE_BYTE_4_FENCE_S 9
-
-#define RC_SQ_WQE_BYTE_4_SO_S 10
-
-#define RC_SQ_WQE_BYTE_4_SE_S 11
-
-#define RC_SQ_WQE_BYTE_4_INLINE_S 12
-
-#define RC_SQ_WQE_BYTE_4_MW_TYPE_S 14
-
-#define RC_SQ_WQE_BYTE_4_ATOMIC_S 20
-
-#define RC_SQ_WQE_BYTE_4_RDMA_READ_S 21
-
-#define RC_SQ_WQE_BYTE_4_RDMA_WRITE_S 22
-
-#define RC_SQ_WQE_BYTE_4_FLAG_S 31
-
-#define RC_SQ_WQE_BYTE_16_XRC_SRQN_S 0
-#define RC_SQ_WQE_BYTE_16_XRC_SRQN_M \
- (((1UL << 24) - 1) << RC_SQ_WQE_BYTE_16_XRC_SRQN_S)
-
-#define RC_SQ_WQE_BYTE_16_SGE_NUM_S 24
-#define RC_SQ_WQE_BYTE_16_SGE_NUM_M \
- (((1UL << 8) - 1) << RC_SQ_WQE_BYTE_16_SGE_NUM_S)
-
-#define RC_SQ_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
-#define RC_SQ_WQE_BYTE_20_MSG_START_SGE_IDX_M \
- (((1UL << 24) - 1) << RC_SQ_WQE_BYTE_20_MSG_START_SGE_IDX_S)
-
-#define RC_SQ_WQE_BYTE_20_INL_TYPE_S 31
+#define RCWQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_rc_sq_wqe, h, l)
+
+#define RCWQE_OPCODE RCWQE_FIELD_LOC(4, 0)
+#define RCWQE_DB_SL_L RCWQE_FIELD_LOC(6, 5)
+#define RCWQE_SQPN_L RCWQE_FIELD_LOC(6, 5)
+#define RCWQE_OWNER RCWQE_FIELD_LOC(7, 7)
+#define RCWQE_CQE RCWQE_FIELD_LOC(8, 8)
+#define RCWQE_FENCE RCWQE_FIELD_LOC(9, 9)
+#define RCWQE_SO RCWQE_FIELD_LOC(10, 10)
+#define RCWQE_SE RCWQE_FIELD_LOC(11, 11)
+#define RCWQE_INLINE RCWQE_FIELD_LOC(12, 12)
+#define RCWQE_DB_SL_H RCWQE_FIELD_LOC(14, 13)
+#define RCWQE_WQE_IDX RCWQE_FIELD_LOC(30, 15)
+#define RCWQE_SQPN_H RCWQE_FIELD_LOC(30, 13)
+#define RCWQE_FLAG RCWQE_FIELD_LOC(31, 31)
+#define RCWQE_MSG_LEN RCWQE_FIELD_LOC(63, 32)
+#define RCWQE_INV_KEY_IMMTDATA RCWQE_FIELD_LOC(95, 64)
+#define RCWQE_XRC_SRQN RCWQE_FIELD_LOC(119, 96)
+#define RCWQE_SGE_NUM RCWQE_FIELD_LOC(127, 120)
+#define RCWQE_MSG_START_SGE_IDX RCWQE_FIELD_LOC(151, 128)
+#define RCWQE_REDUCE_CODE RCWQE_FIELD_LOC(158, 152)
+#define RCWQE_INLINE_TYPE RCWQE_FIELD_LOC(159, 159)
+#define RCWQE_RKEY RCWQE_FIELD_LOC(191, 160)
+#define RCWQE_VA_L RCWQE_FIELD_LOC(223, 192)
+#define RCWQE_VA_H RCWQE_FIELD_LOC(255, 224)
+#define RCWQE_LEN0 RCWQE_FIELD_LOC(287, 256)
+#define RCWQE_LKEY0 RCWQE_FIELD_LOC(319, 288)
+#define RCWQE_VA0_L RCWQE_FIELD_LOC(351, 320)
+#define RCWQE_VA0_H RCWQE_FIELD_LOC(383, 352)
+#define RCWQE_LEN1 RCWQE_FIELD_LOC(415, 384)
+#define RCWQE_LKEY1 RCWQE_FIELD_LOC(447, 416)
+#define RCWQE_VA1_L RCWQE_FIELD_LOC(479, 448)
+#define RCWQE_VA1_H RCWQE_FIELD_LOC(511, 480)
+
+#define RCWQE_MW_TYPE RCWQE_FIELD_LOC(256, 256)
+#define RCWQE_MW_RA_EN RCWQE_FIELD_LOC(258, 258)
+#define RCWQE_MW_RR_EN RCWQE_FIELD_LOC(259, 259)
+#define RCWQE_MW_RW_EN RCWQE_FIELD_LOC(260, 260)
struct hns_roce_v2_wqe_data_seg {
__le32 len;
@@ -323,60 +314,51 @@ struct hns_roce_ud_sq_wqe {
uint8_t dgid[HNS_ROCE_GID_SIZE];
};
-#define UD_SQ_WQE_OPCODE_S 0
-#define UD_SQ_WQE_OPCODE_M GENMASK(4, 0)
-
-#define UD_SQ_WQE_OWNER_S 7
-
-#define UD_SQ_WQE_CQE_S 8
-
-#define UD_SQ_WQE_SE_S 11
-
-#define UD_SQ_WQE_PD_S 0
-#define UD_SQ_WQE_PD_M GENMASK(23, 0)
-
-#define UD_SQ_WQE_SGE_NUM_S 24
-#define UD_SQ_WQE_SGE_NUM_M GENMASK(31, 24)
-
-#define UD_SQ_WQE_MSG_START_SGE_IDX_S 0
-#define UD_SQ_WQE_MSG_START_SGE_IDX_M GENMASK(23, 0)
-
-#define UD_SQ_WQE_UDP_SPN_S 16
-#define UD_SQ_WQE_UDP_SPN_M GENMASK(31, 16)
-
-#define UD_SQ_WQE_DQPN_S 0
-#define UD_SQ_WQE_DQPN_M GENMASK(23, 0)
-
-#define UD_SQ_WQE_VLAN_S 0
-#define UD_SQ_WQE_VLAN_M GENMASK(15, 0)
-
-#define UD_SQ_WQE_HOPLIMIT_S 16
-#define UD_SQ_WQE_HOPLIMIT_M GENMASK(23, 16)
-
-#define UD_SQ_WQE_TCLASS_S 24
-#define UD_SQ_WQE_TCLASS_M GENMASK(31, 24)
-
-#define UD_SQ_WQE_FLOW_LABEL_S 0
-#define UD_SQ_WQE_FLOW_LABEL_M GENMASK(19, 0)
-
-#define UD_SQ_WQE_SL_S 20
-#define UD_SQ_WQE_SL_M GENMASK(23, 20)
-
-#define UD_SQ_WQE_VLAN_EN_S 30
-
-#define UD_SQ_WQE_LBI_S 31
-
-#define UD_SQ_WQE_BYTE_4_INL_S 12
-#define UD_SQ_WQE_BYTE_20_INL_TYPE_S 31
-
-#define UD_SQ_WQE_BYTE_8_INL_DATE_15_0_S 16
-#define UD_SQ_WQE_BYTE_8_INL_DATE_15_0_M GENMASK(31, 16)
-#define UD_SQ_WQE_BYTE_16_INL_DATA_23_16_S 24
-#define UD_SQ_WQE_BYTE_16_INL_DATA_23_16_M GENMASK(31, 24)
-#define UD_SQ_WQE_BYTE_20_INL_DATA_47_24_S 0
-#define UD_SQ_WQE_BYTE_20_INL_DATA_47_24_M GENMASK(23, 0)
-#define UD_SQ_WQE_BYTE_24_INL_DATA_63_48_S 0
-#define UD_SQ_WQE_BYTE_24_INL_DATA_63_48_M GENMASK(15, 0)
+#define UDWQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_ud_sq_wqe, h, l)
+
+#define UDWQE_OPCODE UDWQE_FIELD_LOC(4, 0)
+#define UDWQE_DB_SL_L UDWQE_FIELD_LOC(6, 5)
+#define UDWQE_OWNER UDWQE_FIELD_LOC(7, 7)
+#define UDWQE_CQE UDWQE_FIELD_LOC(8, 8)
+#define UDWQE_RSVD1 UDWQE_FIELD_LOC(10, 9)
+#define UDWQE_SE UDWQE_FIELD_LOC(11, 11)
+#define UDWQE_INLINE UDWQE_FIELD_LOC(12, 12)
+#define UDWQE_DB_SL_H UDWQE_FIELD_LOC(14, 13)
+#define UDWQE_WQE_IDX UDWQE_FIELD_LOC(30, 15)
+#define UDWQE_FLAG UDWQE_FIELD_LOC(31, 31)
+#define UDWQE_MSG_LEN UDWQE_FIELD_LOC(63, 32)
+#define UDWQE_IMMTDATA UDWQE_FIELD_LOC(95, 64)
+#define UDWQE_PD UDWQE_FIELD_LOC(119, 96)
+#define UDWQE_SGE_NUM UDWQE_FIELD_LOC(127, 120)
+#define UDWQE_MSG_START_SGE_IDX UDWQE_FIELD_LOC(151, 128)
+#define UDWQE_RSVD3 UDWQE_FIELD_LOC(158, 152)
+#define UDWQE_INLINE_TYPE UDWQE_FIELD_LOC(159, 159)
+#define UDWQE_RSVD4 UDWQE_FIELD_LOC(175, 160)
+#define UDWQE_UDPSPN UDWQE_FIELD_LOC(191, 176)
+#define UDWQE_QKEY UDWQE_FIELD_LOC(223, 192)
+#define UDWQE_DQPN UDWQE_FIELD_LOC(247, 224)
+#define UDWQE_RSVD5 UDWQE_FIELD_LOC(255, 248)
+#define UDWQE_VLAN UDWQE_FIELD_LOC(271, 256)
+#define UDWQE_HOPLIMIT UDWQE_FIELD_LOC(279, 272)
+#define UDWQE_TCLASS UDWQE_FIELD_LOC(287, 280)
+#define UDWQE_FLOW_LABEL UDWQE_FIELD_LOC(307, 288)
+#define UDWQE_SL UDWQE_FIELD_LOC(311, 308)
+#define UDWQE_PORTN UDWQE_FIELD_LOC(314, 312)
+#define UDWQE_RSVD6 UDWQE_FIELD_LOC(317, 315)
+#define UDWQE_UD_VLAN_EN UDWQE_FIELD_LOC(318, 318)
+#define UDWQE_LBI UDWQE_FIELD_LOC(319, 319)
+#define UDWQE_DMAC_L UDWQE_FIELD_LOC(351, 320)
+#define UDWQE_DMAC_H UDWQE_FIELD_LOC(367, 352)
+#define UDWQE_GMV_IDX UDWQE_FIELD_LOC(383, 368)
+#define UDWQE_DGID0 UDWQE_FIELD_LOC(415, 384)
+#define UDWQE_DGID1 UDWQE_FIELD_LOC(447, 416)
+#define UDWQE_DGID2 UDWQE_FIELD_LOC(479, 448)
+#define UDWQE_DGID3 UDWQE_FIELD_LOC(511, 480)
+
+#define UDWQE_INLINE_DATA_15_0 UDWQE_FIELD_LOC(63, 48)
+#define UDWQE_INLINE_DATA_23_16 UDWQE_FIELD_LOC(127, 120)
+#define UDWQE_INLINE_DATA_47_24 UDWQE_FIELD_LOC(151, 128)
+#define UDWQE_INLINE_DATA_63_48 UDWQE_FIELD_LOC(175, 160)
#define MAX_SERVICE_LEVEL 0x7
--
2.27.0