Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c  | 104
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c |  20
2 files changed, 62 insertions(+), 62 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index a07d8e124a80..83fa16fa4644 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -649,31 +649,31 @@ static int send_connect(struct c4iw_ep *ep)
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
- if (win > RCV_BUFSIZ_MASK)
- win = RCV_BUFSIZ_MASK;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
opt0 = (nocong ? NO_CONG(1) : 0) |
- KEEP_ALIVE(1) |
+ KEEP_ALIVE_F |
DELACK(1) |
- WND_SCALE(wscale) |
- MSS_IDX(mtu_idx) |
- L2T_IDX(ep->l2t->idx) |
- TX_CHAN(ep->tx_chan) |
- SMAC_SEL(ep->smac_idx) |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(ep->l2t->idx) |
+ TX_CHAN_V(ep->tx_chan) |
+ SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos) |
- ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(win);
- opt2 = RX_CHANNEL(0) |
+ ULP_MODE_V(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ_V(win);
+ opt2 = RX_CHANNEL_V(0) |
CCTRL_ECN(enable_ecn) |
- RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps)
opt2 |= TSTAMPS_EN(1);
if (enable_tcp_sack)
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
- opt2 |= WND_SCALE_EN(1);
+ opt2 |= WND_SCALE_EN_F;
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
- opt2 |= T5_OPT_2_VALID;
+ opt2 |= T5_OPT_2_VALID_F;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
}
@@ -736,7 +736,7 @@ static int send_connect(struct c4iw_ep *ep)
t5_req->local_ip = la->sin_addr.s_addr;
t5_req->peer_ip = ra->sin_addr.s_addr;
t5_req->opt0 = cpu_to_be64(opt0);
- t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+ t5_req->params = cpu_to_be64(FILTER_TUPLE_V(
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t)));
@@ -762,7 +762,7 @@ static int send_connect(struct c4iw_ep *ep)
t5_req6->peer_ip_lo = *((__be64 *)
(ra6->sin6_addr.s6_addr + 8));
t5_req6->opt0 = cpu_to_be64(opt0);
- t5_req6->params = cpu_to_be64(V_FILTER_TUPLE(
+ t5_req6->params = cpu_to_be64(FILTER_TUPLE_V(
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t)));
@@ -1249,15 +1249,15 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
* due to the limit in the number of bits in the RCV_BUFSIZ field,
* then add the overage in to the credits returned.
*/
- if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
- credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;
+ if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
+ credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
memset(req, 0, wrlen);
INIT_TP_WR(req, ep->hwtid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
ep->hwtid));
- req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
+ req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
F_RX_DACK_CHANGE |
V_RX_DACK_MODE(dack_mode));
set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
@@ -1778,34 +1778,34 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
- if (win > RCV_BUFSIZ_MASK)
- win = RCV_BUFSIZ_MASK;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
(nocong ? NO_CONG(1) : 0) |
- KEEP_ALIVE(1) |
+ KEEP_ALIVE_F |
DELACK(1) |
- WND_SCALE(wscale) |
- MSS_IDX(mtu_idx) |
- L2T_IDX(ep->l2t->idx) |
- TX_CHAN(ep->tx_chan) |
- SMAC_SEL(ep->smac_idx) |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(ep->l2t->idx) |
+ TX_CHAN_V(ep->tx_chan) |
+ SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos) |
- ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(win));
+ ULP_MODE_V(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ_V(win));
req->tcb.opt2 = (__force __be32) (PACE(1) |
TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
- RX_CHANNEL(0) |
+ RX_CHANNEL_V(0) |
CCTRL_ECN(enable_ecn) |
- RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
if (enable_tcp_timestamps)
- req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
+ req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
if (enable_tcp_sack)
- req->tcb.opt2 |= (__force __be32) SACK_EN(1);
+ req->tcb.opt2 |= (__force __be32)SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
- req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
- req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
- req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
+ req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
+ req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
+ req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
set_bit(ACT_OFLD_CONN, &ep->com.history);
c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -2178,28 +2178,28 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
- if (win > RCV_BUFSIZ_MASK)
- win = RCV_BUFSIZ_MASK;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
opt0 = (nocong ? NO_CONG(1) : 0) |
- KEEP_ALIVE(1) |
+ KEEP_ALIVE_F |
DELACK(1) |
- WND_SCALE(wscale) |
- MSS_IDX(mtu_idx) |
- L2T_IDX(ep->l2t->idx) |
- TX_CHAN(ep->tx_chan) |
- SMAC_SEL(ep->smac_idx) |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(ep->l2t->idx) |
+ TX_CHAN_V(ep->tx_chan) |
+ SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos >> 2) |
- ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(win);
- opt2 = RX_CHANNEL(0) |
- RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ ULP_MODE_V(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ_V(win);
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps && req->tcpopt.tstamp)
opt2 |= TSTAMPS_EN(1);
if (enable_tcp_sack && req->tcpopt.sack)
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
- opt2 |= WND_SCALE_EN(1);
+ opt2 |= WND_SCALE_EN_F;
if (enable_ecn) {
const struct tcphdr *tcph;
u32 hlen = ntohl(req->hdr_len);
@@ -2211,7 +2211,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
}
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
u32 isn = (prandom_u32() & ~7UL) - 1;
- opt2 |= T5_OPT_2_VALID;
+ opt2 |= T5_OPT_2_VALID_F;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
rpl5 = (void *)rpl;
@@ -3557,7 +3557,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
* We store the qid in opt2 which will be used by the firmware
* to send us the wr response.
*/
- req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
+ req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
/*
* We initialize the MSS index in TCB to 0xF.
@@ -3565,7 +3565,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
* TCB picks up the correct value. If this was 0
* TP will ignore any value > 0 for MSS index.
*/
- req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
+ req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
req->cookie = (unsigned long)skb;
set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 9335148c1ad9..0744455cd88b 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -78,14 +78,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
(wait ? FW_WR_COMPL_F : 0));
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
- req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+ req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
- req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
- req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
sgl = (struct ulptx_sgl *)(req + 1);
- sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
ULPTX_NSGE(1));
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
@@ -107,12 +107,12 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
u8 wr_len, *to_dp, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
- __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+ __be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
if (is_t4(rdev->lldi.adapter_type))
- cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
+ cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
else
- cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
+ cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
addr &= 0x7FFFFFF;
PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -144,14 +144,14 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cmd;
- req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
16));
- req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));
sc = (struct ulptx_idata *)(req + 1);
- sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
to_dp = (u8 *)(sc + 1);
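
Note: the changes above are a mechanical rename of the cxgb4 hardware field macros at their call sites, from the older FOO(x) / FOO_MASK / V_FOO(x) spellings to the newer FOO_V(x) / FOO_M / FOO_F spellings. The sketch below is only an illustration of how such a macro family is conventionally laid out; the field name is taken from the diff, but the shift and mask values are placeholder assumptions, not the real t4_msg.h definitions.

/*
 * Illustrative sketch of the FOO_S/_M/_V/_G/_F macro convention the
 * patch converts to. Shift and mask values here are placeholders.
 */
#define RSS_QUEUE_S          0
#define RSS_QUEUE_M          0x3ffU                        /* field mask (was FOO_MASK) */
#define RSS_QUEUE_V(x)       ((x) << RSS_QUEUE_S)          /* place a value (was FOO(x) or V_FOO(x)) */
#define RSS_QUEUE_G(x)       (((x) >> RSS_QUEUE_S) & RSS_QUEUE_M)  /* extract a value */

#define RSS_QUEUE_VALID_S    10
#define RSS_QUEUE_VALID_V(x) ((x) << RSS_QUEUE_VALID_S)
#define RSS_QUEUE_VALID_F    RSS_QUEUE_VALID_V(1U)         /* single-bit flag (was a bare FOO or FOO(1)) */

/* Usage matching the converted call sites above, e.g.:
 *   opt2 = RX_CHANNEL_V(0) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
 */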