author		Linus Torvalds <torvalds@linux-foundation.org>	2017-07-05 12:31:59 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-05 12:31:59 -0700
commit		5518b69b76680a4f2df96b1deca260059db0c2de (patch)
tree		f33cd1519c8efb4590500f2f9617400be233238c /drivers/infiniband/hw
parent		8ad06e56dcbc1984ef0ff8f6e3c19982c5809f73 (diff)
parent		0e72582270c07850b92cac351c8b97d4f9c123b9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
"Reasonably busy this cycle, but perhaps not as busy as in the 4.12
merge window:
1) Several optimizations for UDP processing under high load from
Paolo Abeni.
2) Support pacing internally in TCP, for cases where using the sch_fq
packet scheduler for this is not practical. From Eric Dumazet.
3) Support multiple filter chains per qdisc, from Jiri Pirko.
4) Move to 1ms TCP timestamp clock, from Eric Dumazet.
5) Add batch dequeueing to vhost_net, from Jason Wang.
6) Flesh out SCTP checksum offload support more completely, from
Davide Caratti.
7) More plumbing of extended netlink ACKs, from David Ahern, Pablo
Neira Ayuso, and Matthias Schiffer.
8) Add devlink support to nfp driver, from Simon Horman.
9) Add RTM_F_FIB_MATCH flag to RTM_GETROUTE queries, from Roopa
Prabhu.
10) Add stack depth tracking to BPF verifier and use this information
in the various eBPF JITs. From Alexei Starovoitov.
11) Support XDP on qed device VFs, from Yuval Mintz.
12) Introduce BPF program IDs for better introspection of installed BPF
programs. From Martin KaFai Lau. (A syscall-level sketch follows
this message.)
13) Add bpf_set_hash helper for TC bpf programs, from Daniel Borkmann.
14) For loads, allow narrower accesses in bpf verifier checking, from
Yonghong Song.
15) Support MIPS in the BPF selftests and samples infrastructure, the
MIPS eBPF JIT will be merged in via the MIPS GIT tree. From David
Daney.
16) Support kernel-based TLS, from Dave Watson and others. (A userspace
usage sketch follows this message.)
17) Completely remove DST garbage collection, from Wei Wang.
18) Allow installing TCP MD5 rules using address prefixes, from Ivan
Delalande. (Example after this message.)
19) Add XDP support to Intel i40e driver, from Björn Töpel.
20) Add support for TC flower offload in nfp driver, from Simon
Horman, Pieter Jansen van Vuuren, Benjamin LaHaise, Jakub
Kicinski, and Bert van Leeuwen.
21) IPsec offloading support in mlx5, from Ilan Tayari.
22) Add HW PTP support to macb driver, from Rafal Ozieblo.
23) Networking refcount_t conversions, from Elena Reshetova.
24) Add sock_ops support to BPF, from Lawrence Brakmo. This is useful
for tuning the TCP sockopt settings of a group of applications,
currently via cgroups. (A BPF sketch follows this message.)"
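
As an illustration of item 12: program IDs are enumerated through two new
bpf(2) commands, BPF_PROG_GET_NEXT_ID and BPF_PROG_GET_FD_BY_ID. A minimal
userspace sketch (raw syscall, CAP_SYS_ADMIN assumed, error handling elided):

    #include <linux/bpf.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Walk the IDs of all BPF programs loaded in the kernel.
     * BPF_PROG_GET_NEXT_ID fails with ENOENT after the last ID;
     * BPF_PROG_GET_FD_BY_ID would turn an ID into an fd for
     * further introspection. */
    int main(void)
    {
            union bpf_attr attr;
            __u32 id = 0;

            for (;;) {
                    memset(&attr, 0, sizeof(attr));
                    attr.start_id = id;
                    if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID,
                                &attr, sizeof(attr)) < 0)
                            break;
                    id = attr.next_id;
                    printf("prog id %u\n", id);
            }
            return 0;
    }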
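
For item 16: with kernel TLS the handshake stays in userspace and the kernel
takes over record encryption on an already-established TCP socket. A minimal
TX-side sketch using the uapi added by this series (<linux/tls.h>); the
fallback defines cover libc headers that predate these constants, and the key
material would come from the completed userspace handshake:

    #include <linux/tls.h>
    #include <netinet/tcp.h>
    #include <string.h>
    #include <sys/socket.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282
    #endif
    #ifndef TCP_ULP
    #define TCP_ULP 31
    #endif

    /* Switch an established TCP socket's transmit path to kTLS. */
    static int enable_ktls_tx(int sock)
    {
            struct tls12_crypto_info_aes_gcm_128 ci;

            if (setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
                    return -1;

            memset(&ci, 0, sizeof(ci));
            ci.info.version = TLS_1_2_VERSION;
            ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
            /* ci.key, ci.iv, ci.salt and ci.rec_seq must be filled in
             * from the completed userspace TLS handshake. */
            return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
    }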
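
For item 18: instead of one MD5 key per peer address, the new TCP_MD5SIG_EXT
socket option takes an address plus prefix length. A sketch (constants guarded
for older libc headers; struct tcp_md5sig with tcpm_flags/tcpm_prefixlen is
from the updated <linux/tcp.h> uapi):

    #include <linux/tcp.h>          /* struct tcp_md5sig */
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    #ifndef TCP_MD5SIG_EXT
    #define TCP_MD5SIG_EXT 32
    #define TCP_MD5SIG_FLAG_PREFIX 0x1
    #endif

    /* One key for every peer in 10.0.0.0/8 on a listening socket,
     * rather than one setsockopt() call per peer address. */
    static int md5_key_for_prefix(int sock)
    {
            struct sockaddr_in *addr;
            struct tcp_md5sig md5;
            static const char key[] = "s3cret";

            memset(&md5, 0, sizeof(md5));
            md5.tcpm_flags = TCP_MD5SIG_FLAG_PREFIX;
            md5.tcpm_prefixlen = 8;
            md5.tcpm_keylen = sizeof(key) - 1;
            memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

            addr = (struct sockaddr_in *)&md5.tcpm_addr;
            addr->sin_family = AF_INET;
            addr->sin_addr.s_addr = htonl(0x0a000000);  /* 10.0.0.0 */

            return setsockopt(sock, IPPROTO_TCP, TCP_MD5SIG_EXT,
                              &md5, sizeof(md5));
    }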
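
And for item 24: a sock_ops program attaches to a cgroup (BPF_CGROUP_SOCK_OPS)
and is called back at points in a TCP socket's life, where it can use the new
bpf_setsockopt() helper. A kernel-build sketch modeled on
samples/bpf/tcp_clamp_kern.c from this series (bpf_helpers.h is the samples'
helper header):

    #include <uapi/linux/bpf.h>
    #include <linux/socket.h>       /* SOL_TCP */
    #include "bpf_helpers.h"        /* from samples/bpf */

    /* Clamp the send congestion window of every TCP socket created
     * in the cgroup this program is attached to. */
    SEC("sockops")
    int bpf_clamp(struct bpf_sock_ops *skops)
    {
            int clamp = 100;        /* packets */

            switch (skops->op) {
            case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
            case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
                    /* TCP_BPF_SNDCWND_CLAMP is the BPF-only sockopt
                     * added in this series (see the shortlog below). */
                    bpf_setsockopt(skops, SOL_TCP, TCP_BPF_SNDCWND_CLAMP,
                                   &clamp, sizeof(clamp));
                    break;
            }
            return 1;
    }

    char _license[] SEC("license") = "GPL";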
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1899 commits)
net: phy: dp83867: add workaround for incorrect RX_CTRL pin strap
dt-bindings: phy: dp83867: provide a workaround for incorrect RX_CTRL pin strap
cxgb4: Support for get_ts_info ethtool method
cxgb4: Add PTP Hardware Clock (PHC) support
cxgb4: time stamping interface for PTP
nfp: default to chained metadata prepend format
nfp: remove legacy MAC address lookup
nfp: improve order of interfaces in breakout mode
net: macb: remove extraneous return when MACB_EXT_DESC is defined
bpf: add missing break in for the TCP_BPF_SNDCWND_CLAMP case
bpf: fix return in load_bpf_file
mpls: fix rtm policy in mpls_getroute
net, ax25: convert ax25_cb.refcount from atomic_t to refcount_t
net, ax25: convert ax25_route.refcount from atomic_t to refcount_t
net, ax25: convert ax25_uid_assoc.refcount from atomic_t to refcount_t
net, sctp: convert sctp_ep_common.refcnt from atomic_t to refcount_t
net, sctp: convert sctp_transport.refcnt from atomic_t to refcount_t
net, sctp: convert sctp_chunk.refcnt from atomic_t to refcount_t
net, sctp: convert sctp_datamsg.refcnt from atomic_t to refcount_t
net, sctp: convert sctp_auth_bytes.refcnt from atomic_t to refcount_t
...
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c  |   8
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c   |  31
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c   |   6
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c        |  51
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c        |   6
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c       |   6
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c        |   9
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c       |   5
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c         |   6
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c        |   4
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c       | 132
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c         |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c         |  32
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c        |   4
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c      |   4
-rw-r--r--  drivers/infiniband/hw/qedr/main.c       |  16
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h       |   6
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c    | 240
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c      |   6
19 files changed, 337 insertions(+), 237 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 558d6a03375d..3eff6541bd6f 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -142,8 +142,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
 		pr_debug("%s alloc_skb failed\n", __func__);
 		return -ENOMEM;
 	}
-	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
-	memset(wqe, 0, sizeof(*wqe));
+	wqe = skb_put_zero(skb, sizeof(*wqe));
 	build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
 		       T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
 		       T3_SOPEOP);
@@ -561,8 +560,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 	ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) |
 			V_EC_RESPQ(0) | V_EC_TYPE(0) | V_EC_GEN(1) |
 			V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
-	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
-	memset(wqe, 0, sizeof(*wqe));
+	wqe = skb_put_zero(skb, sizeof(*wqe));
 	build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
 		       T3_CTL_QP_TID, 7, T3_SOPEOP);
 	wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
@@ -837,7 +835,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
 	if (!skb)
 		return -ENOMEM;
 	pr_debug("%s rdev_p %p\n", __func__, rdev_p);
-	wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
+	wqe = __skb_put(skb, sizeof(*wqe));
 	wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
 	wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
 					   V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index b61630eba912..86975370a4c0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -175,7 +175,7 @@ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
 	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
 	if (!skb)
 		return;
-	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
+	req = skb_put(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
 	skb->priority = CPL_PRIORITY_SETUP;
@@ -190,7 +190,7 @@ int iwch_quiesce_tid(struct iwch_ep *ep)
 
 	if (!skb)
 		return -ENOMEM;
-	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
+	req = skb_put(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
@@ -211,7 +211,7 @@ int iwch_resume_tid(struct iwch_ep *ep)
 
 	if (!skb)
 		return -ENOMEM;
-	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
+	req = skb_put(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
@@ -398,7 +398,7 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
 	}
 	skb->priority = CPL_PRIORITY_DATA;
 	set_arp_failure_handler(skb, arp_failure_discard);
-	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
+	req = skb_put(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
 	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
@@ -417,8 +417,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	}
 	skb->priority = CPL_PRIORITY_DATA;
 	set_arp_failure_handler(skb, abort_arp_failure);
-	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
-	memset(req, 0, sizeof(*req));
+	req = skb_put_zero(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
 	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
@@ -456,7 +455,7 @@ static int send_connect(struct iwch_ep *ep)
 	skb->priority = CPL_PRIORITY_SETUP;
 	set_arp_failure_handler(skb, act_open_req_arp_failure);
 
-	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
+	req = skb_put(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
 	req->local_port = ep->com.local_addr.sin_port;
@@ -514,7 +513,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
 	set_arp_failure_handler(skb, arp_failure_discard);
 	skb_reset_transport_header(skb);
 	len = skb->len;
-	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
+	req = skb_push(skb, sizeof(*req));
 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
 	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
 	req->len = htonl(len);
@@ -547,7 +546,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
 		return -ENOMEM;
 	}
 	skb_reserve(skb, sizeof(*req));
-	mpa = (struct mpa_message *) skb_put(skb, mpalen);
+	mpa = skb_put(skb, mpalen);
 	memset(mpa, 0, sizeof(*mpa));
 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
 	mpa->flags = MPA_REJECT;
@@ -565,7 +564,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
 	skb->priority = CPL_PRIORITY_DATA;
 	set_arp_failure_handler(skb, arp_failure_discard);
 	skb_reset_transport_header(skb);
-	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
+	req = skb_push(skb, sizeof(*req));
 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
 	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
 	req->len = htonl(mpalen);
@@ -597,7 +596,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
 	}
 	skb->priority = CPL_PRIORITY_DATA;
 	skb_reserve(skb, sizeof(*req));
-	mpa = (struct mpa_message *) skb_put(skb, mpalen);
+	mpa = skb_put(skb, mpalen);
 	memset(mpa, 0, sizeof(*mpa));
 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
 	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
@@ -616,7 +615,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
 	set_arp_failure_handler(skb, arp_failure_discard);
 	skb_reset_transport_header(skb);
 	len = skb->len;
-	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
+	req = skb_push(skb, sizeof(*req));
 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
 	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
 	req->len = htonl(len);
@@ -801,7 +800,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits)
 		return 0;
 	}
 
-	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
+	req = skb_put(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
 	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
@@ -1206,7 +1205,7 @@ static int listen_start(struct iwch_listen_ep *ep)
 		return -ENOMEM;
 	}
 
-	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
+	req = skb_put(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
 	req->local_port = ep->com.local_addr.sin_port;
@@ -1247,7 +1246,7 @@ static int listen_stop(struct iwch_listen_ep *ep)
 		pr_err("%s - failed to alloc skb\n", __func__);
 		return -ENOMEM;
 	}
-	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
+	req = skb_put(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	req->cpu_idx = 0;
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
@@ -1615,7 +1614,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		goto out;
 	}
 	rpl_skb->priority = CPL_PRIORITY_DATA;
-	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
+	rpl = skb_put(rpl_skb, sizeof(*rpl));
 	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
 	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index ba6d5d281b03..7f633da0185d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -670,8 +670,7 @@ int iwch_post_zb_read(struct iwch_ep *ep)
 		pr_err("%s cannot send zb_read!!\n", __func__);
 		return -ENOMEM;
 	}
-	wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
-	memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
+	wqe = skb_put_zero(skb, sizeof(struct t3_rdma_read_wr));
 	wqe->read.rdmaop = T3_READ_REQ;
 	wqe->read.reserved[0] = 0;
 	wqe->read.reserved[1] = 0;
@@ -702,8 +701,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
 		pr_err("%s cannot send TERMINATE!\n", __func__);
 		return -ENOMEM;
 	}
-	wqe = (union t3_wr *)skb_put(skb, 40);
-	memset(wqe, 0, 40);
+	wqe = skb_put_zero(skb, 40);
 	wqe->send.rdmaop = T3_TERMINATE;
 
 	/* immediate data length */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0910faf3587b..e49b34c3b136 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -398,7 +398,8 @@ void _c4iw_free_ep(struct kref *kref)
 				   (const u32 *)&sin6->sin6_addr.s6_addr,
 				   1);
 	}
-	cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
+	cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
+			 ep->com.local_addr.ss_family);
 	dst_release(ep->dst);
 	cxgb4_l2t_release(ep->l2t);
 	if (ep->mpa_skb)
@@ -596,7 +597,7 @@ static int send_flowc(struct c4iw_ep *ep)
 	else
 		nparams = 9;
 
-	flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);
+	flowc = __skb_put(skb, FLOWC_LEN);
 
 	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
 					   FW_FLOWC_WR_NPARAMS_V(nparams));
@@ -786,18 +787,16 @@ static int send_connect(struct c4iw_ep *ep)
 	if (ep->com.remote_addr.ss_family == AF_INET) {
 		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
 		case CHELSIO_T4:
-			req = (struct cpl_act_open_req *)skb_put(skb, wrlen);
+			req = skb_put(skb, wrlen);
 			INIT_TP_WR(req, 0);
 			break;
 		case CHELSIO_T5:
-			t5req = (struct cpl_t5_act_open_req *)skb_put(skb,
-					wrlen);
+			t5req = skb_put(skb, wrlen);
 			INIT_TP_WR(t5req, 0);
 			req = (struct cpl_act_open_req *)t5req;
 			break;
 		case CHELSIO_T6:
-			t6req = (struct cpl_t6_act_open_req *)skb_put(skb,
-					wrlen);
+			t6req = skb_put(skb, wrlen);
 			INIT_TP_WR(t6req, 0);
 			req = (struct cpl_act_open_req *)t6req;
 			t5req = (struct cpl_t5_act_open_req *)t6req;
@@ -838,18 +837,16 @@ static int send_connect(struct c4iw_ep *ep)
 	} else {
 		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
 		case CHELSIO_T4:
-			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
+			req6 = skb_put(skb, wrlen);
 			INIT_TP_WR(req6, 0);
 			break;
 		case CHELSIO_T5:
-			t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb,
-					wrlen);
+			t5req6 = skb_put(skb, wrlen);
 			INIT_TP_WR(t5req6, 0);
 			req6 = (struct cpl_act_open_req6 *)t5req6;
 			break;
 		case CHELSIO_T6:
-			t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb,
-					wrlen);
+			t6req6 = skb_put(skb, wrlen);
 			INIT_TP_WR(t6req6, 0);
 			req6 = (struct cpl_act_open_req6 *)t6req6;
 			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
@@ -926,8 +923,7 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
-	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
-	memset(req, 0, wrlen);
+	req = skb_put_zero(skb, wrlen);
 	req->op_to_immdlen = cpu_to_be32(
 		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
 		FW_WR_COMPL_F |
@@ -1033,8 +1029,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
-	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
-	memset(req, 0, wrlen);
+	req = skb_put_zero(skb, wrlen);
 	req->op_to_immdlen = cpu_to_be32(
 		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
 		FW_WR_COMPL_F |
@@ -1114,8 +1109,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
-	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
-	memset(req, 0, wrlen);
+	req = skb_put_zero(skb, wrlen);
 	req->op_to_immdlen = cpu_to_be32(
 		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
 		FW_WR_COMPL_F |
@@ -1199,7 +1193,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	/* setup the hwtid for this connection */
 	ep->hwtid = tid;
-	cxgb4_insert_tid(t, ep, tid);
+	cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
 	insert_ep_tid(ep);
 
 	ep->snd_seq = be32_to_cpu(req->snd_isn);
@@ -1906,8 +1900,7 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	int win;
 
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
-	memset(req, 0, sizeof(*req));
+	req = __skb_put_zero(skb, sizeof(*req));
 	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
@@ -2304,7 +2297,8 @@ fail:
 				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
 	}
 	if (status && act_open_has_tid(status))
-		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
+		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
+				 ep->com.local_addr.ss_family);
 
 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
 	cxgb4_free_atid(t, atid);
@@ -2581,7 +2575,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	       child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
 
 	init_timer(&child_ep->timer);
-	cxgb4_insert_tid(t, child_ep, hwtid);
+	cxgb4_insert_tid(t, child_ep, hwtid,
+			 child_ep->com.local_addr.ss_family);
 	insert_ep_tid(child_ep);
 	if (accept_cr(child_ep, skb, req)) {
 		c4iw_put_ep(&parent_ep->com);
@@ -2849,7 +2844,8 @@ out:
 				   1);
 	}
 	remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
-	cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
+	cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
+			 ep->com.local_addr.ss_family);
 	dst_release(ep->dst);
 	cxgb4_l2t_release(ep->l2t);
 	c4iw_reconnect(ep);
@@ -3752,9 +3748,9 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 	 */
 	memset(&tmp_opt, 0, sizeof(tmp_opt));
 	tcp_clear_options(&tmp_opt);
-	tcp_parse_options(skb, &tmp_opt, 0, NULL);
+	tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL);
 
-	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
+	req = __skb_push(skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
 	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
 			 SYN_MAC_IDX_V(RX_MACIDX_G(
@@ -3806,8 +3802,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
 	if (!req_skb)
 		return;
-	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
-	memset(req, 0, sizeof(*req));
+	req = __skb_put_zero(req_skb, sizeof(*req));
 	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 14de5bde1b63..e16fcaf6b5a3 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -44,8 +44,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	wr_len = sizeof *res_wr + sizeof *res;
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
-	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
-	memset(res_wr, 0, wr_len);
+	res_wr = __skb_put_zero(skb, wr_len);
 	res_wr->op_nres = cpu_to_be32(
 			FW_WR_OP_V(FW_RI_RES_WR) |
 			FW_RI_RES_WR_NRES_V(1) |
@@ -114,8 +113,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	}
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
-	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
-	memset(res_wr, 0, wr_len);
+	res_wr = __skb_put_zero(skb, wr_len);
 	res_wr->op_nres = cpu_to_be32(
 			FW_WR_OP_V(FW_RI_RES_WR) |
 			FW_RI_RES_WR_NRES_V(1) |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 3ee7f43e419a..5332f06b99ba 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -81,8 +81,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	}
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
-	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
-	memset(req, 0, wr_len);
+	req = __skb_put_zero(skb, wr_len);
 	INIT_ULPTX_WR(req, wr_len, 0, 0);
 	req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
 			(wait ? FW_WR_COMPL_F : 0));
@@ -142,8 +141,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		}
 		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
-		req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
-		memset(req, 0, wr_len);
+		req = __skb_put_zero(skb, wr_len);
 		INIT_ULPTX_WR(req, wr_len, 0, 0);
 
 		if (i == (num_wqe-1)) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 8e4154b4253e..bfc77596acbe 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -293,8 +293,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	}
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
-	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
-	memset(res_wr, 0, wr_len);
+	res_wr = __skb_put_zero(skb, wr_len);
 	res_wr->op_nres = cpu_to_be32(
 			FW_WR_OP_V(FW_RI_RES_WR) |
 			FW_RI_RES_WR_NRES_V(2) |
@@ -1228,7 +1227,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 
 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
-	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+	wqe = __skb_put(skb, sizeof(*wqe));
 	memset(wqe, 0, sizeof *wqe);
 	wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
 	wqe->flowid_len16 = cpu_to_be32(
@@ -1350,7 +1349,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
-	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+	wqe = __skb_put(skb, sizeof(*wqe));
 	memset(wqe, 0, sizeof *wqe);
 	wqe->op_compl = cpu_to_be32(
 		FW_WR_OP_V(FW_RI_INIT_WR) |
@@ -1419,7 +1418,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
-	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+	wqe = __skb_put(skb, sizeof(*wqe));
 	memset(wqe, 0, sizeof *wqe);
 	wqe->op_compl = cpu_to_be32(
 		FW_WR_OP_V(FW_RI_INIT_WR) |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 521d0def2d9e..75b2f7d4cd95 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -61,8 +61,7 @@
 #include <rdma/mlx4-abi.h>
 
 #define DRV_NAME	MLX4_IB_DRV_NAME
-#define DRV_VERSION	"2.2-1"
-#define DRV_RELDATE	"Feb 2014"
+#define DRV_VERSION	"4.0-0"
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
@@ -79,7 +78,7 @@ MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
 
 static const char mlx4_ib_version[] =
 	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
-	DRV_VERSION " (" DRV_RELDATE ")\n";
+	DRV_VERSION "\n";
 
 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
 
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 94c049b62c2f..a384d72ea3cd 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -788,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 
 	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
-	*cqb = mlx5_vzalloc(*inlen);
+	*cqb = kvzalloc(*inlen, GFP_KERNEL);
 	if (!*cqb) {
 		err = -ENOMEM;
 		goto err_db;
@@ -884,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 
 	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
-	*cqb = mlx5_vzalloc(*inlen);
+	*cqb = kvzalloc(*inlen, GFP_KERNEL);
 	if (!*cqb) {
 		err = -ENOMEM;
 		goto err_buf;
@@ -1314,7 +1314,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 
 	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
 		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto ex_resize;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index f1b56de64871..95db929bdc34 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -218,7 +218,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
 			(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
 		int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 
-		out_cnt = mlx5_vzalloc(sz);
+		out_cnt = kvzalloc(sz, GFP_KERNEL);
 		if (!out_cnt)
 			return IB_MAD_RESULT_FAILURE;
 
@@ -231,7 +231,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
 			(struct ib_pma_portcounters *)(out_mad->data + 40);
 		int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 
-		out_cnt = mlx5_vzalloc(sz);
+		out_cnt = kvzalloc(sz, GFP_KERNEL);
 		if (!out_cnt)
 			return IB_MAD_RESULT_FAILURE;
 
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9ecc089d4529..dc2f59e33971 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -60,8 +60,7 @@
 #include "cmd.h"
 
 #define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "2.2-1"
-#define DRIVER_RELDATE	"Feb 2014"
+#define DRIVER_VERSION "5.0-0"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
@@ -70,7 +69,7 @@ MODULE_VERSION(DRIVER_VERSION);
 
 static char mlx5_version[] =
 	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
-	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
+	DRIVER_VERSION "\n";
 
 enum {
 	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
@@ -224,8 +223,8 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
 	return 0;
 }
 
-static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
-				 struct ib_port_attr *props)
+static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
+				struct ib_port_attr *props)
 {
 	struct mlx5_ib_dev *dev = to_mdev(device);
 	struct mlx5_core_dev *mdev = dev->mdev;
@@ -233,12 +232,14 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 	enum ib_mtu ndev_ib_mtu;
 	u16 qkey_viol_cntr;
 	u32 eth_prot_oper;
+	int err;
 
 	/* Possible bad flows are checked before filling out props so in case
 	 * of an error it will still be zeroed out.
 	 */
-	if (mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num))
-		return;
+	err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
+	if (err)
+		return err;
 
 	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
 				 &props->active_width);
@@ -259,7 +260,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 
 	ndev = mlx5_ib_get_netdev(device, port_num);
 	if (!ndev)
-		return;
+		return 0;
 
 	if (mlx5_lag_is_active(dev->mdev)) {
 		rcu_read_lock();
@@ -282,75 +283,49 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 	dev_put(ndev);
 
 	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);
+	return 0;
 }
 
-static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
-				     const struct ib_gid_attr *attr,
-				     void *mlx5_addr)
+static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
+			 unsigned int index, const union ib_gid *gid,
+			 const struct ib_gid_attr *attr)
 {
-#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
-	char *mlx5_addr_l3_addr	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
-					       source_l3_address);
-	void *mlx5_addr_mac	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
-					       source_mac_47_32);
-
-	if (!gid)
-		return;
+	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+	u8 roce_version = 0;
+	u8 roce_l3_type = 0;
+	bool vlan = false;
+	u8 mac[ETH_ALEN];
+	u16 vlan_id = 0;
 
-	ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);
+	if (gid) {
+		gid_type = attr->gid_type;
+		ether_addr_copy(mac, attr->ndev->dev_addr);
 
-	if (is_vlan_dev(attr->ndev)) {
-		MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
-		MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
+		if (is_vlan_dev(attr->ndev)) {
+			vlan = true;
+			vlan_id = vlan_dev_vlan_id(attr->ndev);
+		}
 	}
 
-	switch (attr->gid_type) {
+	switch (gid_type) {
 	case IB_GID_TYPE_IB:
-		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
+		roce_version = MLX5_ROCE_VERSION_1;
 		break;
 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
-		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
+		roce_version = MLX5_ROCE_VERSION_2;
+		if (ipv6_addr_v4mapped((void *)gid))
+			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
+		else
+			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
 		break;
 	default:
-		WARN_ON(true);
+		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
 	}
 
-	if (attr->gid_type != IB_GID_TYPE_IB) {
-		if (ipv6_addr_v4mapped((void *)gid))
-			MLX5_SET_RA(mlx5_addr, roce_l3_type,
-				    MLX5_ROCE_L3_TYPE_IPV4);
-		else
-			MLX5_SET_RA(mlx5_addr, roce_l3_type,
-				    MLX5_ROCE_L3_TYPE_IPV6);
-	}
-
-	if ((attr->gid_type == IB_GID_TYPE_IB) ||
-	    !ipv6_addr_v4mapped((void *)gid))
-		memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
-	else
-		memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
-}
-
-static int set_roce_addr(struct ib_device *device, u8 port_num,
-			 unsigned int index,
-			 const union ib_gid *gid,
-			 const struct ib_gid_attr *attr)
-{
-	struct mlx5_ib_dev *dev = to_mdev(device);
-	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
-	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
-	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
-	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
-
-	if (ll != IB_LINK_LAYER_ETHERNET)
-		return -EINVAL;
-
-	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
-
-	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
-	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
-	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
+				      roce_l3_type, gid->raw, mac, vlan,
+				      vlan_id);
 }
 
 static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
@@ -358,13 +333,13 @@ static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
 			   const struct ib_gid_attr *attr,
 			   __always_unused void **context)
 {
-	return set_roce_addr(device, port_num, index, gid, attr);
+	return set_roce_addr(to_mdev(device), port_num, index, gid, attr);
 }
 
 static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
 			   unsigned int index, __always_unused void **context)
 {
-	return set_roce_addr(device, port_num, index, NULL, NULL);
+	return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL);
 }
 
 __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
@@ -440,7 +415,7 @@ static void get_atomic_caps(struct mlx5_ib_dev *dev,
 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
 	u8 atomic_req_8B_endianness_mode =
-		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);
+		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
 
 	/* Check if HW supports 8 bytes standard atomic operations and capable
 	 * of host endianness respond
@@ -979,20 +954,31 @@ out:
 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 		       struct ib_port_attr *props)
 {
+	unsigned int count;
+	int ret;
+
 	switch (mlx5_get_vport_access_method(ibdev)) {
 	case MLX5_VPORT_ACCESS_METHOD_MAD:
-		return mlx5_query_mad_ifc_port(ibdev, port, props);
+		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
+		break;
 
 	case MLX5_VPORT_ACCESS_METHOD_HCA:
-		return mlx5_query_hca_port(ibdev, port, props);
+		ret = mlx5_query_hca_port(ibdev, port, props);
+		break;
 
 	case MLX5_VPORT_ACCESS_METHOD_NIC:
-		mlx5_query_port_roce(ibdev, port, props);
-		return 0;
+		ret = mlx5_query_port_roce(ibdev, port, props);
+		break;
 
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+	}
+
+	if (!ret && props) {
+		count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
+		props->gid_tbl_len -= count;
 	}
+	return ret;
 }
 
 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
@@ -2263,7 +2249,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 	if (!is_valid_attr(dev->mdev, flow_attr))
 		return ERR_PTR(-EINVAL);
 
-	spec = mlx5_vzalloc(sizeof(*spec));
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
 	if (!handler || !spec) {
 		err = -ENOMEM;
@@ -3468,7 +3454,7 @@ static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
 	__be32 val;
 	int ret, i;
 
-	out = mlx5_vzalloc(outlen);
+	out = kvzalloc(outlen, GFP_KERNEL);
 	if (!out)
 		return -ENOMEM;
 
@@ -3497,7 +3483,7 @@ static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev,
 	int ret, i;
 	int offset = port->cnts.num_q_counters;
 
-	out = mlx5_vzalloc(outlen);
+	out = kvzalloc(outlen, GFP_KERNEL);
 	if (!out)
 		return -ENOMEM;
 
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 366433f71b58..763bb5b36144 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1110,7 +1110,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 
 	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
 		sizeof(*pas) * ((npages + 1) / 2) * 2;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_1;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ebb6768684de..0889ff367c86 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -823,7 +823,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
 		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
-	*in = mlx5_vzalloc(*inlen);
+	*in = kvzalloc(*inlen, GFP_KERNEL);
 	if (!*in) {
 		err = -ENOMEM;
 		goto err_umem;
@@ -931,7 +931,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
 	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
 	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
 		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
-	*in = mlx5_vzalloc(*inlen);
+	*in = kvzalloc(*inlen, GFP_KERNEL);
 	if (!*in) {
 		err = -ENOMEM;
 		goto err_buf;
@@ -1060,7 +1060,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 		return err;
 
 	inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_umem;
@@ -1140,7 +1140,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	u32 rq_pas_size = get_rq_pas_size(qpc);
 
 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -1193,7 +1193,7 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -1372,7 +1372,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	}
 
 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -1633,7 +1633,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			return err;
 	} else {
-		in = mlx5_vzalloc(inlen);
+		in = kvzalloc(inlen, GFP_KERNEL);
 		if (!in)
 			return -ENOMEM;
 
@@ -2164,7 +2164,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -2189,7 +2189,7 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -2434,7 +2434,7 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -2479,7 +2479,7 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -4281,7 +4281,7 @@ static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(query_sq_out);
-	out = mlx5_vzalloc(inlen);
+	out = kvzalloc(inlen, GFP_KERNEL);
 	if (!out)
 		return -ENOMEM;
 
@@ -4308,7 +4308,7 @@ static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(query_rq_out);
-	out = mlx5_vzalloc(inlen);
+	out = kvzalloc(inlen, GFP_KERNEL);
 	if (!out)
 		return -ENOMEM;
 
@@ -4612,7 +4612,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 	dev = to_mdev(pd->device);
 
 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -4842,7 +4842,7 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
 		return ERR_PTR(-ENOMEM);
 
 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err;
@@ -4921,7 +4921,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		return -EOPNOTSUPP;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 7cb145f9a6db..43707b101f47 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -127,7 +127,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 		goto err_umem;
 	}
 
-	in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
+	in->pas = kvzalloc(sizeof(*in->pas) * ncont, GFP_KERNEL);
 	if (!in->pas) {
 		err = -ENOMEM;
 		goto err_umem;
@@ -189,7 +189,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 	}
 	mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
 
-	in->pas = mlx5_vzalloc(sizeof(*in->pas) * srq->buf.npages);
+	in->pas = kvzalloc(sizeof(*in->pas) * srq->buf.npages, GFP_KERNEL);
 	if (!in->pas) {
 		err = -ENOMEM;
 		goto err_buf;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 30b256a2c54e..de4025deaa4a 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -742,7 +742,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 
 	if (type == NES_TIMER_TYPE_SEND) {
 		new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
-		atomic_inc(&new_send->skb->users);
+		refcount_inc(&new_send->skb->users);
 		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
 		cm_node->send_entry = new_send;
 		add_ref_cm_node(cm_node);
@@ -924,7 +924,7 @@ static void nes_cm_timer_tick(unsigned long pass)
 						  flags);
 			break;
 		}
-		atomic_inc(&send_entry->skb->users);
+		refcount_inc(&send_entry->skb->users);
 		cm_packets_retrans++;
 		nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
 			  "for node %p, jiffies = %lu, time to send = "
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 6a72095d6c7a..b5851fd67d4f 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -37,7 +37,7 @@
 #include <linux/iommu.h>
 #include <linux/pci.h>
 #include <net/addrconf.h>
-#include <linux/qed/qede_roce.h>
+
 #include <linux/qed/qed_chain.h>
 #include <linux/qed/qed_if.h>
 #include "qedr.h"
@@ -276,7 +276,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 					    QED_CHAIN_CNT_TYPE_U16,
 					    n_entries,
 					    sizeof(struct regpair *),
-					    &cnq->pbl);
+					    &cnq->pbl, NULL);
 		if (rc)
 			goto err4;
 
@@ -886,9 +886,9 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
 	memcpy(&sgid->raw[8], guid, sizeof(guid));
 
 	/* Update LL2 */
-	rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
-					       dev->gsi_ll2_mac_address,
-					       dev->ndev->dev_addr);
+	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
+					  dev->gsi_ll2_mac_address,
+					  dev->ndev->dev_addr);
 
 	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
 
@@ -902,7 +902,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
  * initialization done before RoCE driver notifies
  * event to stack.
  */
-static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
+static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
 {
 	switch (event) {
 	case QEDE_UP:
@@ -931,12 +931,12 @@ static struct qedr_driver qedr_drv = {
 
 static int __init qedr_init_module(void)
 {
-	return qede_roce_register_driver(&qedr_drv);
+	return qede_rdma_register_driver(&qedr_drv);
 }
 
 static void __exit qedr_exit_module(void)
 {
-	qede_roce_unregister_driver(&qedr_drv);
+	qede_rdma_unregister_driver(&qedr_drv);
 }
 
 module_init(qedr_init_module);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index d961f79b317c..ab7784bfdac6 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -36,8 +36,8 @@
 #include <rdma/ib_addr.h>
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_chain.h>
-#include <linux/qed/qed_roce_if.h>
-#include <linux/qed/qede_roce.h>
+#include <linux/qed/qed_rdma_if.h>
+#include <linux/qed/qede_rdma.h>
 #include <linux/qed/roce_common.h>
 #include "qedr_hsi_rdma.h"
 
@@ -153,6 +153,8 @@ struct qedr_dev {
 	u32			dp_module;
 	u8			dp_level;
 	u8			num_hwfns;
+	u8			gsi_ll2_handle;
+
 	uint			wq_multiplier;
 	u8			gsi_ll2_mac_address[ETH_ALEN];
 	int			gsi_qp_created;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index d86dbe814d98..4689e802b332 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -44,7 +44,7 @@
 #include <rdma/ib_cache.h>
 
 #include <linux/qed/qed_if.h>
-#include <linux/qed/qed_roce_if.h>
+#include <linux/qed/qed_rdma_if.h>
 #include "qedr.h"
 #include "verbs.h"
 #include <rdma/qedr-abi.h>
@@ -64,9 +64,14 @@ void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
 	dev->gsi_qp = qp;
 }
 
-void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
+void qedr_ll2_complete_tx_packet(void *cxt,
+				 u8 connection_handle,
+				 void *cookie,
+				 dma_addr_t first_frag_addr,
+				 bool b_last_fragment, bool b_last_packet)
 {
-	struct qedr_dev *dev = (struct qedr_dev *)_qdev;
+	struct qedr_dev *dev = (struct qedr_dev *)cxt;
+	struct qed_roce_ll2_packet *pkt = cookie;
 	struct qedr_cq *cq = dev->gsi_sqcq;
 	struct qedr_qp *qp = dev->gsi_qp;
 	unsigned long flags;
@@ -88,20 +93,26 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
 		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
 }
 
-void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
-		    struct qed_roce_ll2_rx_params *params)
+void qedr_ll2_complete_rx_packet(void *cxt,
+				 struct qed_ll2_comp_rx_data *data)
 {
-	struct qedr_dev *dev = (struct qedr_dev *)_dev;
+	struct qedr_dev *dev = (struct qedr_dev *)cxt;
 	struct qedr_cq *cq = dev->gsi_rqcq;
 	struct qedr_qp *qp = dev->gsi_qp;
 	unsigned long flags;
 
 	spin_lock_irqsave(&qp->q_lock, flags);
 
-	qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
-	qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
-	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
-	ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);
+	qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
+		-EINVAL : 0;
+	qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
+	/* note: length stands for data length i.e. GRH is excluded */
+	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
+		data->length.data_length;
+	*((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
+		ntohl(data->opaque_data_0);
+	*((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
+		ntohs((u16)data->opaque_data_1);
 
 	qedr_inc_sw_gsi_cons(&qp->rq);
 
@@ -111,6 +122,14 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
 		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
 }
 
+void qedr_ll2_release_rx_packet(void *cxt,
+				u8 connection_handle,
+				void *cookie,
+				dma_addr_t rx_buf_addr, bool b_last_packet)
+{
+	/* Do nothing... */
+}
+
 static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
 				struct ib_qp_init_attr *attrs)
 {
@@ -159,27 +178,159 @@ static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
 	return 0;
 }
 
+static int qedr_ll2_post_tx(struct qedr_dev *dev,
+			    struct qed_roce_ll2_packet *pkt)
+{
+	enum qed_ll2_roce_flavor_type roce_flavor;
+	struct qed_ll2_tx_pkt_info ll2_tx_pkt;
+	int rc;
+	int i;
+
+	memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));
+
+	roce_flavor = (pkt->roce_mode == ROCE_V1) ?
+	    QED_LL2_ROCE : QED_LL2_RROCE;
+
+	if (pkt->roce_mode == ROCE_V2_IPV4)
+		ll2_tx_pkt.enable_ip_cksum = 1;
+
+	ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
+	ll2_tx_pkt.vlan = 0;
+	ll2_tx_pkt.tx_dest = pkt->tx_dest;
+	ll2_tx_pkt.qed_roce_flavor = roce_flavor;
+	ll2_tx_pkt.first_frag = pkt->header.baddr;
+	ll2_tx_pkt.first_frag_len = pkt->header.len;
+	ll2_tx_pkt.cookie = pkt;
+
+	/* tx header */
+	rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx,
+					     dev->gsi_ll2_handle,
+					     &ll2_tx_pkt, 1);
+	if (rc) {
+		/* TX failed while posting header - release resources */
+		dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+				  pkt->header.vaddr, pkt->header.baddr);
+		kfree(pkt);
+
+		DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc);
+		return rc;
+	}
+
+	/* tx payload */
+	for (i = 0; i < pkt->n_seg; i++) {
+		rc = dev->ops->ll2_set_fragment_of_tx_packet(
+			dev->rdma_ctx,
+			dev->gsi_ll2_handle,
+			pkt->payload[i].baddr,
+			pkt->payload[i].len);
+
+		if (rc) {
+			/* if failed not much to do here, partial packet has
+			 * been posted we can't free memory, will need to wait
+			 * for completion
+			 */
+			DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+int qedr_ll2_stop(struct qedr_dev *dev)
+{
+	int rc;
+
+	if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE)
+		return 0;
+
+	/* remove LL2 MAC address filter */
+	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
+					  dev->gsi_ll2_mac_address, NULL);
+
+	rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx,
+						dev->gsi_ll2_handle);
+	if (rc)
+		DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc);
+
+	dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+
+	dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE;
+
+	return rc;
+}
+
+int qedr_ll2_start(struct qedr_dev *dev,
+		   struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
+{
+	struct qed_ll2_acquire_data data;
+	struct qed_ll2_cbs cbs;
+	int rc;
+
+	/* configure and start LL2 */
+	cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;
+	cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;
+	cbs.rx_release_cb = qedr_ll2_release_rx_packet;
+	cbs.tx_release_cb = qedr_ll2_complete_tx_packet;
+	cbs.cookie = dev;
+
+	memset(&data, 0, sizeof(data));
+	data.input.conn_type = QED_LL2_TYPE_ROCE;
+	data.input.mtu = dev->ndev->mtu;
+	data.input.rx_num_desc = attrs->cap.max_recv_wr;
+	data.input.rx_drop_ttl0_flg = true;
+	data.input.rx_vlan_removal_en = false;
+	data.input.tx_num_desc = attrs->cap.max_send_wr;
+	data.input.tx_tc = 0;
+	data.input.tx_dest = QED_LL2_TX_DEST_NW;
+	data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
+	data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
+	data.input.gsi_enable = 1;
+	data.p_connection_handle = &dev->gsi_ll2_handle;
+	data.cbs = &cbs;
+
+	rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
+	if (rc) {
+		DP_ERR(dev,
+		       "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
+		       rc);
+		return rc;
+	}
+
+	rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
+						dev->gsi_ll2_handle);
+	if (rc) {
+		DP_ERR(dev,
+		       "ll2 start: failed to establish LL2 connection (rc=%d)\n",
+		       rc);
+		goto err1;
+	}
+
+	rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr);
+	if (rc)
+		goto err2;
+
+	return 0;
+
+err2:
+	dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+err1:
+	dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+
+	return rc;
+}
+
 struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
 				 struct ib_qp_init_attr *attrs,
 				 struct qedr_qp *qp)
 {
-	struct qed_roce_ll2_params ll2_params;
 	int rc;
 
 	rc = qedr_check_gsi_qp_attrs(dev, attrs);
 	if (rc)
 		return ERR_PTR(rc);
 
-	/* configure and start LL2 */
-	memset(&ll2_params, 0, sizeof(ll2_params));
-	ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
-	ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
-	ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
-	ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
-	ll2_params.cb_cookie = (void *)dev;
-	ll2_params.mtu = dev->ndev->mtu;
-	ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
-	rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
+	rc = qedr_ll2_start(dev, attrs, qp);
 	if (rc) {
 		DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
 		return ERR_PTR(rc);
@@ -214,7 +365,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
 err:
 	kfree(qp->rqe_wr_id);
 
-	rc = dev->ops->roce_ll2_stop(dev->cdev);
+	rc = qedr_ll2_stop(dev);
 	if (rc)
 		DP_ERR(dev, "create gsi qp: failed destroy on create\n");
 
@@ -223,15 +374,7 @@ err:
 
 int qedr_destroy_gsi_qp(struct qedr_dev *dev)
 {
-	int rc;
-
-	rc = dev->ops->roce_ll2_stop(dev->cdev);
-	if (rc)
-		DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
-	else
-		DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
-
-	return rc;
+	return qedr_ll2_stop(dev);
 }
 
 #define QEDR_MAX_UD_HEADER_SIZE	(100)
@@ -421,7 +564,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
 	struct qed_roce_ll2_packet *pkt = NULL;
 	struct qedr_qp *qp = get_qedr_qp(ibqp);
-	struct qed_roce_ll2_tx_params params;
 	struct qedr_dev *dev = qp->dev;
 	unsigned long flags;
 	int rc;
@@ -449,8 +591,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		goto err;
 	}
 
-	memset(&params, 0, sizeof(params));
-
 	spin_lock_irqsave(&qp->q_lock, flags);
 
 	rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
@@ -459,7 +599,8 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		goto err;
 	}
 
-	rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
+	rc = qedr_ll2_post_tx(dev, pkt);
+
 	if (!rc) {
 		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
 		qedr_inc_sw_prod(&qp->sq);
@@ -467,17 +608,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			   "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
 			   wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
 	} else {
-		if (rc == QED_ROCE_TX_HEAD_FAILURE) {
-			/* TX failed while posting header - release resources */
-			dma_free_coherent(&dev->pdev->dev, pkt->header.len,
-					  pkt->header.vaddr, pkt->header.baddr);
-			kfree(pkt);
-		} else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
-			/* NTD since TX failed while posting a fragment. We will
-			 * release the resources on TX callback
-			 */
-		}
-
 		DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
 		rc = -EAGAIN;
 		*bad_wr = wr;
@@ -504,10 +634,8 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 {
 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
 	struct qedr_qp *qp = get_qedr_qp(ibqp);
-	struct qed_roce_ll2_buffer buf;
 	unsigned long flags;
-	int status = 0;
-	int rc;
+	int rc = 0;
 
 	if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
 	    (qp->state != QED_ROCE_QP_STATE_RTS)) {
@@ -518,8 +646,6 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		return -EINVAL;
 	}
 
-	memset(&buf, 0, sizeof(buf));
-
 	spin_lock_irqsave(&qp->q_lock, flags);
 
 	while (wr) {
@@ -530,10 +656,12 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			goto err;
 		}
 
-		buf.baddr = wr->sg_list[0].addr;
-		buf.len = wr->sg_list[0].length;
-
-		rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
+		rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
+						  dev->gsi_ll2_handle,
+						  wr->sg_list[0].addr,
+						  wr->sg_list[0].length,
+						  0 /* cookie */,
+						  1 /* notify_fw */);
 		if (rc) {
 			DP_ERR(dev,
 			       "gsi post recv: failed to post rx buffer (rc=%d)\n",
@@ -553,7 +681,7 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 	spin_unlock_irqrestore(&qp->q_lock, flags);
 
-	return status;
+	return rc;
 err:
 	spin_unlock_irqrestore(&qp->q_lock, flags);
 	*bad_wr = wr;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index d6723c365c7f..548e4d1e998f 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -935,7 +935,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 					    QED_CHAIN_CNT_TYPE_U32,
 					    chain_entries,
 					    sizeof(union rdma_cqe),
-					    &cq->pbl);
+					    &cq->pbl, NULL);
 	if (rc)
 		goto err1;
 
@@ -1423,7 +1423,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
 					   QED_CHAIN_CNT_TYPE_U32,
 					   n_sq_elems,
 					   QEDR_SQE_ELEMENT_SIZE,
-					   &qp->sq.pbl);
+					   &qp->sq.pbl, NULL);
 	if (rc)
 		return rc;
 
@@ -1437,7 +1437,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
 					   QED_CHAIN_CNT_TYPE_U32,
 					   n_rq_elems,
 					   QEDR_RQE_ELEMENT_SIZE,
-					   &qp->rq.pbl);
+					   &qp->rq.pbl, NULL);
 	if (rc)
 		return rc;
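
Most of the mechanical churn in the cxgb3/cxgb4 hunks above comes from two
net-next-wide cleanups also in this pull: skb_put(), __skb_put() and
skb_push() now return void * (so the explicit casts disappear), and the new
skb_put_zero()/__skb_put_zero() helpers collapse the put-then-memset pattern
into one call. The mlx5 hunks likewise drop the driver-private mlx5_vzalloc()
wrapper in favor of the generic kvzalloc(). The new skb helper, as added to
include/linux/skbuff.h, is essentially:

    static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
    {
            void *tmp = skb_put(skb, len);   /* reserve len tail bytes */

            memset(tmp, 0, len);             /* and zero them in one step */

            return tmp;
    }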