author     Linus Torvalds <torvalds@linux-foundation.org>   2014-10-19 12:29:23 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-10-19 12:29:23 -0700
commit     2eb7f910c158fd675ab33aff67904512779996e8 (patch)
tree       cc5b453afad998da001ee3611f358b7bdc218981 /drivers/infiniband/hw
parent     1f6075f99073a8b5ec9649ae8c0bf2e06fdd42f1 (diff)
parent     7b909bb49ac204bfd2e628707db37beb490dbc5c (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband/RDMA updates from Roland Dreier:
- large set of iSER initiator improvements
- hardware driver fixes for cxgb4, mlx5 and ocrdma
- small fixes to core midlayer
* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (47 commits)
RDMA/cxgb4: Fix ntuple calculation for ipv6 and remove duplicate line
RDMA/cxgb4: Add missing neigh_release in find_route
RDMA/cxgb4: Take IPv6 into account for best_mtu and set_emss
RDMA/cxgb4: Make c4iw_wr_log_size_order static
IB/core: Fix XRC race condition in ib_uverbs_open_qp
IB/core: Clear AH attr variable to prevent garbage data
RDMA/ocrdma: Save the bit environment, spare unncessary parenthesis
RDMA/ocrdma: The kernel has a perfectly good BIT() macro - use it
RDMA/ocrdma: Don't memset() buffers we just allocated with kzalloc()
RDMA/ocrdma: Remove a unused-label warning
RDMA/ocrdma: Convert kernel VA to PA for mmap in user
RDMA/ocrdma: Get vlan tag from ib_qp_attrs
RDMA/ocrdma: Add default GID at index 0
IB/mlx5, iser, isert: Add Signature API additions
Target/iser: Centralize ib_sig_domain setting
IB/iser: Centralize ib_sig_domain settings
IB/mlx5: Use extended internal signature layout
IB/iser: Set IP_CSUM as default guard type
IB/iser: Remove redundant assignment
IB/mlx5: Use enumerations for PI copy mask
...
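The signature-offload commits above ("Signature API additions" and the ib_sig_domain centralization in iser/isert) rework how a ULP describes T10-DIF protection information to the HCA. The sketch below is illustrative only, not code from this merge: it fills an ib_sig_attrs for a "DIF insert" case (no PI in local memory, CRC-guarded DIF on the wire) using the per-domain fields that the new mlx5 BSF code reads in the qp.c hunk further down. The 512-byte pi_interval and the check-mask values are assumed example values, not anything the API mandates.

```c
/*
 * Illustrative sketch only: describe a "DIF insert" operation (no PI in
 * memory, T10-DIF with a CRC guard on the wire) with the ib_sig_domain
 * fields consumed by mlx5_fill_inl_bsf()/mlx5_set_bsf() in the diff below.
 */
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static void example_fill_sig_attrs(struct ib_sig_attrs *sig_attrs)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));

	/* Nothing to check or strip on the memory (local buffer) side. */
	sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;

	/* Generate/verify T10-DIF with a CRC guard on the wire side. */
	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
	sig_attrs->wire.sig.dif.pi_interval = 512;	/* assumed block size */
	sig_attrs->wire.sig.dif.ref_tag = 0;		/* e.g. starting LBA */
	sig_attrs->wire.sig.dif.app_tag = 0;
	sig_attrs->wire.sig.dif.ref_remap = true;	/* increment ref tag per block */
	sig_attrs->wire.sig.dif.apptag_check_mask = 0xffff;

	/* Byte mask over the 8 PI bytes the HCA should check (example: all). */
	sig_attrs->check_mask = 0xff;
}
```

On the provider side, the mlx5 qp.c hunk below consumes exactly these fields when building the BSF, replacing the old format_selector() lookup with mlx5_fill_inl_bsf() per domain.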
Diffstat (limited to 'drivers/infiniband/hw')
 -rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c              |  32
 -rw-r--r--  drivers/infiniband/hw/cxgb4/device.c          |   2
 -rw-r--r--  drivers/infiniband/hw/mlx5/main.c             |   8
 -rw-r--r--  drivers/infiniband/hw/mlx5/mem.c              |  18
 -rw-r--r--  drivers/infiniband/hw/mlx5/mr.c               |   6
 -rw-r--r--  drivers/infiniband/hw/mlx5/qp.c               | 149
 -rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c      |  25
 -rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c    |  12
 -rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h     | 238
 -rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c   |  10
 10 files changed, 242 insertions(+), 258 deletions(-)
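The first cxgb4 hunks below make set_emss() and best_mtu() subtract the correct IP header size for IPv6 connections and account for the TCP timestamp option via round_up(TCPOLEN_TIMESTAMP, 4) instead of a bare 12. A minimal standalone sketch of that arithmetic follows; header sizes are hard-coded to the usual 20/40/20 bytes of struct iphdr/ipv6hdr/tcphdr, whereas the driver itself uses sizeof() on the kernel structs.

```c
/* Standalone model of the EMSS computation introduced in cm.c below. */
#include <stdbool.h>
#include <stdio.h>

#define IPV4_HDR_LEN   20
#define IPV6_HDR_LEN   40
#define TCP_HDR_LEN    20
#define TCP_TS_OPT_LEN 12	/* TCPOLEN_TIMESTAMP (10) rounded up to 4 bytes */

static unsigned int effective_mss(unsigned int path_mtu, bool ipv6, bool tstamps)
{
	unsigned int hdr = (ipv6 ? IPV6_HDR_LEN : IPV4_HDR_LEN) + TCP_HDR_LEN +
			   (tstamps ? TCP_TS_OPT_LEN : 0);
	unsigned int emss = path_mtu - hdr;

	return emss < 128 ? 128 : emss;	/* same floor set_emss() enforces */
}

int main(void)
{
	printf("IPv4, no timestamps: %u\n", effective_mss(1500, false, false)); /* 1460 */
	printf("IPv6 + timestamps:   %u\n", effective_mss(1500, true, true));   /* 1428 */
	return 0;
}
```

For a 1500-byte path MTU this gives an MSS of 1460 for plain IPv4 and 1428 for IPv6 with timestamps, which is what the patched driver now programs instead of always assuming an IPv4 header.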
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index c2fb71c182a8..fb61f6685809 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -236,10 +236,12 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) static void set_emss(struct c4iw_ep *ep, u16 opt) { ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - - sizeof(struct iphdr) - sizeof(struct tcphdr); + ((AF_INET == ep->com.remote_addr.ss_family) ? + sizeof(struct iphdr) : sizeof(struct ipv6hdr)) - + sizeof(struct tcphdr); ep->mss = ep->emss; if (GET_TCPOPT_TSTAMP(opt)) - ep->emss -= 12; + ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4); if (ep->emss < 128) ep->emss = 128; if (ep->emss & 7) @@ -415,6 +417,7 @@ static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip, return NULL; if (!our_interface(dev, n->dev) && !(n->dev->flags & IFF_LOOPBACK)) { + neigh_release(n); dst_release(&rt->dst); return NULL; } @@ -581,11 +584,14 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep, } static void best_mtu(const unsigned short *mtus, unsigned short mtu, - unsigned int *idx, int use_ts) + unsigned int *idx, int use_ts, int ipv6) { - unsigned short hdr_size = sizeof(struct iphdr) + + unsigned short hdr_size = (ipv6 ? + sizeof(struct ipv6hdr) : + sizeof(struct iphdr)) + sizeof(struct tcphdr) + - (use_ts ? 12 : 0); + (use_ts ? + round_up(TCPOLEN_TIMESTAMP, 4) : 0); unsigned short data_size = mtu - hdr_size; cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx); @@ -634,7 +640,8 @@ static int send_connect(struct c4iw_ep *ep) set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, - enable_tcp_timestamps); + enable_tcp_timestamps, + (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); wscale = compute_wscale(rcv_win); /* @@ -668,6 +675,7 @@ static int send_connect(struct c4iw_ep *ep) if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { opt2 |= T5_OPT_2_VALID; opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); + opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ } t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); @@ -713,8 +721,6 @@ static int send_connect(struct c4iw_ep *ep) } else { u32 isn = (prandom_u32() & ~7UL) - 1; - opt2 |= T5_OPT_2_VALID; - opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ if (peer2peer) isn += 4; @@ -756,10 +762,10 @@ static int send_connect(struct c4iw_ep *ep) t5_req6->peer_ip_lo = *((__be64 *) (ra6->sin6_addr.s6_addr + 8)); t5_req6->opt0 = cpu_to_be64(opt0); - t5_req6->params = (__force __be64)cpu_to_be32( + t5_req6->params = cpu_to_be64(V_FILTER_TUPLE( cxgb4_select_ntuple( ep->com.dev->rdev.lldi.ports[0], - ep->l2t)); + ep->l2t))); t5_req6->rsvd = cpu_to_be32(isn); PDBG("%s snd_isn %u\n", __func__, be32_to_cpu(t5_req6->rsvd)); @@ -1763,7 +1769,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) req->tcb.tx_max = (__force __be32) jiffies; req->tcb.rcv_adv = htons(1); best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, - enable_tcp_timestamps); + enable_tcp_timestamps, + (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); wscale = compute_wscale(rcv_win); /* @@ -2162,7 +2169,8 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, ep->hwtid)); best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, - enable_tcp_timestamps && req->tcpopt.tstamp); + enable_tcp_timestamps && req->tcpopt.tstamp, + (AF_INET == ep->com.remote_addr.ss_family) ? 
0 : 1); wscale = compute_wscale(rcv_win); /* diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index f25df5276c22..72f1f052e88c 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -60,7 +60,7 @@ int c4iw_wr_log = 0; module_param(c4iw_wr_log, int, 0444); MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data."); -int c4iw_wr_log_size_order = 12; +static int c4iw_wr_log_size_order = 12; module_param(c4iw_wr_log_size_order, int, 0444); MODULE_PARM_DESC(c4iw_wr_log_size_order, "Number of entries (log2) in the work request timing log."); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index f3114d1132fb..1ba6c42e4df8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -657,13 +657,13 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm return -EINVAL; idx = get_index(vma->vm_pgoff); + if (idx >= uuari->num_uars) + return -EINVAL; + pfn = uar_index2pfn(dev, uuari->uars[idx].index); mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx, (unsigned long long)pfn); - if (idx >= uuari->num_uars) - return -EINVAL; - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, vma->vm_page_prot)) @@ -1425,8 +1425,8 @@ err_dealloc: static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) { struct mlx5_ib_dev *dev = context; - destroy_umrc_res(dev); ib_unregister_device(&dev->ib_dev); + destroy_umrc_res(dev); destroy_dev_resources(&dev->devr); free_comp_eqs(dev); ib_dealloc_device(&dev->ib_dev); diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index a3e81444c825..dae07eae9507 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -55,16 +55,17 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, u64 pfn; struct scatterlist *sg; int entry; + unsigned long page_shift = ilog2(umem->page_size); - addr = addr >> PAGE_SHIFT; + addr = addr >> page_shift; tmp = (unsigned long)addr; m = find_first_bit(&tmp, sizeof(tmp)); skip = 1 << m; mask = skip - 1; i = 0; for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { - len = sg_dma_len(sg) >> PAGE_SHIFT; - pfn = sg_dma_address(sg) >> PAGE_SHIFT; + len = sg_dma_len(sg) >> page_shift; + pfn = sg_dma_address(sg) >> page_shift; for (k = 0; k < len; k++) { if (!(i & mask)) { tmp = (unsigned long)pfn; @@ -103,14 +104,15 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, *ncont = 0; } - *shift = PAGE_SHIFT + m; + *shift = page_shift + m; *count = i; } void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, int page_shift, __be64 *pas, int umr) { - int shift = page_shift - PAGE_SHIFT; + unsigned long umem_page_shift = ilog2(umem->page_size); + int shift = page_shift - umem_page_shift; int mask = (1 << shift) - 1; int i, k; u64 cur = 0; @@ -121,11 +123,11 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, i = 0; for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { - len = sg_dma_len(sg) >> PAGE_SHIFT; + len = sg_dma_len(sg) >> umem_page_shift; base = sg_dma_address(sg); for (k = 0; k < len; k++) { if (!(i & mask)) { - cur = base + (k << PAGE_SHIFT); + cur = base + (k << umem_page_shift); if (umr) cur |= 3; @@ -134,7 +136,7 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, i >> shift, be64_to_cpu(pas[i >> 
shift])); } else mlx5_ib_dbg(dev, "=====> 0x%llx\n", - base + (k << PAGE_SHIFT)); + base + (k << umem_page_shift)); i++; } } diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 80b3c63eab5d..8ee7cb46e059 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -881,12 +881,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, int order; int err; - mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n", - start, virt_addr, length); + mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", + start, virt_addr, length, access_flags); umem = ib_umem_get(pd->uobject->context, start, length, access_flags, 0); if (IS_ERR(umem)) { - mlx5_ib_dbg(dev, "umem get failed\n"); + mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem)); return (void *)umem; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index dbfe498870c1..e261a53f9a02 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1317,6 +1317,11 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, path->rlid = cpu_to_be16(ah->dlid); if (ah->ah_flags & IB_AH_GRH) { + if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) { + pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n", + ah->grh.sgid_index, gen->port[port - 1].gid_table_len); + return -EINVAL; + } path->grh_mlid |= 1 << 7; path->mgid_index = ah->grh.sgid_index; path->hop_limit = ah->grh.hop_limit; @@ -1332,22 +1337,6 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, path->static_rate = err; path->port = port; - if (ah->ah_flags & IB_AH_GRH) { - if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) { - pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n", - ah->grh.sgid_index, gen->port[port - 1].gid_table_len); - return -EINVAL; - } - - path->grh_mlid |= 1 << 7; - path->mgid_index = ah->grh.sgid_index; - path->hop_limit = ah->grh.hop_limit; - path->tclass_flowlabel = - cpu_to_be32((ah->grh.traffic_class << 20) | - (ah->grh.flow_label)); - memcpy(path->rgid, ah->grh.dgid.raw, 16); - } - if (attr_mask & IB_QP_TIMEOUT) path->ackto_lt = attr->timeout << 3; @@ -2039,56 +2028,31 @@ static u8 bs_selector(int block_size) } } -static int format_selector(struct ib_sig_attrs *attr, - struct ib_sig_domain *domain, - int *selector) +static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain, + struct mlx5_bsf_inl *inl) { + /* Valid inline section and allow BSF refresh */ + inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID | + MLX5_BSF_REFRESH_DIF); + inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag); + inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag); + /* repeating block */ + inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK; + inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ? 
+ MLX5_DIF_CRC : MLX5_DIF_IPCS; -#define FORMAT_DIF_NONE 0 -#define FORMAT_DIF_CRC_INC 8 -#define FORMAT_DIF_CRC_NO_INC 12 -#define FORMAT_DIF_CSUM_INC 13 -#define FORMAT_DIF_CSUM_NO_INC 14 + if (domain->sig.dif.ref_remap) + inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG; - switch (domain->sig.dif.type) { - case IB_T10DIF_NONE: - /* No DIF */ - *selector = FORMAT_DIF_NONE; - break; - case IB_T10DIF_TYPE1: /* Fall through */ - case IB_T10DIF_TYPE2: - switch (domain->sig.dif.bg_type) { - case IB_T10DIF_CRC: - *selector = FORMAT_DIF_CRC_INC; - break; - case IB_T10DIF_CSUM: - *selector = FORMAT_DIF_CSUM_INC; - break; - default: - return 1; - } - break; - case IB_T10DIF_TYPE3: - switch (domain->sig.dif.bg_type) { - case IB_T10DIF_CRC: - *selector = domain->sig.dif.type3_inc_reftag ? - FORMAT_DIF_CRC_INC : - FORMAT_DIF_CRC_NO_INC; - break; - case IB_T10DIF_CSUM: - *selector = domain->sig.dif.type3_inc_reftag ? - FORMAT_DIF_CSUM_INC : - FORMAT_DIF_CSUM_NO_INC; - break; - default: - return 1; - } - break; - default: - return 1; + if (domain->sig.dif.app_escape) { + if (domain->sig.dif.ref_escape) + inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE; + else + inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE; } - return 0; + inl->dif_app_bitmask_check = + cpu_to_be16(domain->sig.dif.apptag_check_mask); } static int mlx5_set_bsf(struct ib_mr *sig_mr, @@ -2099,45 +2063,49 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr, struct mlx5_bsf_basic *basic = &bsf->basic; struct ib_sig_domain *mem = &sig_attrs->mem; struct ib_sig_domain *wire = &sig_attrs->wire; - int ret, selector; memset(bsf, 0, sizeof(*bsf)); + + /* Basic + Extended + Inline */ + basic->bsf_size_sbs = 1 << 7; + /* Input domain check byte mask */ + basic->check_byte_mask = sig_attrs->check_mask; + basic->raw_data_size = cpu_to_be32(data_size); + + /* Memory domain */ switch (sig_attrs->mem.sig_type) { + case IB_SIG_TYPE_NONE: + break; case IB_SIG_TYPE_T10_DIF: - if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF) - return -EINVAL; + basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval); + basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx); + mlx5_fill_inl_bsf(mem, &bsf->m_inl); + break; + default: + return -EINVAL; + } - /* Input domain check byte mask */ - basic->check_byte_mask = sig_attrs->check_mask; + /* Wire domain */ + switch (sig_attrs->wire.sig_type) { + case IB_SIG_TYPE_NONE: + break; + case IB_SIG_TYPE_T10_DIF: if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval && - mem->sig.dif.type == wire->sig.dif.type) { + mem->sig_type == wire->sig_type) { /* Same block structure */ - basic->bsf_size_sbs = 1 << 4; + basic->bsf_size_sbs |= 1 << 4; if (mem->sig.dif.bg_type == wire->sig.dif.bg_type) - basic->wire.copy_byte_mask |= 0xc0; + basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK; if (mem->sig.dif.app_tag == wire->sig.dif.app_tag) - basic->wire.copy_byte_mask |= 0x30; + basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK; if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag) - basic->wire.copy_byte_mask |= 0x0f; + basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK; } else basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval); - basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval); - basic->raw_data_size = cpu_to_be32(data_size); - - ret = format_selector(sig_attrs, mem, &selector); - if (ret) - return -EINVAL; - basic->m_bfs_psv = cpu_to_be32(selector << 24 | - msig->psv_memory.psv_idx); - - ret = format_selector(sig_attrs, wire, &selector); - if (ret) - return -EINVAL; - basic->w_bfs_psv = 
cpu_to_be32(selector << 24 | - msig->psv_wire.psv_idx); + basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx); + mlx5_fill_inl_bsf(wire, &bsf->w_inl); break; - default: return -EINVAL; } @@ -2336,20 +2304,21 @@ static int set_psv_wr(struct ib_sig_domain *domain, memset(psv_seg, 0, sizeof(*psv_seg)); psv_seg->psv_num = cpu_to_be32(psv_idx); switch (domain->sig_type) { + case IB_SIG_TYPE_NONE: + break; case IB_SIG_TYPE_T10_DIF: psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 | domain->sig.dif.app_tag); psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag); - - *seg += sizeof(*psv_seg); - *size += sizeof(*psv_seg) / 16; break; - default: pr_err("Bad signature type given.\n"); return 1; } + *seg += sizeof(*psv_seg); + *size += sizeof(*psv_seg) / 16; + return 0; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index dd35ae558ae1..638bff1ffc6c 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -348,11 +348,6 @@ static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len) return mqe; } -static void *ocrdma_alloc_mqe(void) -{ - return kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL); -} - static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q) { dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma); @@ -566,8 +561,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev, cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT); cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; - cmd->async_event_bitmap = Bit(OCRDMA_ASYNC_GRP5_EVE_CODE); - cmd->async_event_bitmap |= Bit(OCRDMA_ASYNC_RDMA_EVE_CODE); + cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE); + cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE); cmd->async_cqid_ringsize = cq->id; cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << @@ -1189,10 +1184,10 @@ int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset) { struct ocrdma_rdma_stats_req *req = dev->stats_mem.va; struct ocrdma_mqe *mqe = &dev->stats_mem.mqe; - struct ocrdma_rdma_stats_resp *old_stats = NULL; + struct ocrdma_rdma_stats_resp *old_stats; int status; - old_stats = kzalloc(sizeof(*old_stats), GFP_KERNEL); + old_stats = kmalloc(sizeof(*old_stats), GFP_KERNEL); if (old_stats == NULL) return -ENOMEM; @@ -1235,10 +1230,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev) struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp; struct mgmt_hba_attribs *hba_attribs; - mqe = ocrdma_alloc_mqe(); + mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL); if (!mqe) return status; - memset(mqe, 0, sizeof(*mqe)); dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp); dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev, @@ -2279,7 +2273,8 @@ mbx_err: static int ocrdma_set_av_params(struct ocrdma_qp *qp, struct ocrdma_modify_qp *cmd, - struct ib_qp_attr *attrs) + struct ib_qp_attr *attrs, + int attr_mask) { int status; struct ib_ah_attr *ah_attr = &attrs->ah_attr; @@ -2319,8 +2314,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid)); ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid)); cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); - vlan_id = ah_attr->vlan_id; - if (vlan_id && (vlan_id < 0x1000)) { + if (attr_mask & IB_QP_VID) { + vlan_id = attrs->vlan_id; cmd->params.vlan_dmac_b4_to_b5 |= vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; @@ -2347,7 
+2342,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp, cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID; } if (attr_mask & IB_QP_AV) { - status = ocrdma_set_av_params(qp, cmd, attrs); + status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask); if (status) return status; } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) { diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 256a06bc0b68..b0b2257b8e04 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -388,6 +388,15 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]); } +static void ocrdma_add_default_sgid(struct ocrdma_dev *dev) +{ + /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */ + union ib_gid *sgid = &dev->sgid_tbl[0]; + + sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); + ocrdma_get_guid(dev, &sgid->raw[8]); +} + static void ocrdma_init_ipv4_gids(struct ocrdma_dev *dev, struct net_device *net) { @@ -434,6 +443,7 @@ static void ocrdma_init_gid_table(struct ocrdma_dev *dev) rdma_vlan_dev_real_dev(net_dev) : net_dev; if (real_dev == dev->nic_info.netdev) { + ocrdma_add_default_sgid(dev); ocrdma_init_ipv4_gids(dev, net_dev); ocrdma_init_ipv6_gids(dev, net_dev); } @@ -646,8 +656,10 @@ static int __init ocrdma_init_module(void) return 0; err_be_reg: +#if IS_ENABLED(CONFIG_IPV6) ocrdma_unregister_inet6addr_notifier(); err_notifier6: +#endif ocrdma_unregister_inetaddr_notifier(); return status; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 904989ec5eaa..4e036480c1a8 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -28,8 +28,6 @@ #ifndef __OCRDMA_SLI_H__ #define __OCRDMA_SLI_H__ -#define Bit(_b) (1 << (_b)) - enum { OCRDMA_ASIC_GEN_SKH_R = 0x04, OCRDMA_ASIC_GEN_LANCER = 0x0B @@ -103,7 +101,7 @@ enum { QTYPE_MCCQ = 3 }; -#define OCRDMA_MAX_SGID (8) +#define OCRDMA_MAX_SGID 8 #define OCRDMA_MAX_QP 2048 #define OCRDMA_MAX_CQ 2048 @@ -128,33 +126,33 @@ enum { #define OCRDMA_DB_CQ_RING_ID_EXT_MASK 0x0C00 /* bits 10-11 of qid at 12-11 */ /* qid #2 msbits at 12-11 */ #define OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT 0x1 -#define OCRDMA_DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ +#define OCRDMA_DB_CQ_NUM_POPPED_SHIFT 16 /* bits 16 - 28 */ /* Rearm bit */ -#define OCRDMA_DB_CQ_REARM_SHIFT (29) /* bit 29 */ +#define OCRDMA_DB_CQ_REARM_SHIFT 29 /* bit 29 */ /* solicited bit */ -#define OCRDMA_DB_CQ_SOLICIT_SHIFT (31) /* bit 31 */ +#define OCRDMA_DB_CQ_SOLICIT_SHIFT 31 /* bit 31 */ #define OCRDMA_EQ_ID_MASK 0x1FF /* bits 0 - 8 */ #define OCRDMA_EQ_ID_EXT_MASK 0x3e00 /* bits 9-13 */ -#define OCRDMA_EQ_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 at 11-15 */ +#define OCRDMA_EQ_ID_EXT_MASK_SHIFT 2 /* qid bits 9-13 at 11-15 */ /* Clear the interrupt for this eq */ -#define OCRDMA_EQ_CLR_SHIFT (9) /* bit 9 */ +#define OCRDMA_EQ_CLR_SHIFT 9 /* bit 9 */ /* Must be 1 */ -#define OCRDMA_EQ_TYPE_SHIFT (10) /* bit 10 */ +#define OCRDMA_EQ_TYPE_SHIFT 10 /* bit 10 */ /* Number of event entries processed */ -#define OCRDMA_NUM_EQE_SHIFT (16) /* bits 16 - 28 */ +#define OCRDMA_NUM_EQE_SHIFT 16 /* bits 16 - 28 */ /* Rearm bit */ -#define OCRDMA_REARM_SHIFT (29) /* bit 29 */ +#define OCRDMA_REARM_SHIFT 29 /* bit 29 */ #define OCRDMA_MQ_ID_MASK 0x7FF /* bits 0 - 10 */ /* Number of entries posted */ -#define OCRDMA_MQ_NUM_MQE_SHIFT (16) /* bits 16 - 
29 */ +#define OCRDMA_MQ_NUM_MQE_SHIFT 16 /* bits 16 - 29 */ -#define OCRDMA_MIN_HPAGE_SIZE (4096) +#define OCRDMA_MIN_HPAGE_SIZE 4096 -#define OCRDMA_MIN_Q_PAGE_SIZE (4096) -#define OCRDMA_MAX_Q_PAGES (8) +#define OCRDMA_MIN_Q_PAGE_SIZE 4096 +#define OCRDMA_MAX_Q_PAGES 8 #define OCRDMA_SLI_ASIC_ID_OFFSET 0x9C #define OCRDMA_SLI_ASIC_REV_MASK 0x000000FF @@ -170,14 +168,14 @@ enum { # 6: 256K Bytes # 7: 512K Bytes */ -#define OCRDMA_MAX_Q_PAGE_SIZE_CNT (8) +#define OCRDMA_MAX_Q_PAGE_SIZE_CNT 8 #define OCRDMA_Q_PAGE_BASE_SIZE (OCRDMA_MIN_Q_PAGE_SIZE * OCRDMA_MAX_Q_PAGES) -#define MAX_OCRDMA_QP_PAGES (8) +#define MAX_OCRDMA_QP_PAGES 8 #define OCRDMA_MAX_WQE_MEM_SIZE (MAX_OCRDMA_QP_PAGES * OCRDMA_MIN_HQ_PAGE_SIZE) -#define OCRDMA_CREATE_CQ_MAX_PAGES (4) -#define OCRDMA_DPP_CQE_SIZE (4) +#define OCRDMA_CREATE_CQ_MAX_PAGES 4 +#define OCRDMA_DPP_CQE_SIZE 4 #define OCRDMA_GEN2_MAX_CQE 1024 #define OCRDMA_GEN2_CQ_PAGE_SIZE 4096 @@ -238,7 +236,7 @@ struct ocrdma_mqe_sge { enum { OCRDMA_MQE_HDR_EMB_SHIFT = 0, - OCRDMA_MQE_HDR_EMB_MASK = Bit(0), + OCRDMA_MQE_HDR_EMB_MASK = BIT(0), OCRDMA_MQE_HDR_SGE_CNT_SHIFT = 3, OCRDMA_MQE_HDR_SGE_CNT_MASK = 0x1F << OCRDMA_MQE_HDR_SGE_CNT_SHIFT, OCRDMA_MQE_HDR_SPECIAL_SHIFT = 24, @@ -292,7 +290,7 @@ struct ocrdma_pa { u32 hi; }; -#define MAX_OCRDMA_EQ_PAGES (8) +#define MAX_OCRDMA_EQ_PAGES 8 struct ocrdma_create_eq_req { struct ocrdma_mbx_hdr req; u32 num_pages; @@ -304,7 +302,7 @@ struct ocrdma_create_eq_req { }; enum { - OCRDMA_CREATE_EQ_VALID = Bit(29), + OCRDMA_CREATE_EQ_VALID = BIT(29), OCRDMA_CREATE_EQ_CNT_SHIFT = 26, OCRDMA_CREATE_CQ_DELAY_SHIFT = 13, }; @@ -314,7 +312,7 @@ struct ocrdma_create_eq_rsp { u32 vector_eqid; }; -#define OCRDMA_EQ_MINOR_OTHER (0x1) +#define OCRDMA_EQ_MINOR_OTHER 0x1 enum { OCRDMA_MCQE_STATUS_SHIFT = 0, @@ -322,13 +320,13 @@ enum { OCRDMA_MCQE_ESTATUS_SHIFT = 16, OCRDMA_MCQE_ESTATUS_MASK = 0xFFFF << OCRDMA_MCQE_ESTATUS_SHIFT, OCRDMA_MCQE_CONS_SHIFT = 27, - OCRDMA_MCQE_CONS_MASK = Bit(27), + OCRDMA_MCQE_CONS_MASK = BIT(27), OCRDMA_MCQE_CMPL_SHIFT = 28, - OCRDMA_MCQE_CMPL_MASK = Bit(28), + OCRDMA_MCQE_CMPL_MASK = BIT(28), OCRDMA_MCQE_AE_SHIFT = 30, - OCRDMA_MCQE_AE_MASK = Bit(30), + OCRDMA_MCQE_AE_MASK = BIT(30), OCRDMA_MCQE_VALID_SHIFT = 31, - OCRDMA_MCQE_VALID_MASK = Bit(31) + OCRDMA_MCQE_VALID_MASK = BIT(31) }; struct ocrdma_mcqe { @@ -339,13 +337,13 @@ struct ocrdma_mcqe { }; enum { - OCRDMA_AE_MCQE_QPVALID = Bit(31), + OCRDMA_AE_MCQE_QPVALID = BIT(31), OCRDMA_AE_MCQE_QPID_MASK = 0xFFFF, - OCRDMA_AE_MCQE_CQVALID = Bit(31), + OCRDMA_AE_MCQE_CQVALID = BIT(31), OCRDMA_AE_MCQE_CQID_MASK = 0xFFFF, - OCRDMA_AE_MCQE_VALID = Bit(31), - OCRDMA_AE_MCQE_AE = Bit(30), + OCRDMA_AE_MCQE_VALID = BIT(31), + OCRDMA_AE_MCQE_AE = BIT(30), OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT = 16, OCRDMA_AE_MCQE_EVENT_TYPE_MASK = 0xFF << OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT, @@ -386,9 +384,9 @@ enum { OCRDMA_AE_MPA_MCQE_EVENT_TYPE_MASK = 0xFF << OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT, OCRDMA_AE_MPA_MCQE_EVENT_AE_SHIFT = 30, - OCRDMA_AE_MPA_MCQE_EVENT_AE_MASK = Bit(30), + OCRDMA_AE_MPA_MCQE_EVENT_AE_MASK = BIT(30), OCRDMA_AE_MPA_MCQE_EVENT_VALID_SHIFT = 31, - OCRDMA_AE_MPA_MCQE_EVENT_VALID_MASK = Bit(31) + OCRDMA_AE_MPA_MCQE_EVENT_VALID_MASK = BIT(31) }; struct ocrdma_ae_mpa_mcqe { @@ -412,9 +410,9 @@ enum { OCRDMA_AE_QP_MCQE_EVENT_TYPE_MASK = 0xFF << OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT, OCRDMA_AE_QP_MCQE_EVENT_AE_SHIFT = 30, - OCRDMA_AE_QP_MCQE_EVENT_AE_MASK = Bit(30), + OCRDMA_AE_QP_MCQE_EVENT_AE_MASK = BIT(30), OCRDMA_AE_QP_MCQE_EVENT_VALID_SHIFT = 31, - 
OCRDMA_AE_QP_MCQE_EVENT_VALID_MASK = Bit(31) + OCRDMA_AE_QP_MCQE_EVENT_VALID_MASK = BIT(31) }; struct ocrdma_ae_qp_mcqe { @@ -449,9 +447,9 @@ enum OCRDMA_ASYNC_EVENT_TYPE { /* mailbox command request and responses */ enum { OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2, - OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK = Bit(2), + OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK = BIT(2), OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT = 3, - OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK = Bit(3), + OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK = BIT(3), OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT = 8, OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK = 0xFFFFFF << OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT, @@ -672,9 +670,9 @@ enum { OCRDMA_CREATE_CQ_PAGE_SIZE_MASK = 0xFF, OCRDMA_CREATE_CQ_COALESCWM_SHIFT = 12, - OCRDMA_CREATE_CQ_COALESCWM_MASK = Bit(13) | Bit(12), - OCRDMA_CREATE_CQ_FLAGS_NODELAY = Bit(14), - OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID = Bit(15), + OCRDMA_CREATE_CQ_COALESCWM_MASK = BIT(13) | BIT(12), + OCRDMA_CREATE_CQ_FLAGS_NODELAY = BIT(14), + OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID = BIT(15), OCRDMA_CREATE_CQ_EQ_ID_MASK = 0xFFFF, OCRDMA_CREATE_CQ_CQE_COUNT_MASK = 0xFFFF @@ -687,8 +685,8 @@ enum { OCRDMA_CREATE_CQ_EQID_SHIFT = 22, OCRDMA_CREATE_CQ_CNT_SHIFT = 27, - OCRDMA_CREATE_CQ_FLAGS_VALID = Bit(29), - OCRDMA_CREATE_CQ_FLAGS_EVENTABLE = Bit(31), + OCRDMA_CREATE_CQ_FLAGS_VALID = BIT(29), + OCRDMA_CREATE_CQ_FLAGS_EVENTABLE = BIT(31), OCRDMA_CREATE_CQ_DEF_FLAGS = OCRDMA_CREATE_CQ_FLAGS_VALID | OCRDMA_CREATE_CQ_FLAGS_EVENTABLE | OCRDMA_CREATE_CQ_FLAGS_NODELAY @@ -731,8 +729,8 @@ enum { OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT = 22, OCRDMA_CREATE_MQ_CQ_ID_SHIFT = 16, OCRDMA_CREATE_MQ_RING_SIZE_SHIFT = 16, - OCRDMA_CREATE_MQ_VALID = Bit(31), - OCRDMA_CREATE_MQ_ASYNC_CQ_VALID = Bit(0) + OCRDMA_CREATE_MQ_VALID = BIT(31), + OCRDMA_CREATE_MQ_ASYNC_CQ_VALID = BIT(0) }; struct ocrdma_create_mq_req { @@ -783,7 +781,7 @@ enum { OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT = 16, OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT = 19, OCRDMA_CREATE_QP_REQ_QPT_SHIFT = 29, - OCRDMA_CREATE_QP_REQ_QPT_MASK = Bit(31) | Bit(30) | Bit(29), + OCRDMA_CREATE_QP_REQ_QPT_MASK = BIT(31) | BIT(30) | BIT(29), OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT = 0, OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK = 0xFFFF, @@ -798,23 +796,23 @@ enum { OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT, OCRDMA_CREATE_QP_REQ_FMR_EN_SHIFT = 0, - OCRDMA_CREATE_QP_REQ_FMR_EN_MASK = Bit(0), + OCRDMA_CREATE_QP_REQ_FMR_EN_MASK = BIT(0), OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_SHIFT = 1, - OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK = Bit(1), + OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK = BIT(1), OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_SHIFT = 2, - OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK = Bit(2), + OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK = BIT(2), OCRDMA_CREATE_QP_REQ_INB_WREN_SHIFT = 3, - OCRDMA_CREATE_QP_REQ_INB_WREN_MASK = Bit(3), + OCRDMA_CREATE_QP_REQ_INB_WREN_MASK = BIT(3), OCRDMA_CREATE_QP_REQ_INB_RDEN_SHIFT = 4, - OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK = Bit(4), + OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK = BIT(4), OCRDMA_CREATE_QP_REQ_USE_SRQ_SHIFT = 5, - OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK = Bit(5), + OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK = BIT(5), OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_SHIFT = 6, - OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_MASK = Bit(6), + OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_MASK = BIT(6), OCRDMA_CREATE_QP_REQ_ENABLE_DPP_SHIFT = 7, - OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK = Bit(7), + OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK = BIT(7), OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_SHIFT = 8, - OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_MASK = Bit(8), + OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_MASK = BIT(8), 
OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT = 16, OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK = 0xFFFF << OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT, @@ -927,7 +925,7 @@ enum { OCRDMA_CREATE_QP_RSP_SQ_ID_MASK = 0xFFFF << OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT, - OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK = Bit(0), + OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK = BIT(0), OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT = 1, OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK = 0x7FFF << OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT, @@ -964,38 +962,38 @@ enum { OCRDMA_MODIFY_QP_ID_SHIFT = 0, OCRDMA_MODIFY_QP_ID_MASK = 0xFFFF, - OCRDMA_QP_PARA_QPS_VALID = Bit(0), - OCRDMA_QP_PARA_SQD_ASYNC_VALID = Bit(1), - OCRDMA_QP_PARA_PKEY_VALID = Bit(2), - OCRDMA_QP_PARA_QKEY_VALID = Bit(3), - OCRDMA_QP_PARA_PMTU_VALID = Bit(4), - OCRDMA_QP_PARA_ACK_TO_VALID = Bit(5), - OCRDMA_QP_PARA_RETRY_CNT_VALID = Bit(6), - OCRDMA_QP_PARA_RRC_VALID = Bit(7), - OCRDMA_QP_PARA_RQPSN_VALID = Bit(8), - OCRDMA_QP_PARA_MAX_IRD_VALID = Bit(9), - OCRDMA_QP_PARA_MAX_ORD_VALID = Bit(10), - OCRDMA_QP_PARA_RNT_VALID = Bit(11), - OCRDMA_QP_PARA_SQPSN_VALID = Bit(12), - OCRDMA_QP_PARA_DST_QPN_VALID = Bit(13), - OCRDMA_QP_PARA_MAX_WQE_VALID = Bit(14), - OCRDMA_QP_PARA_MAX_RQE_VALID = Bit(15), - OCRDMA_QP_PARA_SGE_SEND_VALID = Bit(16), - OCRDMA_QP_PARA_SGE_RECV_VALID = Bit(17), - OCRDMA_QP_PARA_SGE_WR_VALID = Bit(18), - OCRDMA_QP_PARA_INB_RDEN_VALID = Bit(19), - OCRDMA_QP_PARA_INB_WREN_VALID = Bit(20), - OCRDMA_QP_PARA_FLOW_LBL_VALID = Bit(21), - OCRDMA_QP_PARA_BIND_EN_VALID = Bit(22), - OCRDMA_QP_PARA_ZLKEY_EN_VALID = Bit(23), - OCRDMA_QP_PARA_FMR_EN_VALID = Bit(24), - OCRDMA_QP_PARA_INBAT_EN_VALID = Bit(25), - OCRDMA_QP_PARA_VLAN_EN_VALID = Bit(26), - - OCRDMA_MODIFY_QP_FLAGS_RD = Bit(0), - OCRDMA_MODIFY_QP_FLAGS_WR = Bit(1), - OCRDMA_MODIFY_QP_FLAGS_SEND = Bit(2), - OCRDMA_MODIFY_QP_FLAGS_ATOMIC = Bit(3) + OCRDMA_QP_PARA_QPS_VALID = BIT(0), + OCRDMA_QP_PARA_SQD_ASYNC_VALID = BIT(1), + OCRDMA_QP_PARA_PKEY_VALID = BIT(2), + OCRDMA_QP_PARA_QKEY_VALID = BIT(3), + OCRDMA_QP_PARA_PMTU_VALID = BIT(4), + OCRDMA_QP_PARA_ACK_TO_VALID = BIT(5), + OCRDMA_QP_PARA_RETRY_CNT_VALID = BIT(6), + OCRDMA_QP_PARA_RRC_VALID = BIT(7), + OCRDMA_QP_PARA_RQPSN_VALID = BIT(8), + OCRDMA_QP_PARA_MAX_IRD_VALID = BIT(9), + OCRDMA_QP_PARA_MAX_ORD_VALID = BIT(10), + OCRDMA_QP_PARA_RNT_VALID = BIT(11), + OCRDMA_QP_PARA_SQPSN_VALID = BIT(12), + OCRDMA_QP_PARA_DST_QPN_VALID = BIT(13), + OCRDMA_QP_PARA_MAX_WQE_VALID = BIT(14), + OCRDMA_QP_PARA_MAX_RQE_VALID = BIT(15), + OCRDMA_QP_PARA_SGE_SEND_VALID = BIT(16), + OCRDMA_QP_PARA_SGE_RECV_VALID = BIT(17), + OCRDMA_QP_PARA_SGE_WR_VALID = BIT(18), + OCRDMA_QP_PARA_INB_RDEN_VALID = BIT(19), + OCRDMA_QP_PARA_INB_WREN_VALID = BIT(20), + OCRDMA_QP_PARA_FLOW_LBL_VALID = BIT(21), + OCRDMA_QP_PARA_BIND_EN_VALID = BIT(22), + OCRDMA_QP_PARA_ZLKEY_EN_VALID = BIT(23), + OCRDMA_QP_PARA_FMR_EN_VALID = BIT(24), + OCRDMA_QP_PARA_INBAT_EN_VALID = BIT(25), + OCRDMA_QP_PARA_VLAN_EN_VALID = BIT(26), + + OCRDMA_MODIFY_QP_FLAGS_RD = BIT(0), + OCRDMA_MODIFY_QP_FLAGS_WR = BIT(1), + OCRDMA_MODIFY_QP_FLAGS_SEND = BIT(2), + OCRDMA_MODIFY_QP_FLAGS_ATOMIC = BIT(3) }; enum { @@ -1014,15 +1012,15 @@ enum { OCRDMA_QP_PARAMS_MAX_SGE_SEND_MASK = 0xFFFF << OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT, - OCRDMA_QP_PARAMS_FLAGS_FMR_EN = Bit(0), - OCRDMA_QP_PARAMS_FLAGS_LKEY_0_EN = Bit(1), - OCRDMA_QP_PARAMS_FLAGS_BIND_MW_EN = Bit(2), - OCRDMA_QP_PARAMS_FLAGS_INBWR_EN = Bit(3), - OCRDMA_QP_PARAMS_FLAGS_INBRD_EN = Bit(4), + OCRDMA_QP_PARAMS_FLAGS_FMR_EN = BIT(0), + OCRDMA_QP_PARAMS_FLAGS_LKEY_0_EN = BIT(1), + 
OCRDMA_QP_PARAMS_FLAGS_BIND_MW_EN = BIT(2), + OCRDMA_QP_PARAMS_FLAGS_INBWR_EN = BIT(3), + OCRDMA_QP_PARAMS_FLAGS_INBRD_EN = BIT(4), OCRDMA_QP_PARAMS_STATE_SHIFT = 5, - OCRDMA_QP_PARAMS_STATE_MASK = Bit(5) | Bit(6) | Bit(7), - OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = Bit(8), - OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = Bit(9), + OCRDMA_QP_PARAMS_STATE_MASK = BIT(5) | BIT(6) | BIT(7), + OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = BIT(8), + OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = BIT(9), OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT = 16, OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK = 0xFFFF << OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT, @@ -1277,7 +1275,7 @@ struct ocrdma_alloc_pd { }; enum { - OCRDMA_ALLOC_PD_RSP_DPP = Bit(16), + OCRDMA_ALLOC_PD_RSP_DPP = BIT(16), OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT = 20, OCRDMA_ALLOC_PD_RSP_PDID_MASK = 0xFFFF, }; @@ -1309,18 +1307,18 @@ enum { OCRDMA_ALLOC_LKEY_PD_ID_MASK = 0xFFFF, OCRDMA_ALLOC_LKEY_ADDR_CHECK_SHIFT = 0, - OCRDMA_ALLOC_LKEY_ADDR_CHECK_MASK = Bit(0), + OCRDMA_ALLOC_LKEY_ADDR_CHECK_MASK = BIT(0), OCRDMA_ALLOC_LKEY_FMR_SHIFT = 1, - OCRDMA_ALLOC_LKEY_FMR_MASK = Bit(1), + OCRDMA_ALLOC_LKEY_FMR_MASK = BIT(1), OCRDMA_ALLOC_LKEY_REMOTE_INV_SHIFT = 2, - OCRDMA_ALLOC_LKEY_REMOTE_INV_MASK = Bit(2), + OCRDMA_ALLOC_LKEY_REMOTE_INV_MASK = BIT(2), OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT = 3, - OCRDMA_ALLOC_LKEY_REMOTE_WR_MASK = Bit(3), + OCRDMA_ALLOC_LKEY_REMOTE_WR_MASK = BIT(3), OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT = 4, - OCRDMA_ALLOC_LKEY_REMOTE_RD_MASK = Bit(4), + OCRDMA_ALLOC_LKEY_REMOTE_RD_MASK = BIT(4), OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT = 5, - OCRDMA_ALLOC_LKEY_LOCAL_WR_MASK = Bit(5), - OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_MASK = Bit(6), + OCRDMA_ALLOC_LKEY_LOCAL_WR_MASK = BIT(5), + OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_MASK = BIT(6), OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT = 6, OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT = 16, OCRDMA_ALLOC_LKEY_PBL_SIZE_MASK = 0xFFFF << @@ -1379,21 +1377,21 @@ enum { OCRDMA_REG_NSMR_HPAGE_SIZE_MASK = 0xFF << OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT, OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT = 24, - OCRDMA_REG_NSMR_BIND_MEMWIN_MASK = Bit(24), + OCRDMA_REG_NSMR_BIND_MEMWIN_MASK = BIT(24), OCRDMA_REG_NSMR_ZB_SHIFT = 25, - OCRDMA_REG_NSMR_ZB_SHIFT_MASK = Bit(25), + OCRDMA_REG_NSMR_ZB_SHIFT_MASK = BIT(25), OCRDMA_REG_NSMR_REMOTE_INV_SHIFT = 26, - OCRDMA_REG_NSMR_REMOTE_INV_MASK = Bit(26), + OCRDMA_REG_NSMR_REMOTE_INV_MASK = BIT(26), OCRDMA_REG_NSMR_REMOTE_WR_SHIFT = 27, - OCRDMA_REG_NSMR_REMOTE_WR_MASK = Bit(27), + OCRDMA_REG_NSMR_REMOTE_WR_MASK = BIT(27), OCRDMA_REG_NSMR_REMOTE_RD_SHIFT = 28, - OCRDMA_REG_NSMR_REMOTE_RD_MASK = Bit(28), + OCRDMA_REG_NSMR_REMOTE_RD_MASK = BIT(28), OCRDMA_REG_NSMR_LOCAL_WR_SHIFT = 29, - OCRDMA_REG_NSMR_LOCAL_WR_MASK = Bit(29), + OCRDMA_REG_NSMR_LOCAL_WR_MASK = BIT(29), OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT = 30, - OCRDMA_REG_NSMR_REMOTE_ATOMIC_MASK = Bit(30), + OCRDMA_REG_NSMR_REMOTE_ATOMIC_MASK = BIT(30), OCRDMA_REG_NSMR_LAST_SHIFT = 31, - OCRDMA_REG_NSMR_LAST_MASK = Bit(31) + OCRDMA_REG_NSMR_LAST_MASK = BIT(31) }; struct ocrdma_reg_nsmr { @@ -1420,7 +1418,7 @@ enum { OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT, OCRDMA_REG_NSMR_CONT_LAST_SHIFT = 31, - OCRDMA_REG_NSMR_CONT_LAST_MASK = Bit(31) + OCRDMA_REG_NSMR_CONT_LAST_MASK = BIT(31) }; struct ocrdma_reg_nsmr_cont { @@ -1566,7 +1564,7 @@ struct ocrdma_delete_ah_tbl_rsp { enum { OCRDMA_EQE_VALID_SHIFT = 0, - OCRDMA_EQE_VALID_MASK = Bit(0), + OCRDMA_EQE_VALID_MASK = BIT(0), OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE, OCRDMA_EQE_RESOURCE_ID_SHIFT = 16, OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF << @@ -1624,11 +1622,11 @@ enum { OCRDMA_CQE_UD_STATUS_MASK = 0x7 << 
OCRDMA_CQE_UD_STATUS_SHIFT, OCRDMA_CQE_STATUS_SHIFT = 16, OCRDMA_CQE_STATUS_MASK = 0xFF << OCRDMA_CQE_STATUS_SHIFT, - OCRDMA_CQE_VALID = Bit(31), - OCRDMA_CQE_INVALIDATE = Bit(30), - OCRDMA_CQE_QTYPE = Bit(29), - OCRDMA_CQE_IMM = Bit(28), - OCRDMA_CQE_WRITE_IMM = Bit(27), + OCRDMA_CQE_VALID = BIT(31), + OCRDMA_CQE_INVALIDATE = BIT(30), + OCRDMA_CQE_QTYPE = BIT(29), + OCRDMA_CQE_IMM = BIT(28), + OCRDMA_CQE_WRITE_IMM = BIT(27), OCRDMA_CQE_QTYPE_SQ = 0, OCRDMA_CQE_QTYPE_RQ = 1, OCRDMA_CQE_SRCQP_MASK = 0xFFFFFF @@ -1772,8 +1770,8 @@ struct ocrdma_grh { u16 rsvd; } __packed; -#define OCRDMA_AV_VALID Bit(7) -#define OCRDMA_AV_VLAN_VALID Bit(1) +#define OCRDMA_AV_VALID BIT(7) +#define OCRDMA_AV_VLAN_VALID BIT(1) struct ocrdma_av { struct ocrdma_eth_vlan eth_hdr; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index e8b8569788c0..4c68305ee781 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -388,7 +388,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, memset(&resp, 0, sizeof(resp)); resp.ah_tbl_len = ctx->ah_tbl.len; - resp.ah_tbl_page = ctx->ah_tbl.pa; + resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va); status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len); if (status) @@ -870,7 +870,7 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, uresp.page_size = PAGE_ALIGN(cq->len); uresp.num_pages = 1; uresp.max_hw_cqe = cq->max_hw_cqe; - uresp.page_addr[0] = cq->pa; + uresp.page_addr[0] = virt_to_phys(cq->va); uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id); uresp.db_page_size = dev->nic_info.db_page_size; uresp.phase_change = cq->phase_change ? 1 : 0; @@ -1123,13 +1123,13 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, uresp.sq_dbid = qp->sq.dbid; uresp.num_sq_pages = 1; uresp.sq_page_size = PAGE_ALIGN(qp->sq.len); - uresp.sq_page_addr[0] = qp->sq.pa; + uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va); uresp.num_wqe_allocated = qp->sq.max_cnt; if (!srq) { uresp.rq_dbid = qp->rq.dbid; uresp.num_rq_pages = 1; uresp.rq_page_size = PAGE_ALIGN(qp->rq.len); - uresp.rq_page_addr[0] = qp->rq.pa; + uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va); uresp.num_rqe_allocated = qp->rq.max_cnt; } uresp.db_page_addr = usr_db; @@ -1680,7 +1680,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, memset(&uresp, 0, sizeof(uresp)); uresp.rq_dbid = srq->rq.dbid; uresp.num_rq_pages = 1; - uresp.rq_page_addr[0] = srq->rq.pa; + uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va); uresp.rq_page_size = srq->rq.len; uresp.db_page_addr = dev->nic_info.unmapped_db + (srq->pd->id * dev->nic_info.db_page_size); |
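The ocrdma_verbs.c hunks above replace the stored dma_addr_t ("pa") with virt_to_phys() of the kernel virtual address for every buffer that user space later mmap()s ("Convert kernel VA to PA for mmap in user"). The motivation is that with an IOMMU the bus address returned by dma_alloc_coherent() need not equal the physical address, so the PFN handed to the remap call must be derived from the kernel VA. A hypothetical minimal mmap helper showing the pattern (not ocrdma code):

```c
/*
 * Sketch of mapping a dma_alloc_coherent() buffer to user space: derive
 * the pfn from the kernel virtual address, not from the dma_addr_t.
 */
#include <linux/io.h>
#include <linux/mm.h>

static int example_mmap_coherent(struct vm_area_struct *vma, void *va, size_t len)
{
	unsigned long pfn = virt_to_phys(va) >> PAGE_SHIFT;	/* not the bus address */

	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(len))
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
```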