Diffstat (limited to 'drivers/infiniband/sw/rdmavt')
-rw-r--r--   drivers/infiniband/sw/rdmavt/ah.c   |  11
-rw-r--r--   drivers/infiniband/sw/rdmavt/ah.h   |   4
-rw-r--r--   drivers/infiniband/sw/rdmavt/mr.c   | 155
-rw-r--r--   drivers/infiniband/sw/rdmavt/mr.h   |  15
-rw-r--r--   drivers/infiniband/sw/rdmavt/qp.c   |  24
-rw-r--r--   drivers/infiniband/sw/rdmavt/vt.c   |   4
6 files changed, 27 insertions, 186 deletions
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index ee02c6176007..40480add7dd3 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -98,14 +98,14 @@ EXPORT_SYMBOL(rvt_check_ah);
*
* Return: 0 on success
*/
-int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 create_flags, struct ib_udata *udata)
+int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct rvt_ah *ah = ibah_to_rvtah(ibah);
struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
unsigned long flags;
- if (rvt_check_ah(ibah->device, ah_attr))
+ if (rvt_check_ah(ibah->device, init_attr->ah_attr))
return -EINVAL;
spin_lock_irqsave(&dev->n_ahs_lock, flags);
@@ -117,10 +117,11 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
dev->n_ahs_allocated++;
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- rdma_copy_ah_attr(&ah->attr, ah_attr);
+ rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
if (dev->driver_f.notify_new_ah)
- dev->driver_f.notify_new_ah(ibah->device, ah_attr, ah);
+ dev->driver_f.notify_new_ah(ibah->device,
+ init_attr->ah_attr, ah);
return 0;
}
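
The ah.c/ah.h hunks track the core verbs change that folds the old ah_attr and create_flags arguments of the .create_ah hook into a single struct rdma_ah_init_attr. A minimal sketch of the resulting callback shape, assuming the structure exposes ah_attr and flags members as in the ib_verbs.h of the same series (example_create_ah is an illustrative name, not part of this patch):

#include <rdma/ib_verbs.h>

/* Sketch only: unpack the bundled attribute the way rvt_create_ah() above does. */
static int example_create_ah(struct ib_ah *ibah,
			     struct rdma_ah_init_attr *init_attr,
			     struct ib_udata *udata)
{
	/* both pieces of the old argument list now travel in init_attr */
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	u32 create_flags = init_attr->flags;

	if (!ah_attr || (create_flags & ~RDMA_CREATE_AH_SLEEPABLE))
		return -EINVAL;

	/* validate ah_attr and copy it into the driver's AH object here */
	return 0;
}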
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
index bbb4d3bdec4e..40b7123fec76 100644
--- a/drivers/infiniband/sw/rdmavt/ah.h
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -50,8 +50,8 @@
#include <rdma/rdma_vt.h>
-int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
- u32 create_flags, struct ib_udata *udata);
+int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags);
int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 72f6534fbb52..60864e5ca7cb 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -97,7 +97,6 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
rdi->dparms.props.max_mr = rdi->lkey_table.max;
- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
return 0;
}
@@ -714,160 +713,6 @@ bail:
EXPORT_SYMBOL(rvt_invalidate_rkey);
/**
- * rvt_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Return: the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct rvt_fmr *fmr;
- int m;
- struct ib_fmr *ret;
- int rval = -ENOMEM;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
- fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL);
- if (!fmr)
- goto bail;
-
- rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
- PERCPU_REF_INIT_ATOMIC);
- if (rval)
- goto bail;
-
- /*
- * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
- * rkey.
- */
- rval = rvt_alloc_lkey(&fmr->mr, 0);
- if (rval)
- goto bail_mregion;
- fmr->ibfmr.rkey = fmr->mr.lkey;
- fmr->ibfmr.lkey = fmr->mr.lkey;
- /*
- * Resources are allocated but no valid mapping (RKEY can't be
- * used).
- */
- fmr->mr.access_flags = mr_access_flags;
- fmr->mr.max_segs = fmr_attr->max_pages;
- fmr->mr.page_shift = fmr_attr->page_shift;
-
- ret = &fmr->ibfmr;
-done:
- return ret;
-
-bail_mregion:
- rvt_deinit_mregion(&fmr->mr);
-bail:
- kfree(fmr);
- ret = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * rvt_map_phys_fmr - set up a fast memory region
- * @ibfmr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- *
- * Return: 0 on success
- */
-
-int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct rvt_fmr *fmr = to_ifmr(ibfmr);
- struct rvt_lkey_table *rkt;
- unsigned long flags;
- int m, n;
- unsigned long i;
- u32 ps;
- struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
-
- i = atomic_long_read(&fmr->mr.refcount.count);
- if (i > 2)
- return -EBUSY;
-
- if (list_len > fmr->mr.max_segs)
- return -EINVAL;
-
- rkt = &rdi->lkey_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = iova;
- fmr->mr.iova = iova;
- ps = 1 << fmr->mr.page_shift;
- fmr->mr.length = list_len * ps;
- m = 0;
- n = 0;
- for (i = 0; i < list_len; i++) {
- fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
- fmr->mr.map[m]->segs[n].length = ps;
- trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
- if (++n == RVT_SEGSZ) {
- m++;
- n = 0;
- }
- }
- spin_unlock_irqrestore(&rkt->lock, flags);
- return 0;
-}
-
-/**
- * rvt_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Return: 0 on success.
- */
-int rvt_unmap_fmr(struct list_head *fmr_list)
-{
- struct rvt_fmr *fmr;
- struct rvt_lkey_table *rkt;
- unsigned long flags;
- struct rvt_dev_info *rdi;
-
- list_for_each_entry(fmr, fmr_list, ibfmr.list) {
- rdi = ib_to_rvt(fmr->ibfmr.device);
- rkt = &rdi->lkey_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = 0;
- fmr->mr.iova = 0;
- fmr->mr.length = 0;
- spin_unlock_irqrestore(&rkt->lock, flags);
- }
- return 0;
-}
-
-/**
- * rvt_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Return: 0 on success.
- */
-int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
-{
- struct rvt_fmr *fmr = to_ifmr(ibfmr);
- int ret = 0;
-
- rvt_free_lkey(&fmr->mr);
- rvt_put_mr(&fmr->mr); /* will set completion if last */
- ret = rvt_check_refs(&fmr->mr, __func__);
- if (ret)
- goto out;
- rvt_deinit_mregion(&fmr->mr);
- kfree(fmr);
-out:
- return ret;
-}
-
-/**
* rvt_sge_adjacent - is isge compressible
* @last_sge: last outgoing SGE written
* @sge: SGE to check
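
With this hunk the legacy FMR verbs are gone from rdmavt; the fast-registration MR path (rvt_alloc_mr()/rvt_map_mr_sg(), still declared in mr.h below) remains. For context, a hedged sketch of how a kernel ULP covers the same ground through the generic FR-MR API; example_fr_register, pd, sgl and nents are placeholders supplied by the caller:

#include <rdma/ib_verbs.h>
#include <linux/scatterlist.h>

/* Sketch only: allocate an FR MR, map the SG list into it, and leave the
 * final IB_WR_REG_MR posting to the caller.
 */
static int example_fr_register(struct ib_pd *pd, struct scatterlist *sgl,
			       int nents, struct ib_mr **out_mr)
{
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
	if (n < nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* caller posts an IB_WR_REG_MR work request to activate mr->rkey */
	*out_mr = mr;
	return 0;
}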
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
index 2c8d0752e8e3..780fc63af98b 100644
--- a/drivers/infiniband/sw/rdmavt/mr.h
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -49,10 +49,6 @@
*/
#include <rdma/rdma_vt.h>
-struct rvt_fmr {
- struct ib_fmr ibfmr;
- struct rvt_mregion mr; /* must be last */
-};
struct rvt_mr {
struct ib_mr ibmr;
@@ -60,11 +56,6 @@ struct rvt_mr {
struct rvt_mregion mr; /* must be last */
};
-static inline struct rvt_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct rvt_fmr, ibfmr);
-}
-
static inline struct rvt_mr *to_imr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct rvt_mr, ibmr);
@@ -83,11 +74,5 @@ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
-struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-int rvt_unmap_fmr(struct list_head *fmr_list);
-int rvt_dealloc_fmr(struct ib_fmr *ibfmr);
#endif /* DEF_RVTMR_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 500a7ee04c44..511b72809e14 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 - 2019 Intel Corporation.
+ * Copyright(c) 2016 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -525,15 +525,18 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
* @rdi: rvt device info structure
* @qpt: queue pair number table pointer
* @port_num: IB port number, 1 based, comes from core
+ * @exclude_prefix: prefix of special queue pair number being allocated
*
* Return: The queue pair number
*/
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num)
+ enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
u32 ret;
+ u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
+ RVT_AIP_QPN_MAX : RVT_QPN_MAX;
if (rdi->driver_f.alloc_qpn)
return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
@@ -553,7 +556,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
}
qpn = qpt->last + qpt->incr;
- if (qpn >= RVT_QPN_MAX)
+ if (qpn >= max_qpn)
qpn = qpt->incr | ((qpt->last & 1) ^ 1);
/* offset carries bit 0 */
offset = qpn & RVT_BITS_PER_PAGE_MASK;
@@ -987,6 +990,9 @@ static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
struct rvt_qpn_map *map;
+ if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
+ qpn &= RVT_AIP_QP_SUFFIX;
+
map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
if (map->page)
clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
@@ -1074,13 +1080,15 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
void *priv = NULL;
size_t sqsize;
+ u8 exclude_prefix = 0;
if (!rdi)
return ERR_PTR(-EINVAL);
if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
- init_attr->create_flags)
+ (init_attr->create_flags &&
+ init_attr->create_flags != IB_QP_CREATE_NETDEV_USE))
return ERR_PTR(-EINVAL);
/* Check receive queue parameters if no SRQ is specified. */
@@ -1199,14 +1207,20 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
goto bail_driver_priv;
}
+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ exclude_prefix = RVT_AIP_QP_PREFIX;
+
err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
init_attr->qp_type,
- init_attr->port_num);
+ init_attr->port_num,
+ exclude_prefix);
if (err < 0) {
ret = ERR_PTR(err);
goto bail_rq_wq;
}
qp->ibqp.qp_num = err;
+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
qp->port_num = init_attr->port_num;
rvt_init_qp(rdi, qp, init_attr->qp_type);
if (rdi->driver_f.qp_priv_init) {
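
Taken together, the qp.c hunks carve out a QPN prefix for accelerated-IP (netdev) QPs: alloc_qpn() caps its search at RVT_AIP_QPN_MAX when the prefix is requested, rvt_create_qp() ORs RVT_AIP_QP_BASE into the allocated number, and rvt_free_qpn() masks the prefix back off with RVT_AIP_QP_SUFFIX. A standalone sketch of that encode/decode round trip; the EX_* constants are illustrative assumptions, not values quoted from the rdmavt headers:

/* Sketch only: the AIP QPN round trip implied by the hunks above. */
#define EX_AIP_QP_PREFIX	0x81u
#define EX_AIP_QP_PREFIX_SHIFT	16
#define EX_AIP_QP_BASE		(EX_AIP_QP_PREFIX << EX_AIP_QP_PREFIX_SHIFT)
#define EX_AIP_QP_PREFIX_MASK	(0xffu << EX_AIP_QP_PREFIX_SHIFT)
#define EX_AIP_QP_SUFFIX	0xffffu

/* rvt_create_qp() side: tag a freshly allocated QPN as an AIP QP */
static u32 ex_encode_aip_qpn(u32 raw_qpn)
{
	/* raw_qpn stays below the suffix width, so the OR is lossless */
	return raw_qpn | EX_AIP_QP_BASE;
}

/* rvt_free_qpn() side: strip the prefix before touching the bitmap */
static u32 ex_decode_qpn_for_free(u32 qpn)
{
	if ((qpn & EX_AIP_QP_PREFIX_MASK) == EX_AIP_QP_BASE)
		qpn &= EX_AIP_QP_SUFFIX;
	return qpn;
}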
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 72b031ab7092..f904bb34477a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -378,7 +378,6 @@ enum {
static const struct ib_device_ops rvt_dev_ops = {
.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION,
- .alloc_fmr = rvt_alloc_fmr,
.alloc_mr = rvt_alloc_mr,
.alloc_pd = rvt_alloc_pd,
.alloc_ucontext = rvt_alloc_ucontext,
@@ -387,7 +386,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.create_cq = rvt_create_cq,
.create_qp = rvt_create_qp,
.create_srq = rvt_create_srq,
- .dealloc_fmr = rvt_dealloc_fmr,
.dealloc_pd = rvt_dealloc_pd,
.dealloc_ucontext = rvt_dealloc_ucontext,
.dereg_mr = rvt_dereg_mr,
@@ -399,7 +397,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.get_dma_mr = rvt_get_dma_mr,
.get_port_immutable = rvt_get_port_immutable,
.map_mr_sg = rvt_map_mr_sg,
- .map_phys_fmr = rvt_map_phys_fmr,
.mmap = rvt_mmap,
.modify_ah = rvt_modify_ah,
.modify_device = rvt_modify_device,
@@ -420,7 +417,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.reg_user_mr = rvt_reg_user_mr,
.req_notify_cq = rvt_req_notify_cq,
.resize_cq = rvt_resize_cq,
- .unmap_fmr = rvt_unmap_fmr,
INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq),
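
The vt.c hunk only drops the FMR entries from the static rvt_dev_ops table (the matching hooks disappear from struct ib_device_ops in the same removal series). For context, a hedged sketch of how such a table is attached during registration via the standard ib_set_device_ops() helper; example_dev_ops and example_wire_ops are illustrative names:

#include <rdma/ib_verbs.h>
/* rvt_alloc_mr()/rvt_map_mr_sg()/rvt_dereg_mr() are declared in
 * drivers/infiniband/sw/rdmavt/mr.h
 */

/* Sketch only: a trimmed ops table with the FR-MR hooks that remain. */
static const struct ib_device_ops example_dev_ops = {
	.alloc_mr	= rvt_alloc_mr,
	.map_mr_sg	= rvt_map_mr_sg,
	.dereg_mr	= rvt_dereg_mr,
	/* no .alloc_fmr/.map_phys_fmr/.unmap_fmr/.dealloc_fmr entries */
};

static void example_wire_ops(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &example_dev_ops);
}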