Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c     | 120
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c  |  29
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h  |  33
3 files changed, 84 insertions(+), 98 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index b7034d4d2620..f18ee76ae870 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -42,25 +42,31 @@
#include "qib.h"
-#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+/*
+ * The mask field, which was present in the now-deleted qib_qpn_table,
+ * is not present in rvt_qpn_table. Define the same field here as
+ * qpt_mask instead of adding a mask field to rvt_qpn_table.
+ */
+static u16 qpt_mask;
-static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
- struct qpn_map *map, unsigned off)
+static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map, unsigned off)
{
- return (map - qpt->map) * BITS_PER_PAGE + off;
+ return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
-static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
- struct qpn_map *map, unsigned off,
+static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map, unsigned off,
unsigned n)
{
- if (qpt->mask) {
+ if (qpt_mask) {
off++;
- if (((off & qpt->mask) >> 1) >= n)
- off = (off | qpt->mask) + 2;
- } else
- off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
+ if (((off & qpt_mask) >> 1) >= n)
+ off = (off | qpt_mask) + 2;
+ } else {
+ off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
+ }
return off;
}
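The masked skip in find_next_offset() above is easier to follow in isolation. Here is a minimal userspace sketch, with hypothetical values standing in for dd->qpn_mask and dd->n_krcv_queues, of how an offset is advanced past QPNs whose masked bits would select a nonexistent kernel receive queue:

#include <stdio.h>

#define QPT_MASK	0x0e	/* hypothetical dd->qpn_mask */
#define N_KRCV		4	/* hypothetical dd->n_krcv_queues */

/* Mirrors the masked branch of find_next_offset(): bump the offset,
 * and if its masked bits select a receive queue >= N_KRCV, round up
 * past the rest of the masked range. */
static unsigned next_offset(unsigned off)
{
	off++;
	if (((off & QPT_MASK) >> 1) >= N_KRCV)
		off = (off | QPT_MASK) + 2;
	return off;
}

int main(void)
{
	unsigned off = 2;
	int i;

	for (i = 0; i < 8; i++) {
		printf("offset %2u -> krcv queue %u\n",
		       off, (off & QPT_MASK) >> 1);
		off = next_offset(off);
	}
	return 0;	/* the queue column never reaches N_KRCV */
}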
@@ -101,7 +107,7 @@ static u32 credit_table[31] = {
32768 /* 1E */
};
-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
+static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
gfp_t gfp)
{
unsigned long page = get_zeroed_page(gfp);
@@ -122,11 +128,11 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
* Allocate the next available QPN or
* zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
*/
-static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+static int alloc_qpn(struct qib_devdata *dd, struct rvt_qpn_table *qpt,
enum ib_qp_type type, u8 port, gfp_t gfp)
{
u32 i, offset, max_scan, qpn;
- struct qpn_map *map;
+ struct rvt_qpn_map *map;
u32 ret;
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
@@ -144,12 +150,12 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
}
qpn = qpt->last + 2;
- if (qpn >= QPN_MAX)
+ if (qpn >= RVT_QPN_MAX)
qpn = 2;
- if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
- qpn = (qpn | qpt->mask) + 2;
- offset = qpn & BITS_PER_PAGE_MASK;
- map = &qpt->map[qpn / BITS_PER_PAGE];
+ if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
+ qpn = (qpn | qpt_mask) + 2;
+ offset = qpn & RVT_BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
@@ -174,14 +180,14 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
* We just need to be sure we don't loop
* forever.
*/
- } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+ } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
/*
* In order to keep the number of pages allocated to a
* minimum, we scan all the existing pages before increasing
* the size of the bitmap table.
*/
if (++i > max_scan) {
- if (qpt->nmaps == QPNMAP_ENTRIES)
+ if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
break;
map = &qpt->map[qpt->nmaps++];
offset = 0;
@@ -201,19 +207,19 @@ bail:
return ret;
}
-static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
+static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
- struct qpn_map *map;
+ struct rvt_qpn_map *map;
- map = qpt->map + qpn / BITS_PER_PAGE;
+ map = qpt->map + qpn / RVT_BITS_PER_PAGE;
if (map->page)
- clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+ clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
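mk_qpn() and free_qpn() are inverses over the paged bitmap: a QPN splits into a map index and a bit offset, and recombines the same way. A standalone sketch of that arithmetic using the rdmavt constants (PAGE_SIZE assumed to be 4096 here purely for illustration):

#include <assert.h>
#include <stdio.h>

#define RVT_BITS_PER_PAGE	(4096 * 8)	/* PAGE_SIZE * BITS_PER_BYTE */
#define RVT_BITS_PER_PAGE_MASK	(RVT_BITS_PER_PAGE - 1)

int main(void)
{
	unsigned qpn = 70000;

	/* free_qpn() direction: QPN -> (map index, bit offset) */
	unsigned map = qpn / RVT_BITS_PER_PAGE;
	unsigned off = qpn & RVT_BITS_PER_PAGE_MASK;

	/* mk_qpn() direction: (map index, bit offset) -> QPN */
	unsigned back = map * RVT_BITS_PER_PAGE + off;

	assert(back == qpn);
	printf("qpn %u -> map %u, bit %u\n", qpn, map, off);
	return 0;
}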
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
return jhash_1word(qpn, dev->qp_rnd) &
- (dev->qp_table_size - 1);
+ (dev->rdi.qp_dev->qp_table_size - 1);
}
@@ -228,18 +234,18 @@ static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
atomic_inc(&qp->refcount);
- spin_lock_irqsave(&dev->qpt_lock, flags);
+ spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);
if (qp->ibqp.qp_num == 0)
rcu_assign_pointer(ibp->rvp.qp[0], qp);
else if (qp->ibqp.qp_num == 1)
rcu_assign_pointer(ibp->rvp.qp[1], qp);
else {
- qp->next = dev->qp_table[n];
- rcu_assign_pointer(dev->qp_table[n], qp);
+ qp->next = dev->rdi.qp_dev->qp_table[n];
+ rcu_assign_pointer(dev->rdi.qp_dev->qp_table[n], qp);
}
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
}
/*
@@ -252,34 +258,36 @@ static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
unsigned long flags;
int removed = 1;
+ spinlock_t *qpt_lock_ptr; /* Pointer to make checkpatch happy */
- spin_lock_irqsave(&dev->qpt_lock, flags);
+ spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);
+ qpt_lock_ptr = &dev->rdi.qp_dev->qpt_lock;
if (rcu_dereference_protected(ibp->rvp.qp[0],
- lockdep_is_held(&dev->qpt_lock)) == qp) {
+ lockdep_is_held(qpt_lock_ptr)) == qp) {
RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
} else if (rcu_dereference_protected(ibp->rvp.qp[1],
- lockdep_is_held(&dev->qpt_lock)) == qp) {
+ lockdep_is_held(qpt_lock_ptr)) == qp) {
RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
} else {
struct rvt_qp *q;
struct rvt_qp __rcu **qpp;
removed = 0;
- qpp = &dev->qp_table[n];
+ qpp = &dev->rdi.qp_dev->qp_table[n];
for (; (q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock))) != NULL;
+ lockdep_is_held(qpt_lock_ptr))) != NULL;
qpp = &q->next)
if (q == qp) {
RCU_INIT_POINTER(*qpp,
rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qpt_lock)));
+ lockdep_is_held(qpt_lock_ptr)));
removed = 1;
break;
}
}
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
if (removed) {
synchronize_rcu();
atomic_dec(&qp->refcount);
@@ -299,6 +307,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
unsigned long flags;
struct rvt_qp *qp;
unsigned n, qp_inuse = 0;
+ spinlock_t *qpt_lock_ptr; /* Pointer to make checkpatch happy */
for (n = 0; n < dd->num_pports; n++) {
struct qib_ibport *ibp = &dd->pport[n].ibport_data;
@@ -313,17 +322,18 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
rcu_read_unlock();
}
- spin_lock_irqsave(&dev->qpt_lock, flags);
- for (n = 0; n < dev->qp_table_size; n++) {
- qp = rcu_dereference_protected(dev->qp_table[n],
- lockdep_is_held(&dev->qpt_lock));
- RCU_INIT_POINTER(dev->qp_table[n], NULL);
+ spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);
+ qpt_lock_ptr = &dev->rdi.qp_dev->qpt_lock;
+ for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
+ qp = rcu_dereference_protected(dev->rdi.qp_dev->qp_table[n],
+ lockdep_is_held(qpt_lock_ptr));
+ RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[n], NULL);
for (; qp; qp = rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qpt_lock)))
+ lockdep_is_held(qpt_lock_ptr)))
qp_inuse++;
}
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
synchronize_rcu();
return qp_inuse;
@@ -353,7 +363,7 @@ struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
unsigned n = qpn_hash(dev, qpn);
- for (qp = rcu_dereference(dev->qp_table[n]); qp;
+ for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
qp = rcu_dereference(qp->next))
if (qp->ibqp.qp_num == qpn) {
atomic_inc(&qp->refcount);
@@ -1121,8 +1131,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
qp->s_flags = QIB_S_SIGNAL_REQ_WR;
dev = to_idev(ibpd->device);
dd = dd_from_dev(dev);
- err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
- init_attr->port_num, gfp);
+ err = alloc_qpn(dd, &dev->rdi.qp_dev->qpn_table,
+ init_attr->qp_type, init_attr->port_num, gfp);
if (err < 0) {
ret = ERR_PTR(err);
vfree(qp->r_rq.wq);
@@ -1199,7 +1209,7 @@ bail_ip:
kref_put(&qp->ip->ref, rvt_release_mmap_info);
else
vfree(qp->r_rq.wq);
- free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
+ free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
kfree(priv->s_hdr);
kfree(priv);
@@ -1250,7 +1260,7 @@ int qib_destroy_qp(struct ib_qp *ibqp)
spin_unlock_irq(&qp->s_lock);
/* all users cleaned up, mark it available */
- free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
+ free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
spin_lock(&dev->n_qps_lock);
dev->n_qps_allocated--;
spin_unlock(&dev->n_qps_lock);
@@ -1270,19 +1280,19 @@ int qib_destroy_qp(struct ib_qp *ibqp)
* qib_init_qpn_table - initialize the QP number table for a device
* @qpt: the QPN table
*/
-void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
+void qib_init_qpn_table(struct qib_devdata *dd, struct rvt_qpn_table *qpt)
{
spin_lock_init(&qpt->lock);
qpt->last = 1; /* start with QPN 2 */
qpt->nmaps = 1;
- qpt->mask = dd->qpn_mask;
+ qpt_mask = dd->qpn_mask;
}
/**
* qib_free_qpn_table - free the QP number table for a device
* @qpt: the QPN table
*/
-void qib_free_qpn_table(struct qib_qpn_table *qpt)
+void qib_free_qpn_table(struct rvt_qpn_table *qpt)
{
int i;
@@ -1361,11 +1371,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
struct rvt_qp *pqp = iter->qp;
struct rvt_qp *qp;
- for (; n < dev->qp_table_size; n++) {
+ for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
if (pqp)
qp = rcu_dereference(pqp->next);
else
- qp = rcu_dereference(dev->qp_table[n]);
+ qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
pqp = qp;
if (qp) {
iter->qp = qp;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 74cb50104e0e..2daca8f31ca2 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1946,24 +1946,29 @@ int qib_register_ib_device(struct qib_devdata *dd)
unsigned i, ctxt;
int ret;
- dev->qp_table_size = ib_qib_qp_table_size;
+ /* allocate parent object */
+ dev->rdi.qp_dev = kzalloc(sizeof(*dev->rdi.qp_dev), GFP_KERNEL);
+ if (!dev->rdi.qp_dev)
+ return -ENOMEM;
+ dev->rdi.qp_dev->qp_table_size = ib_qib_qp_table_size;
+ dev->rdi.qp_dev->qp_table_bits = ilog2(ib_qib_qp_table_size);
get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
- dev->qp_table = kmalloc_array(
- dev->qp_table_size,
- sizeof(*dev->qp_table),
+ dev->rdi.qp_dev->qp_table = kmalloc_array(
+ dev->rdi.qp_dev->qp_table_size,
+ sizeof(*dev->rdi.qp_dev->qp_table),
GFP_KERNEL);
- if (!dev->qp_table) {
+ if (!dev->rdi.qp_dev->qp_table) {
ret = -ENOMEM;
goto err_qpt;
}
- for (i = 0; i < dev->qp_table_size; i++)
- RCU_INIT_POINTER(dev->qp_table[i], NULL);
+ for (i = 0; i < dev->rdi.qp_dev->qp_table_size; i++)
+ RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[i], NULL);
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
/* Only need to initialize non-zero fields. */
- spin_lock_init(&dev->qpt_lock);
+ spin_lock_init(&dev->rdi.qp_dev->qpt_lock);
spin_lock_init(&dev->n_cqs_lock);
spin_lock_init(&dev->n_qps_lock);
spin_lock_init(&dev->n_srqs_lock);
@@ -1972,7 +1977,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
dev->mem_timer.function = mem_timer;
dev->mem_timer.data = (unsigned long) dev;
- qib_init_qpn_table(dd, &dev->qpn_table);
+ qib_init_qpn_table(dd, &dev->rdi.qp_dev->qpn_table);
INIT_LIST_HEAD(&dev->piowait);
INIT_LIST_HEAD(&dev->dmawait);
@@ -2159,7 +2164,7 @@ err_tx:
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
- kfree(dev->qp_table);
+ kfree(dev->rdi.qp_dev->qp_table);
err_qpt:
qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
bail:
@@ -2192,7 +2197,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
qps_inuse);
del_timer_sync(&dev->mem_timer);
- qib_free_qpn_table(&dev->qpn_table);
+ qib_free_qpn_table(&dev->rdi.qp_dev->qpn_table);
while (!list_empty(&dev->txreq_free)) {
struct list_head *l = dev->txreq_free.next;
struct qib_verbs_txreq *tx;
@@ -2206,7 +2211,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
dd->pport->sdma_descq_cnt *
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
- kfree(dev->qp_table);
+ kfree(dev->rdi.qp_dev->qp_table);
}
/*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index eade66850640..e10ab80db3b1 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -55,9 +55,6 @@ struct qib_verbs_txreq;
#define QIB_MAX_RDMA_ATOMIC 16
#define QIB_GUIDS_PER_PORT 5
-#define QPN_MAX (1 << 24)
-#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
-
/*
* Increment this value if any changes that break userspace ABI
* compatibility are made.
@@ -364,26 +361,6 @@ static inline struct rvt_rwqe *get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
rq->max_sge * sizeof(struct ib_sge)) * n);
}
-/*
- * QPN-map pages start out as NULL, they get allocated upon
- * first use and are never deallocated. This way,
- * large bitmaps are not allocated unless large numbers of QPs are used.
- */
-struct qpn_map {
- void *page;
-};
-
-struct qib_qpn_table {
- spinlock_t lock; /* protect changes in this struct */
- unsigned flags; /* flags for QP0/1 allocated for each port */
- u32 last; /* last QP number allocated */
- u32 nmaps; /* size of the map table */
- u16 limit;
- u16 mask;
- /* bit map of free QP numbers other than 0/1 */
- struct qpn_map map[QPNMAP_ENTRIES];
-};
-
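For reference, the rdmavt structures that replace the definitions deleted above look roughly like this; it is paraphrased from include/rdma/rdmavt_qp.h of the same period rather than copied, so treat that header as authoritative. Note the absence of a mask field, which is what forces the file-scope qpt_mask in qib_qp.c:

/* Paraphrase of include/rdma/rdmavt_qp.h (kernel context assumed:
 * PAGE_SIZE, BITS_PER_BYTE and spinlock_t come from kernel headers). */
#define RVT_QPN_MAX		(1 << 24)
#define RVT_QPNMAP_ENTRIES	(RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE	(PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK	(RVT_BITS_PER_PAGE - 1)

/* QPN-map pages start out NULL, are allocated on first use, and are
 * never freed - the same lazy scheme the deleted qib comment described. */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock;	/* protect changes to the qp table */
	unsigned flags;		/* flags for QP0/1 allocated per port */
	u32 last;		/* last QP number allocated */
	u32 nmaps;		/* size of the map table */
	u16 limit;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};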
struct qib_opcode_stats {
u64 n_packets; /* number of packets */
u64 n_bytes; /* total number of bytes */
@@ -429,21 +406,15 @@ struct qib_ibport {
struct qib_ibdev {
struct rvt_dev_info rdi;
- /* QP numbers are shared by all IB ports */
- struct qib_qpn_table qpn_table;
struct list_head piowait; /* list for wait PIO buf */
struct list_head dmawait; /* list for wait DMA */
struct list_head txwait; /* list for wait qib_verbs_txreq */
struct list_head memwait; /* list for wait kernel memory */
struct list_head txreq_free;
struct timer_list mem_timer;
- struct rvt_qp __rcu **qp_table;
struct qib_pio_header *pio_hdrs;
dma_addr_t pio_hdrs_phys;
- /* list of QPs waiting for RNR timer */
- u32 qp_table_size; /* size of the hash table */
u32 qp_rnd; /* random bytes for hash */
- spinlock_t qpt_lock;
u32 n_piowait;
u32 n_txwait;
@@ -581,9 +552,9 @@ int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
unsigned qib_free_all_qps(struct qib_devdata *dd);
-void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
+void qib_init_qpn_table(struct qib_devdata *dd, struct rvt_qpn_table *qpt);
-void qib_free_qpn_table(struct qib_qpn_table *qpt);
+void qib_free_qpn_table(struct rvt_qpn_table *qpt);
#ifdef CONFIG_DEBUG_FS