author	Dean Luick <dean.luick@intel.com>	2015-12-10 16:52:30 -0500
committer	Doug Ledford <dledford@redhat.com>	2015-12-24 00:17:30 -0500
commit	0d6ed314de18b65a8063cbed450a2ca0c6a16c52 (patch)
tree	efb33d2fc3f4d828f9890d029e4d84261af17897 /drivers/infiniband
parent	051f263098a90d208e2d20251bfd4834bc783214 (diff)
IB/mad: Ensure fairness in ib_mad_completion_handler
It was found that when a process was rapidly sending MADs other processes
could be hung in their unregister calls.  This would happen when process A
was injecting packets fast enough that the single threaded workqueue was
never exiting ib_mad_completion_handler.  Therefore when process B called
flush_workqueue via the unregister call it would hang until process A
stopped sending MADs.

The fix is to periodically reschedule ib_mad_completion_handler after
processing a large number of completions.  The number of completions
chosen was decided based on the defaults for the recv queue size.  However,
it was kept fixed such that increasing those queue sizes would not
adversely affect fairness in the future.

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Dean Luick <dean.luick@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/mad.c	18
1 file changed, 18 insertions, 0 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 2281de122038..d4d2a618fd66 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -61,6 +61,18 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
+/*
+ * Define a limit on the number of completions which will be processed by the
+ * worker thread in a single work item. This ensures that other work items
+ * (potentially from other users) are processed fairly.
+ *
+ * The number of completions was derived from the default queue sizes above.
+ * We use a value which is double the larger of the 2 queues (receive @ 512)
+ * but keep it fixed such that an increase in that value does not introduce
+ * unfairness.
+ */
+#define MAD_COMPLETION_PROC_LIMIT 1024
+
static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;
@@ -2555,6 +2567,7 @@ static void ib_mad_completion_handler(struct work_struct *work)
{
struct ib_mad_port_private *port_priv;
struct ib_wc wc;
+ int count = 0;
port_priv = container_of(work, struct ib_mad_port_private, work);
ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
@@ -2574,6 +2587,11 @@ static void ib_mad_completion_handler(struct work_struct *work)
}
} else
mad_error_handler(port_priv, &wc);
+
+ if (++count > MAD_COMPLETION_PROC_LIMIT) {
+ queue_work(port_priv->wq, &port_priv->work);
+ break;
+ }
}
}
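
For reference, below is a minimal, self-contained sketch of the fairness pattern this patch applies: a work item that processes at most a fixed number of completions per invocation, then requeues itself so other items on the same single-threaded workqueue (such as a flush issued by an unregister call) get a turn. It is not the actual mad.c code; the demo_* names and the poll_one_completion()/handle_completion() helpers are hypothetical stand-ins for ib_poll_cq() and the MAD send/receive/error dispatch. Only the limit-and-requeue structure, queue_work(), and container_of() usage mirror the patch above.

```c
/*
 * Sketch of "bounded completion processing with self-requeue".
 * Hypothetical example, not drivers/infiniband/core/mad.c.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define COMPLETION_PROC_LIMIT 1024	/* mirrors MAD_COMPLETION_PROC_LIMIT */

struct demo_port {
	struct workqueue_struct *wq;	/* single-threaded/ordered workqueue */
	struct work_struct work;
};

/* Placeholders standing in for ib_poll_cq() and the completion dispatch. */
bool poll_one_completion(struct demo_port *port);
void handle_completion(struct demo_port *port);

static void demo_completion_handler(struct work_struct *work)
{
	struct demo_port *port = container_of(work, struct demo_port, work);
	int count = 0;

	while (poll_one_completion(port)) {
		handle_completion(port);

		if (++count > COMPLETION_PROC_LIMIT) {
			/*
			 * Yield after a bounded amount of work: requeue this
			 * work item and return so that other queued items
			 * (e.g. a flush_workqueue() from an unregister path)
			 * are not starved by a continuous completion stream.
			 */
			queue_work(port->wq, &port->work);
			break;
		}
	}
}
```

The key design point is that the handler never drains an unbounded stream in one pass; by breaking out and requeueing itself, it lets the workqueue round-robin to other pending work while still guaranteeing the remaining completions are processed on the next invocation.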