author	Thomas Gleixner <tglx@linutronix.de>	2017-04-13 10:20:23 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2017-04-15 12:20:56 +0200
commit	73810a069120aa831debb4d967310ab900f628ad (patch)
tree	81d0cc6b3b9d1f8b97c295319d63779272d1d2db /drivers/crypto
parent	12699ac53a2e5fbd1fd7c164b11685d55c8aa28b (diff)
crypto: N2 - Replace racy task affinity logic
spu_queue_register() needs to invoke setup functions on a particular CPU. This is achieved by temporarily setting the affinity of the calling user space thread to the requested CPU and resetting it to the original affinity afterwards.

That's racy vs. CPU hotplug and concurrent affinity settings for that thread, resulting in code executing on the wrong CPU and overwriting the new affinity setting.

Replace it with work_on_cpu_safe(), which guarantees to run the code on the requested CPU or to fail in case the CPU is offline.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: "David S. Miller" <davem@davemloft.net>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: linux-crypto@vger.kernel.org
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Tejun Heo <tj@kernel.org>
Cc: Len Brown <lenb@kernel.org>
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704131019420.2408@nanos
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
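For reference, the pattern the patch switches to looks roughly like the sketch below: pick an online CPU from the allowed mask and let a CPU-bound kworker execute the setup there, instead of rewriting the caller's affinity. This is only an illustrative sketch, not the driver code itself; struct my_setup_arg, my_setup_workfn() and my_setup_on_cpu() are made-up names, while work_on_cpu_safe(), cpumask_any_and(), cpu_online_mask and nr_cpu_ids are the real kernel interfaces involved.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical argument bundle handed to the work function. */
struct my_setup_arg {
	void *dev_data;		/* whatever the CPU-local setup needs */
};

/* Runs on the chosen CPU in a bound kworker; must return 0 or -errno. */
static long my_setup_workfn(void *arg)
{
	struct my_setup_arg *a = arg;

	/* ... perform the CPU-local setup (e.g. a hypervisor call) using a->dev_data ... */
	return 0;
}

static int my_setup_on_cpu(const struct cpumask *allowed, void *dev_data)
{
	/* Pick any currently online CPU out of the allowed set. */
	int cpu = cpumask_any_and(allowed, cpu_online_mask);
	struct my_setup_arg a = { .dev_data = dev_data };

	/* Extra guard, not in the patch: bail out if no allowed CPU is online. */
	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * work_on_cpu_safe() runs the function on the requested CPU while
	 * holding the CPU hotplug lock, so the CPU cannot be unplugged
	 * underneath it; the calling thread's affinity is never modified.
	 */
	return work_on_cpu_safe(cpu, my_setup_workfn, &a);
}

In the patch itself this is exactly what spu_queue_register() now does, with the hypervisor call to sun4v_ncs_qconf() sitting in spu_queue_register_workfn().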
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/n2_core.c	31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index c5aac25a5738..4ecb77aa60e1 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -65,6 +65,11 @@ struct spu_queue {
 	struct list_head	list;
 };
 
+struct spu_qreg {
+	struct spu_queue	*queue;
+	unsigned long		type;
+};
+
 static struct spu_queue **cpu_to_cwq;
 static struct spu_queue **cpu_to_mau;
 
@@ -1631,31 +1636,27 @@ static void queue_cache_destroy(void)
 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
 }
 
-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
+static long spu_queue_register_workfn(void *arg)
 {
-	cpumask_var_t old_allowed;
+	struct spu_qreg *qr = arg;
+	struct spu_queue *p = qr->queue;
+	unsigned long q_type = qr->type;
 	unsigned long hv_ret;
 
-	if (cpumask_empty(&p->sharing))
-		return -EINVAL;
-
-	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
-		return -ENOMEM;
-
-	cpumask_copy(old_allowed, &current->cpus_allowed);
-
-	set_cpus_allowed_ptr(current, &p->sharing);
-
 	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
 				 CWQ_NUM_ENTRIES, &p->qhandle);
 	if (!hv_ret)
 		sun4v_ncs_sethead_marker(p->qhandle, 0);
 
-	set_cpus_allowed_ptr(current, old_allowed);
+	return hv_ret ? -EINVAL : 0;
+}
 
-	free_cpumask_var(old_allowed);
+static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
+{
+	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
+	struct spu_qreg qr = { .queue = p, .type = q_type };
 
-	return (hv_ret ? -EINVAL : 0);
+	return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
 }
 
 static int spu_queue_setup(struct spu_queue *p)