| author | Ingo Molnar <mingo@kernel.org> | 2017-03-28 07:44:25 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2017-03-28 07:44:25 +0200 |
| commit | d652f4bbca35100358bad83c29ec0e40a1f8e5cc (patch) | |
| tree | a59e6ad6dca5e98f82ce87da2c2b69922c6a4da5 /arch/x86/events | |
| parent | e3a6a62400520452fe39740dca90a1d0b94b8f92 (diff) | |
| parent | a01851faab4b485e94c2ceaa1d0208a8d16ce367 (diff) | |
Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/events')
| -rw-r--r-- | arch/x86/events/core.c | 16 |
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 349d4d17aa7f..2aa1ad194db2 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;
 
+	/*
+	 * This function relies on not being called concurrently in two
+	 * tasks in the same mm.  Otherwise one task could observe
+	 * perf_rdpmc_allowed > 1 and return all the way back to
+	 * userspace with CR4.PCE clear while another task is still
+	 * doing on_each_cpu_mask() to propagate CR4.PCE.
+	 *
+	 * For now, this can't happen because all callers hold mmap_sem
+	 * for write.  If this changes, we'll need a different solution.
+	 */
+	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
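For context (not part of the commit): x86_pmu_event_mapped() runs when a task mmap()s a perf event, with mmap_sem held for write, and it is what sets CR4.PCE on every CPU running the mm so that userspace rdpmc works. The minimal userspace sketch below, assuming a standard glibc environment, shows that path being exercised: open a hardware counter, mmap its metadata page, and read it with rdpmc if the kernel advertises cap_user_rdpmc. Event choice and error handling are illustrative only, and a real reader would also use the seqlock in perf_event_mmap_page to get a consistent value.

```c
/* Sketch: userspace rdpmc consumer; relies on x86_pmu_event_mapped()
 * having set CR4.PCE on the CPUs of this mm after the mmap() below. */
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;		/* count this task in user mode only */

	int fd = perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* mmap()ing the event is what triggers x86_pmu_event_mapped(). */
	struct perf_event_mmap_page *pc =
		mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	if (pc->cap_user_rdpmc && pc->index) {
		unsigned int lo, hi;

		/* rdpmc faults unless CR4.PCE is set on the current CPU. */
		asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (pc->index - 1));
		printf("raw counter value: %llu\n",
		       ((unsigned long long)hi << 32) | lo);
	} else {
		printf("rdpmc not available for this event\n");
	}
	return 0;
}
```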