authorRaghavendra Rao Ananta <rananta@codeaurora.org>2018-09-14 17:06:11 -0700
committerGerrit - the friendly Code Review server <code-review@localhost>2018-11-14 19:31:18 -0800
commit3186af9f3ece8d5d07a3d62cc1a64b448eab375b (patch)
tree893e9af436e6e055ee8be7dad914eaad30a2efe9
parent678b7e877dc6aac573ffb010dce85b1bf29674e9 (diff)
perf: core: Avoid race condition when releasing perf-events
The function perf_event_release_kernel(), used to free perf events, depends on the CPU associated with the event being online. The function checks this at the beginning: if the CPU is offline, it puts the event on a zombie list and returns immediately; otherwise it proceeds and makes a cross-CPU call (from perf_remove_from_context()) to complete the work. However, there is a window for a race if the CPU goes offline between the initial check and the cross-CPU call. The cross-CPU call deletes the event from the context's list, but if the CPU is offline this deletion does not happen. The event is later freed regardless of this failure, yet it still sits on the list. When the list is next traversed, the freed memory is accessed, resulting in a memory abort.

Hence, take perf's pmus_lock mutex around perf_event_release_kernel() to prevent the CPU from going offline during the operation.

Change-Id: I20241639ea9a8dc87e5a88cf81e940b3d6cb773c
Signed-off-by: Raghavendra Rao Ananta <rananta@codeaurora.org>
Signed-off-by: Swetha Chikkaboraiah <schikk@codeaurora.org>
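The following is a minimal userspace sketch of the check-then-act window described above and of how serializing on a shared mutex closes it. It uses pthreads instead of the kernel's pmus_lock/hotplug machinery, and every name in it (fake_pmus_lock, fake_cpu_online, release_event, hotplug_thread) is illustrative only; the real change is the diff below.

/*
 * Sketch of the race: without the mutex, release_event() could see the
 * CPU as online and then the hotplug thread could flip it offline before
 * the "cross-cpu" step runs, mirroring the window between the online
 * check and perf_remove_from_context(). With both paths taking the same
 * mutex, that interleaving is impossible. Build with: cc -pthread race.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t fake_pmus_lock = PTHREAD_MUTEX_INITIALIZER;
static bool fake_cpu_online = true;

static void release_event(void)
{
	/* Stand-in for taking pmus_lock around __perf_event_release_kernel() */
	pthread_mutex_lock(&fake_pmus_lock);
	if (fake_cpu_online) {
		/* The "cross-cpu call": only valid while the CPU stays online,
		 * which holding the lock now guarantees. */
		printf("removed event from the online CPU's context\n");
	} else {
		printf("CPU already offline, defer event to zombie list\n");
	}
	pthread_mutex_unlock(&fake_pmus_lock);
}

static void *hotplug_thread(void *arg)
{
	/* Stand-in for perf_event_exit_cpu(): also serialized on the lock,
	 * so it cannot slip in between the check and the removal above. */
	pthread_mutex_lock(&fake_pmus_lock);
	fake_cpu_online = false;
	pthread_mutex_unlock(&fake_pmus_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, hotplug_thread, NULL);
	release_event();
	pthread_join(t, NULL);
	return 0;
}

The patch applies the same idea: perf_event_release_kernel() becomes a wrapper that takes pmus_lock around __perf_event_release_kernel(), and the hotplug paths (perf_event_exit_cpu(), perf_event_start_swevents()) take the same lock, so the is_hotplugging state cannot change mid-release.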
-rw-r--r--	kernel/events/core.c	22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ee74fff..eb57fb3 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4289,7 +4289,7 @@ static DEFINE_SPINLOCK(zombie_list_lock);
* object, it will not preserve its functionality. Once the last 'user'
* gives up the object, we'll destroy the thing.
*/
-int perf_event_release_kernel(struct perf_event *event)
+static int __perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *child, *tmp;
@@ -4300,7 +4300,7 @@ int perf_event_release_kernel(struct perf_event *event)
* back online.
*/
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
- if (event->cpu != -1 && !cpu_online(event->cpu)) {
+ if (event->cpu != -1 && per_cpu(is_hotplugging, event->cpu)) {
if (event->state == PERF_EVENT_STATE_ZOMBIE)
return 0;
@@ -4417,6 +4417,17 @@ no_ctx:
put_event(event); /* Must be the 'last' reference */
return 0;
}
+
+int perf_event_release_kernel(struct perf_event *event)
+{
+ int ret;
+
+ mutex_lock(&pmus_lock);
+ ret = __perf_event_release_kernel(event);
+ mutex_unlock(&pmus_lock);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
/*
@@ -11130,7 +11141,7 @@ static void perf_event_zombie_cleanup(unsigned int cpu)
* PMU expects it to be in an active state
*/
event->state = PERF_EVENT_STATE_ACTIVE;
- perf_event_release_kernel(event);
+ __perf_event_release_kernel(event);
spin_lock(&zombie_list_lock);
}
@@ -11145,6 +11156,7 @@ static int perf_event_start_swevents(unsigned int cpu)
struct perf_event *event;
int idx;
+ mutex_lock(&pmus_lock);
perf_event_zombie_cleanup(cpu);
idx = srcu_read_lock(&pmus_srcu);
@@ -11159,6 +11171,8 @@ static int perf_event_start_swevents(unsigned int cpu)
}
srcu_read_unlock(&pmus_srcu, idx);
per_cpu(is_hotplugging, cpu) = false;
+ mutex_unlock(&pmus_lock);
+
return 0;
}
@@ -11220,8 +11234,10 @@ static void perf_event_exit_cpu_context(int cpu) { }
int perf_event_exit_cpu(unsigned int cpu)
{
+ mutex_lock(&pmus_lock);
per_cpu(is_hotplugging, cpu) = true;
perf_event_exit_cpu_context(cpu);
+ mutex_unlock(&pmus_lock);
return 0;
}