perf: Make sure to use pmu_ctx->pmu for groups
Oliver reported that x86_pmu_del() ended up doing an out-of-bounds memory access
when group_sched_in() fails and needs to roll back.
This *should* be handled by the transaction callbacks, but he found that when
the group leader is a software event, the transaction handlers of the wrong PMU
are used, even though both the move_group case in perf_event_open() and
group_sched_in() use pmu_ctx->pmu.
Turns out, inherit uses event->pmu to clone the events, effectively undoing the
move_group case for all inherited contexts. Fix this by also making inherit use
pmu_ctx->pmu, ensuring all inherited counters end up in the same pmu context.
Similarly, __perf_event_read() should equally use pmu_ctx->pmu for the
group case.
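
For reference, a rough user-space sketch (not taken from the report; the event
types and the inherit/fork shape are illustrative assumptions) of the kind of
group that reaches this path: a software group leader with a hardware sibling,
opened with inheritance so that fork() runs the inherit path:

  /* Illustrative reproducer sketch, not from the original report. */
  #include <linux/perf_event.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int perf_open(struct perf_event_attr *attr, int group_fd)
  {
          /* pid = 0 (current task), cpu = -1 (any CPU), flags = 0 */
          return (int)syscall(SYS_perf_event_open, attr, 0, -1, group_fd, 0);
  }

  int main(void)
  {
          struct perf_event_attr attr;
          int leader, sibling;

          /* Software event as the group leader. */
          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_SOFTWARE;
          attr.config = PERF_COUNT_SW_DUMMY;
          attr.disabled = 1;
          attr.inherit = 1;       /* clone the event into children on fork() */
          leader = perf_open(&attr, -1);

          /* Hardware sibling: perf_event_open() now has to move the whole
           * group onto the hardware PMU context (the move_group case). */
          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_CPU_CYCLES;
          attr.inherit = 1;
          sibling = perf_open(&attr, leader);

          /* fork() exercises the inherit path; without this fix the inherited
           * group is placed via event->pmu (software) rather than
           * pmu_ctx->pmu (hardware). */
          if (leader >= 0 && sibling >= 0 && fork() == 0)
                  _exit(0);

          return 0;
  }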
Fixes: bd27568117 ("perf: Rewrite core context handling")
Reported-by: Oliver Rosenberg <olrose55@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ian Rogers <irogers@google.com>
Link: https://patch.msgid.link/20260309133713.GB606826@noisy.programming.kicks-ass.net
commit 4b9ce67196
parent f1cac6ac62
kernel/events/core.c
@@ -4813,7 +4813,7 @@ static void __perf_event_read(void *info)
 	struct perf_event *sub, *event = data->event;
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
-	struct pmu *pmu = event->pmu;
+	struct pmu *pmu;
 
 	/*
 	 * If this is a task context, we need to check whether it is
@@ -4825,7 +4825,7 @@ static void __perf_event_read(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	raw_spin_lock(&ctx->lock);
+	guard(raw_spinlock)(&ctx->lock);
 	ctx_time_update_event(ctx, event);
 
 	perf_event_update_time(event);
@@ -4833,25 +4833,22 @@ static void __perf_event_read(void *info)
 		perf_event_update_sibling_time(event);
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
-		goto unlock;
+		return;
 
 	if (!data->group) {
-		pmu->read(event);
+		perf_pmu_read(event);
 		data->ret = 0;
-		goto unlock;
+		return;
 	}
 
+	pmu = event->pmu_ctx->pmu;
 	pmu->start_txn(pmu, PERF_PMU_TXN_READ);
-
-	pmu->read(event);
+	perf_pmu_read(event);
 
 	for_each_sibling_event(sub, event)
 		perf_pmu_read(sub);
 
 	data->ret = pmu->commit_txn(pmu);
-
-unlock:
-	raw_spin_unlock(&ctx->lock);
 }
 
 static inline u64 perf_event_count(struct perf_event *event, bool self)
@@ -14744,7 +14741,7 @@ inherit_event(struct perf_event *parent_event,
 	get_ctx(child_ctx);
 	child_event->ctx = child_ctx;
 
-	pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
+	pmu_ctx = find_get_pmu_context(parent_event->pmu_ctx->pmu, child_ctx, child_event);
 	if (IS_ERR(pmu_ctx)) {
 		free_event(child_event);
 		return ERR_CAST(pmu_ctx);
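
For context (not part of the commit): the transactional group read in
__perf_event_read() above is what ultimately services a read() on a group
leader whose attr.read_format includes PERF_FORMAT_GROUP. A rough user-space
sketch of such a read, assuming group_fd is that leader and no other
read_format flags were requested:

  /* Rough sketch; the struct layout assumes read_format == PERF_FORMAT_GROUP
   * only (no PERF_FORMAT_ID or time fields). */
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  struct group_read {
          uint64_t nr;            /* number of events in the group */
          uint64_t values[16];    /* one counter value per group member */
  };

  static int read_group(int group_fd)
  {
          struct group_read buf;
          uint64_t i;

          /* One read() returns the whole group; the kernel gathers it with
           * the pmu->start_txn() / perf_pmu_read() / pmu->commit_txn()
           * sequence shown in the hunk above. */
          if (read(group_fd, &buf, sizeof(buf)) < 0)
                  return -1;

          for (i = 0; i < buf.nr && i < 16; i++)
                  printf("counter %llu: %llu\n",
                         (unsigned long long)i,
                         (unsigned long long)buf.values[i]);
          return 0;
  }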