sched/fair: Fold the sched_avg update
Nine (and a half) instances of the same pattern is just silly; fold the lot.

Notably, the half instance in enqueue_load_avg() is right after setting
cfs_rq->avg.load_sum to cfs_rq->avg.load_avg * get_pelt_divider(&cfs_rq->avg).
Since get_pelt_divider() >= PELT_MIN_DIVIDER, this ends up being a no-op
change.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://patch.msgid.link/20251127154725.413564507@infradead.org
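For reference, the shape of the fold can be illustrated outside the tree with a minimal, compilable C sketch. Everything below is a stand-in: READ_ONCE()/WRITE_ONCE(), max_t(), PELT_MIN_DIVIDER and __signed_scalar_typeof() are simplified placeholders for the kernel definitions, and the numbers are made up. It only shows how __update_sa() folds the repeated add_positive()/sub_positive() pair plus the max_t() lower-bound clamp into one macro; it is not the kernel implementation.

/*
 * Standalone sketch of the folded pattern; not kernel code.
 * All of the macros below are simplified stand-ins assumed for
 * illustration only.
 */
#include <stdio.h>

#define READ_ONCE(x)		(x)
#define WRITE_ONCE(x, v)	((x) = (v))
#define max_t(type, a, b)	((type)(a) > (type)(b) ? (type)(a) : (type)(b))
#define PELT_MIN_DIVIDER	1024	/* placeholder; the kernel derives it from LOAD_AVG_MAX */
#define __signed_scalar_typeof(x)	long	/* placeholder for the real type trick */

/* Add a possibly negative delta; clamp at zero on underflow. */
#define add_positive(_ptr, _val) do {				\
	typeof(_ptr) ptr = (_ptr);				\
	__signed_scalar_typeof(*ptr) val = (_val);		\
	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
								\
	res = var + val;					\
								\
	if (val < 0 && res > var)				\
		res = 0;					\
								\
	WRITE_ONCE(*ptr, res);					\
} while (0)

/*
 * The folded update: move _avg and _sum together, then re-establish
 * the invariant _sum >= _avg * minimum divider.
 */
#define __update_sa(sa, name, delta_avg, delta_sum) do {	\
	add_positive(&(sa)->name##_avg, delta_avg);		\
	add_positive(&(sa)->name##_sum, delta_sum);		\
	(sa)->name##_sum = max_t(typeof((sa)->name##_sum),	\
				 (sa)->name##_sum,		\
				 (sa)->name##_avg * PELT_MIN_DIVIDER); \
} while (0)

struct sched_avg { unsigned long util_avg, util_sum; };

int main(void)
{
	struct sched_avg sa = { .util_avg = 3, .util_sum = 3100 };

	/* One detach: previously sub_positive() twice plus a max_t() clamp. */
	__update_sa(&sa, util, -1, -1500);

	/* util_sum (1600) is pulled up to util_avg * PELT_MIN_DIVIDER = 2048. */
	printf("util_avg=%lu util_sum=%lu\n", sa.util_avg, sa.util_sum);
	return 0;
}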
parent 38a68b982d
commit 089d84203a
kernel/sched/fair.c

@@ -3693,7 +3693,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  */
 #define add_positive(_ptr, _val) do { \
 	typeof(_ptr) ptr = (_ptr); \
-	typeof(_val) val = (_val); \
+	__signed_scalar_typeof(*ptr) val = (_val); \
 	typeof(*ptr) res, var = READ_ONCE(*ptr); \
 \
 	res = var + val; \
@@ -3704,23 +3704,6 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	WRITE_ONCE(*ptr, res); \
 } while (0)
 
-/*
- * Unsigned subtract and clamp on underflow.
- *
- * Explicitly do a load-store to ensure the intermediate value never hits
- * memory. This allows lockless observations without ever seeing the negative
- * values.
- */
-#define sub_positive(_ptr, _val) do { \
-	typeof(_ptr) ptr = (_ptr); \
-	typeof(*ptr) val = (_val); \
-	typeof(*ptr) res, var = READ_ONCE(*ptr); \
-	res = var - val; \
-	if (res > var) \
-		res = 0; \
-	WRITE_ONCE(*ptr, res); \
-} while (0)
-
 /*
  * Remove and clamp on negative, from a local variable.
  *
@@ -3732,21 +3715,37 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	*ptr -= min_t(typeof(*ptr), *ptr, _val); \
 } while (0)
 
+/*
+ * Because of rounding, se->util_sum might ends up being +1 more than
+ * cfs->util_sum. Although this is not a problem by itself, detaching
+ * a lot of tasks with the rounding problem between 2 updates of
+ * util_avg (~1ms) can make cfs->util_sum becoming null whereas
+ * cfs_util_avg is not.
+ *
+ * Check that util_sum is still above its lower bound for the new
+ * util_avg. Given that period_contrib might have moved since the last
+ * sync, we are only sure that util_sum must be above or equal to
+ * util_avg * minimum possible divider
+ */
+#define __update_sa(sa, name, delta_avg, delta_sum) do { \
+	add_positive(&(sa)->name##_avg, delta_avg); \
+	add_positive(&(sa)->name##_sum, delta_sum); \
+	(sa)->name##_sum = max_t(typeof((sa)->name##_sum), \
+				 (sa)->name##_sum, \
+				 (sa)->name##_avg * PELT_MIN_DIVIDER); \
+} while (0)
+
 static inline void
 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	cfs_rq->avg.load_avg += se->avg.load_avg;
-	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
+	__update_sa(&cfs_rq->avg, load, se->avg.load_avg, se->avg.load_sum);
 }
 
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
-	/* See update_cfs_rq_load_avg() */
-	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
-				     cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
+	__update_sa(&cfs_rq->avg, load, -se->avg.load_avg, -se->avg.load_sum);
 }
 
 static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags);
@@ -4242,7 +4241,6 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 	 */
 	divider = get_pelt_divider(&cfs_rq->avg);
 
-
 	/* Set new sched_entity's utilization */
 	se->avg.util_avg = gcfs_rq->avg.util_avg;
 	new_sum = se->avg.util_avg * divider;
@@ -4250,12 +4248,7 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 	se->avg.util_sum = new_sum;
 
 	/* Update parent cfs_rq utilization */
-	add_positive(&cfs_rq->avg.util_avg, delta_avg);
-	add_positive(&cfs_rq->avg.util_sum, delta_sum);
-
-	/* See update_cfs_rq_load_avg() */
-	cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
-				     cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
+	__update_sa(&cfs_rq->avg, util, delta_avg, delta_sum);
 }
 
 static inline void
@@ -4281,11 +4274,7 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 	se->avg.runnable_sum = new_sum;
 
 	/* Update parent cfs_rq runnable */
-	add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
-	add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
-	/* See update_cfs_rq_load_avg() */
-	cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
-				     cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
+	__update_sa(&cfs_rq->avg, runnable, delta_avg, delta_sum);
 }
 
 static inline void
@@ -4349,11 +4338,7 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 
 	se->avg.load_sum = runnable_sum;
 	se->avg.load_avg = load_avg;
-	add_positive(&cfs_rq->avg.load_avg, delta_avg);
-	add_positive(&cfs_rq->avg.load_sum, delta_sum);
-	/* See update_cfs_rq_load_avg() */
-	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
-				     cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
+	__update_sa(&cfs_rq->avg, load, delta_avg, delta_sum);
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -4552,33 +4537,13 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 		raw_spin_unlock(&cfs_rq->removed.lock);
 
 		r = removed_load;
-		sub_positive(&sa->load_avg, r);
-		sub_positive(&sa->load_sum, r * divider);
-		/* See sa->util_sum below */
-		sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
+		__update_sa(sa, load, -r, -r*divider);
 
 		r = removed_util;
-		sub_positive(&sa->util_avg, r);
-		sub_positive(&sa->util_sum, r * divider);
-		/*
-		 * Because of rounding, se->util_sum might ends up being +1 more than
-		 * cfs->util_sum. Although this is not a problem by itself, detaching
-		 * a lot of tasks with the rounding problem between 2 updates of
-		 * util_avg (~1ms) can make cfs->util_sum becoming null whereas
-		 * cfs_util_avg is not.
-		 * Check that util_sum is still above its lower bound for the new
-		 * util_avg. Given that period_contrib might have moved since the last
-		 * sync, we are only sure that util_sum must be above or equal to
-		 * util_avg * minimum possible divider
-		 */
-		sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
+		__update_sa(sa, util, -r, -r*divider);
 
 		r = removed_runnable;
-		sub_positive(&sa->runnable_avg, r);
-		sub_positive(&sa->runnable_sum, r * divider);
-		/* See sa->util_sum above */
-		sa->runnable_sum = max_t(u32, sa->runnable_sum,
-					 sa->runnable_avg * PELT_MIN_DIVIDER);
+		__update_sa(sa, runnable, -r, -r*divider);
 
 		/*
 		 * removed_runnable is the unweighted version of removed_load so we
@@ -4663,17 +4628,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	dequeue_load_avg(cfs_rq, se);
-	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
-	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
-	/* See update_cfs_rq_load_avg() */
-	cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
-				     cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
-
-	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
-	/* See update_cfs_rq_load_avg() */
-	cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
-				     cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
+	__update_sa(&cfs_rq->avg, util, -se->avg.util_avg, -se->avg.util_sum);
+	__update_sa(&cfs_rq->avg, runnable, -se->avg.runnable_avg, -se->avg.runnable_sum);
 
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 