author     Linus Torvalds <torvalds@linux-foundation.org>  2021-06-20 09:44:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-06-20 09:44:52 -0700
commit     cba5e97280f53ec7feb656fcdf0ec00a5c6dd539
tree       1d8e115d75a97fdae7b6fa9e4f439c0edc1222f8
parent     9df7f15ee922429dcd6bcde68559ccb23f2dbd20
parent     a7b359fc6a37faaf472125867c8dc5a068c90982
Merge tag 'sched_urgent_for_v5.13_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fix from Borislav Petkov:
"A single fix to restore fairness between control groups with equal
priority"
* tag 'sched_urgent_for_v5.13_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/fair: Correctly insert cfs_rq's to list on unthrottle
-rw-r--r-- | kernel/sched/fair.c | 44
1 file changed, 25 insertions(+), 19 deletions(-)
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2c8a9352590d..bfaa6e1f6067 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3298,6 +3298,24 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_FAIR_GROUP_SCHED
+
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load.weight)
+		return false;
+
+	if (cfs_rq->avg.load_sum)
+		return false;
+
+	if (cfs_rq->avg.util_sum)
+		return false;
+
+	if (cfs_rq->avg.runnable_sum)
+		return false;
+
+	return true;
+}
+
 /**
  * update_tg_load_avg - update the tg's load avg
  * @cfs_rq: the cfs_rq whose avg changed
@@ -4091,6 +4109,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 
 #else /* CONFIG_SMP */
 
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+	return true;
+}
+
 #define UPDATE_TG	0x0
 #define SKIP_AGE_LOAD	0x0
 #define DO_ATTACH	0x0
@@ -4749,8 +4772,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
 					     cfs_rq->throttled_clock_task;
 
-		/* Add cfs_rq with already running entity in the list */
-		if (cfs_rq->nr_running >= 1)
+		/* Add cfs_rq with load or one or more already running entities to the list */
+		if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
 			list_add_leaf_cfs_rq(cfs_rq);
 	}
 
@@ -7996,23 +8019,6 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load.weight)
-		return false;
-
-	if (cfs_rq->avg.load_sum)
-		return false;
-
-	if (cfs_rq->avg.util_sum)
-		return false;
-
-	if (cfs_rq->avg.runnable_sum)
-		return false;
-
-	return true;
-}
-
 static bool __update_blocked_fair(struct rq *rq, bool *done)
 {
 	struct cfs_rq *cfs_rq, *pos;
```
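The behavioural change sits in tg_unthrottle_up(): a cfs_rq is now re-added to the leaf list not only when it still has runnable tasks, but also when it still carries non-decayed PELT load. The sketch below is a minimal, self-contained userspace C illustration of that predicate; the toy_cfs_rq struct, its fields, and the sample values are hypothetical simplifications for illustration, not the kernel's definitions.

```c
/*
 * Standalone sketch (not kernel code) of the predicate logic in the patch.
 * toy_cfs_rq and its values are hypothetical stand-ins for struct cfs_rq;
 * they only show the case the old check missed: a group with blocked load
 * but no runnable tasks.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_cfs_rq {
	unsigned long load_weight;   /* stands in for cfs_rq->load.weight */
	unsigned long load_sum;      /* stands in for cfs_rq->avg.load_sum */
	unsigned long util_sum;      /* stands in for cfs_rq->avg.util_sum */
	unsigned long runnable_sum;  /* stands in for cfs_rq->avg.runnable_sum */
	unsigned int nr_running;
};

/* Mirrors cfs_rq_is_decayed(): "decayed" means no weight and no residual sums. */
static bool toy_cfs_rq_is_decayed(const struct toy_cfs_rq *cfs_rq)
{
	return !cfs_rq->load_weight &&
	       !cfs_rq->load_sum &&
	       !cfs_rq->util_sum &&
	       !cfs_rq->runnable_sum;
}

int main(void)
{
	/* A group unthrottled after its tasks blocked: nothing runnable,
	 * but its load averages have not decayed to zero yet. */
	struct toy_cfs_rq blocked_only = { .load_sum = 512, .nr_running = 0 };

	bool old_check = blocked_only.nr_running >= 1;
	bool new_check = !toy_cfs_rq_is_decayed(&blocked_only) ||
			 blocked_only.nr_running;

	printf("old check re-links it: %d\n", old_check); /* prints 0 */
	printf("new check re-links it: %d\n", new_check); /* prints 1 */
	return 0;
}
```

Under the old condition such a cfs_rq stayed off the leaf list, so its residual load was not decayed along with the others; re-linking it until cfs_rq_is_decayed() holds is what the pull message describes as restoring fairness between control groups with equal priority.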