diff options
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  |  2 +-
-rw-r--r--  kernel/sched/fair.c  |  5 ++---
-rw-r--r--  kernel/sched/sched.h | 10 ++++++++++
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8d7c13af8c8e..f217924c10f2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4263,7 +4263,7 @@ static void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
 	bool check_groups;
 
 	rcu_read_lock();
-	check_groups = rcu_access_pointer(p->grp) != NULL;
+	check_groups = task_in_related_thread_group(p);
 	rcu_read_unlock();
 
 	if (!same_freq_domain(src_cpu, dest_cpu)) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0dba600dd28f..a4f3af6fc175 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4182,8 +4182,7 @@ unsigned int cpu_temp(int cpu)
 struct cpu_select_env;
 struct sched_cluster;
 
-static inline int task_will_fit(struct task_struct *p, int cpu,
-				enum sched_boost_type boost_type)
+static inline int task_will_fit(struct task_struct *p, int cpu)
 {
 	return 1;
 }
@@ -7700,7 +7699,7 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
 	deactivate_task(env->src_rq, p, 0);
 	double_lock_balance(env->src_rq, env->dst_rq);
 	set_task_cpu(p, env->dst_cpu);
-	if (rcu_access_pointer(p->grp))
+	if (task_in_related_thread_group(p))
 		env->flags |= LBF_MOVED_RELATED_THREAD_GROUP_TASK;
 	double_unlock_balance(env->src_rq, env->dst_rq);
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 414c46d20f41..af1d33ad82e6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1246,6 +1246,11 @@ static inline int sched_cpu_high_irqload(int cpu)
 	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
 }
 
+static inline bool task_in_related_thread_group(struct task_struct *p)
+{
+	return !!(rcu_access_pointer(p->grp) != NULL);
+}
+
 static inline struct related_thread_group
 *task_related_thread_group(struct task_struct *p)
 {
@@ -1381,6 +1386,11 @@ static inline int sched_cpu_high_irqload(int cpu)
 { return 0; }
 static inline void set_preferred_cluster(struct related_thread_group *grp) { }
 
+static inline bool task_in_related_thread_group(struct task_struct *p)
+{
+	return false;
+}
+
 static inline struct related_thread_group
 *task_related_thread_group(struct task_struct *p)
 {
