diff options
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	|  1
-rw-r--r--	kernel/sched/hmp.c	| 43
-rw-r--r--	kernel/sched/sched.h	|  3
-rw-r--r--	kernel/sysctl.c	|  7
4 files changed, 54 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a790d101d120..94bd3cffa5ac 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2540,6 +2540,7 @@ void wake_up_new_task(struct task_struct *p)
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	init_new_task_load(p);
+	add_new_task_to_grp(p);
 	/* Initialize new task's runnable average */
 	init_entity_runnable_average(&p->se);
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 162d4a0c950c..898da9b83a72 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -696,6 +696,13 @@ __read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
 
 unsigned int __read_mostly sysctl_sched_enable_colocation = 1;
 
+/*
+ * Enable colocation and frequency aggregation for all threads in a process.
+ * The children inherit the group id from the parent.
+ */
+unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
+
+
 __read_mostly unsigned int sysctl_sched_new_task_windows = 5;
 
 #define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
@@ -3562,6 +3569,42 @@ add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
 	return 0;
 }
 
+void add_new_task_to_grp(struct task_struct *new)
+{
+	unsigned long flags;
+	struct related_thread_group *grp;
+	struct task_struct *parent;
+
+	if (!sysctl_sched_enable_thread_grouping)
+		return;
+
+	if (thread_group_leader(new))
+		return;
+
+	parent = new->group_leader;
+
+	/*
+	 * The parent's pi_lock is required here to protect race
+	 * against the parent task being removed from the
+	 * group.
+	 */
+	raw_spin_lock_irqsave(&parent->pi_lock, flags);
+
+	/* protected by pi_lock. */
+	grp = task_related_thread_group(parent);
+	if (!grp) {
+		raw_spin_unlock_irqrestore(&parent->pi_lock, flags);
+		return;
+	}
+	raw_spin_lock(&grp->lock);
+
+	rcu_assign_pointer(new->grp, grp);
+	list_add(&new->grp_list, &grp->tasks);
+
+	raw_spin_unlock(&grp->lock);
+	raw_spin_unlock_irqrestore(&parent->pi_lock, flags);
+}
+
 int sched_set_group_id(struct task_struct *p, unsigned int group_id)
 {
 	int rc = 0, destroy = 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b09d3a1a026f..cdfccdeb4eea 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1100,6 +1100,7 @@ extern void update_up_down_migrate(void);
 extern int update_preferred_cluster(struct related_thread_group *grp,
 			struct task_struct *p, u32 old_load);
 extern void set_preferred_cluster(struct related_thread_group *grp);
+extern void add_new_task_to_grp(struct task_struct *new);
 
 enum sched_boost_type {
 	SCHED_BOOST_NONE,
@@ -1575,6 +1576,8 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
 	return 0;
 }
 
+static inline void add_new_task_to_grp(struct task_struct *new) {}
+
 #define sched_enable_hmp 0
 #define sched_freq_legacy_mode 1
 #define sched_migration_fixup 0
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8b9ca50dc53f..ac34212f6881 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -406,6 +406,13 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= sched_hmp_proc_update_handler,
 	},
 	{
+		.procname	= "sched_enable_thread_grouping",
+		.data		= &sysctl_sched_enable_thread_grouping,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "sched_new_task_windows",
 		.data		= &sysctl_sched_new_task_windows,
 		.maxlen		= sizeof(unsigned int),
