-rw-r--r--  Documentation/scheduler/sched-hmp.txt  | 11
-rw-r--r--  fs/proc/base.c                         | 74
-rw-r--r--  include/linux/sched.h                  |  3
-rw-r--r--  kernel/sched/fair.c                    | 22
4 files changed, 105 insertions, 5 deletions
diff --git a/Documentation/scheduler/sched-hmp.txt b/Documentation/scheduler/sched-hmp.txt
index 354905acb103..245f077eeafb 100644
--- a/Documentation/scheduler/sched-hmp.txt
+++ b/Documentation/scheduler/sched-hmp.txt
@@ -825,7 +825,16 @@ power-efficient cpu found while scanning cluster's online cpus.
- PF_WAKE_UP_IDLE
Any task that has this flag set in its 'task_struct.flags' field will always
be woken to an idle cpu. Further, any task woken by such tasks will also be
-placed on idle cpu.
+placed on an idle cpu. The PF_WAKE_UP_IDLE flag is inherited by a task's
+children. It can be modified for a task in two ways:
+
+ > kernel-space interface
+ set_wake_up_idle() needs to be called in the context of a task
+ to set or clear its PF_WAKE_UP_IDLE flag.
+
+ > user-space interface
+ the /proc/[pid]/sched_wake_up_idle file needs to be written to in order
+ to set or clear the PF_WAKE_UP_IDLE flag for a given task.
For some low band of frequencies, spreading tasks across all available cpus can
be grossly power-inefficient. As an example, consider two tasks that each need
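
A small user-space caller is enough to exercise the interface described in the
hunk above: write "1" to set PF_WAKE_UP_IDLE for a task, "0" to clear it, and
read the file to query the current value (these semantics come from the proc
handler added in fs/proc/base.c below). The helper here is an illustrative
sketch, not part of the patch:

/*
 * Illustrative user-space sketch (not part of this patch): toggle
 * PF_WAKE_UP_IDLE for a task through /proc/<pid>/sched_wake_up_idle.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

static int write_wake_up_idle(pid_t pid, int enable)
{
	char path[64];
	FILE *fp;

	snprintf(path, sizeof(path), "/proc/%d/sched_wake_up_idle", (int)pid);
	fp = fopen(path, "w");
	if (!fp)
		return -1;
	/* The proc handler parses a single integer; 1 sets, 0 clears. */
	fprintf(fp, "%d\n", !!enable);
	return fclose(fp);
}

int main(int argc, char **argv)
{
	/* Default to the calling process when no pid is given. */
	pid_t pid = (argc > 1) ? (pid_t)atoi(argv[1]) : getpid();

	return write_wake_up_idle(pid, 1) ? EXIT_FAILURE : EXIT_SUCCESS;
}
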
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 7737df006b0d..faee999d19ff 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1408,11 +1408,78 @@ static const struct file_operations proc_pid_sched_operations = {
#endif
-#ifdef CONFIG_SCHED_HMP
-
/*
* Print out various scheduling related per-task fields:
*/
+
+#ifdef CONFIG_SMP
+
+static int sched_wake_up_idle_show(struct seq_file *m, void *v)
+{
+ struct inode *inode = m->private;
+ struct task_struct *p;
+
+ p = get_proc_task(inode);
+ if (!p)
+ return -ESRCH;
+
+ seq_printf(m, "%d\n", sched_get_wake_up_idle(p));
+
+ put_task_struct(p);
+
+ return 0;
+}
+
+static ssize_t
+sched_wake_up_idle_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct inode *inode = file_inode(file);
+ struct task_struct *p;
+ char buffer[PROC_NUMBUF];
+ int wake_up_idle, err;
+
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+ if (copy_from_user(buffer, buf, count)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ err = kstrtoint(strstrip(buffer), 0, &wake_up_idle);
+ if (err)
+ goto out;
+
+ p = get_proc_task(inode);
+ if (!p)
+ return -ESRCH;
+
+ err = sched_set_wake_up_idle(p, wake_up_idle);
+
+ put_task_struct(p);
+
+out:
+ return err < 0 ? err : count;
+}
+
+static int sched_wake_up_idle_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, sched_wake_up_idle_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_wake_up_idle_operations = {
+ .open = sched_wake_up_idle_open,
+ .read = seq_read,
+ .write = sched_wake_up_idle_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_SCHED_HMP
+
static int sched_init_task_load_show(struct seq_file *m, void *v)
{
struct inode *inode = m->private;
@@ -2811,6 +2878,9 @@ static const struct pid_entry tgid_base_stuff[] = {
ONE("status", S_IRUGO, proc_pid_status),
ONE("personality", S_IRUSR, proc_pid_personality),
ONE("limits", S_IRUGO, proc_pid_limits),
+#ifdef CONFIG_SMP
+ REG("sched_wake_up_idle", S_IRUGO|S_IWUSR, proc_pid_sched_wake_up_idle_operations),
+#endif
#ifdef CONFIG_SCHED_HMP
REG("sched_init_task_load", S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations),
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3be9ca55987a..0a8ff7c97d54 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2330,6 +2330,9 @@ sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
}
#endif
+extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
+extern u32 sched_get_wake_up_idle(struct task_struct *p);
+
#ifdef CONFIG_SCHED_HMP
extern int sched_set_boost(int enable);
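
These two declarations are the whole kernel-space surface added by the patch.
A hypothetical in-kernel caller (the wrapper function below is illustrative;
only the sched_set_wake_up_idle()/sched_get_wake_up_idle() calls come from
this patch) might use them like this:

/*
 * Hypothetical in-kernel caller (illustrative, not part of this patch):
 * mark the current task so that it, and tasks it wakes, are placed on
 * idle CPUs, then read the flag back.
 */
#include <linux/sched.h>
#include <linux/printk.h>

static void example_prefer_idle_wakeups(bool enable)
{
	/* Returns 0; see the kernel/sched/fair.c hunk below. */
	sched_set_wake_up_idle(current, enable ? 1 : 0);

	/* 1 if PF_WAKE_UP_IDLE is now set on current, 0 otherwise. */
	pr_debug("wake_up_idle=%u\n", sched_get_wake_up_idle(current));
}
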
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cffeceec285c..4da8b618232e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -83,7 +83,7 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
/*
* Controls whether, when SD_SHARE_PKG_RESOURCES is on, all tasks
* go to idle CPUs when woken. If this is off, note that the
- * per-task flag PF_WAKE_ON_IDLE can still cause a task to go to an
+ * per-task flag PF_WAKE_UP_IDLE can still cause a task to go to an
* idle CPU upon being woken.
*/
unsigned int __read_mostly sysctl_sched_wake_to_idle;
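
Read together with the documentation hunk above, the policy this comment
describes is: a wakeup goes to an idle CPU when the global sysctl asks for it,
or when either the waker or the task being woken carries PF_WAKE_UP_IDLE. A
sketch of that predicate follows; the helper name is an assumption and the
SD_SHARE_PKG_RESOURCES domain check that gates the sysctl is omitted for
brevity:

/*
 * Sketch only (name and placement are assumptions, not taken from this
 * patch): the sysctl applies to every wakeup it gates, while
 * PF_WAKE_UP_IDLE on either the waker (current) or the wakee (p)
 * requests the same behaviour for this particular wakeup.
 */
static inline bool want_wake_to_idle(struct task_struct *p)
{
	return sysctl_sched_wake_to_idle ||
	       (current->flags & PF_WAKE_UP_IDLE) ||
	       (p->flags & PF_WAKE_UP_IDLE);
}
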
@@ -2453,7 +2453,25 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_SMP
-/* Precomputed fixed inverse multiplies for multiplication by y^n */
+u32 sched_get_wake_up_idle(struct task_struct *p)
+{
+ u32 enabled = p->flags & PF_WAKE_UP_IDLE;
+
+ return !!enabled;
+}
+
+int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle)
+{
+ int enable = !!wake_up_idle;
+
+ if (enable)
+ p->flags |= PF_WAKE_UP_IDLE;
+ else
+ p->flags &= ~PF_WAKE_UP_IDLE;
+
+ return 0;
+}
+
static const u32 runnable_avg_yN_inv[] = {
0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,