diff options
| author | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2014-11-21 18:25:11 +0530 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:01:03 -0700 |
| commit | 8e3aa6790ca4ff4744e30720e97c458375a35237 (patch) | |
| tree | b850a59db6aaa036957f304fb6ee6edae1c3a8c4 /drivers/base | |
| parent | 2365b0cbd64fe7a00ec2cfd3b7d8a20df640e095 (diff) | |
sched: Packing support until a frequency threshold
Add another dimension for task packing based on frequency. This patch
adds a per-cpu tunable, rq->mostly_idle_freq, which when set will
result in tasks being packed on a single cpu in cluster as long as
cluster frequency is less than set threshold.
Change-Id: I318e9af6c8788ddf5dfcda407d621449ea5343c0
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Diffstat (limited to 'drivers/base')
| -rw-r--r-- | drivers/base/cpu.c | 39 |
1 file changed, 39 insertions(+), 0 deletions(-)
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 763fd00c697b..a59fa57ef42e 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -217,6 +217,42 @@ static ssize_t __ref store_sched_mostly_idle_load(struct device *dev, return err; } +static ssize_t show_sched_mostly_idle_freq(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + ssize_t rc; + int cpunum; + unsigned int mostly_idle_freq; + + cpunum = cpu->dev.id; + + mostly_idle_freq = sched_get_cpu_mostly_idle_freq(cpunum); + + rc = snprintf(buf, PAGE_SIZE-2, "%d\n", mostly_idle_freq); + + return rc; +} + +static ssize_t __ref store_sched_mostly_idle_freq(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + int cpuid = cpu->dev.id, err; + unsigned int mostly_idle_freq; + + err = kstrtoint(strstrip((char *)buf), 0, &mostly_idle_freq); + if (err) + return err; + + err = sched_set_cpu_mostly_idle_freq(cpuid, mostly_idle_freq); + if (err >= 0) + err = count; + + return err; +} + static ssize_t show_sched_mostly_idle_nr_run(struct device *dev, struct device_attribute *attr, char *buf) { @@ -253,6 +289,8 @@ static ssize_t __ref store_sched_mostly_idle_nr_run(struct device *dev, return err; } +static DEVICE_ATTR(sched_mostly_idle_freq, 0664, show_sched_mostly_idle_freq, + store_sched_mostly_idle_freq); static DEVICE_ATTR(sched_mostly_idle_load, 0664, show_sched_mostly_idle_load, store_sched_mostly_idle_load); static DEVICE_ATTR(sched_mostly_idle_nr_run, 0664, @@ -261,6 +299,7 @@ static DEVICE_ATTR(sched_mostly_idle_nr_run, 0664, static struct attribute *hmp_sched_cpu_attrs[] = { &dev_attr_sched_mostly_idle_load.attr, &dev_attr_sched_mostly_idle_nr_run.attr, + &dev_attr_sched_mostly_idle_freq.attr, NULL }; |
