summary refs log tree commit diff
path: root/include
diff options
context:
space:
mode:
author Syed Rameez Mustafa <rameezmustafa@codeaurora.org> 2015-06-19 13:03:11 -0700
committer David Keitel <dkeitel@codeaurora.org> 2016-03-23 20:02:20 -0700
commit 87fe20de7e7c54fd00a1acea8e84a70756bf9922 (patch)
tree 86112001808745671d2edb249af6296f9f48fdc6 /include
parent d590f251533a39ac3f061dcf6e9e3b9cf8a89b4d (diff)
sched: Update the wakeup placement logic for fair and rt tasks
For the fair sched class, update the select_best_cpu() policy to do power based placement. The hope is to minimize the voltage at which the CPU runs. While RT tasks already do power based placement, their placement preference has to now take into account the power cost of all tasks on a given CPU. Also remove the check for sched_boost since sched_boost no longer intends to elevate all tasks to the highest capacity cluster. Change-Id: Ic6a7625c97d567254d93b94cec3174a91727cb87 Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Diffstat (limited to 'include')
-rw-r--r--include/linux/sched.h9
-rw-r--r--include/trace/events/sched.h26
2 files changed, 11 insertions, 24 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ba49767ee09f..fa5ca5e90917 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2339,15 +2339,6 @@ extern u32 sched_get_wake_up_idle(struct task_struct *p);
extern int sched_set_boost(int enable);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
-extern int sched_set_cpu_prefer_idle(int cpu, int prefer_idle);
-extern int sched_get_cpu_prefer_idle(int cpu);
-extern int sched_set_cpu_mostly_idle_load(int cpu, int mostly_idle_pct);
-extern int sched_get_cpu_mostly_idle_load(int cpu);
-extern int sched_set_cpu_mostly_idle_nr_run(int cpu, int nr_run);
-extern int sched_get_cpu_mostly_idle_nr_run(int cpu);
-extern int
-sched_set_cpu_mostly_idle_freq(int cpu, unsigned int mostly_idle_freq);
-extern unsigned int sched_get_cpu_mostly_idle_freq(int cpu);
#else
static inline int sched_set_boost(int enable)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 4da5f52da450..3b10396d61b6 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -116,9 +116,9 @@ TRACE_EVENT(sched_enq_deq_task,
TRACE_EVENT(sched_task_load,
TP_PROTO(struct task_struct *p, int boost, int reason,
- int sync, int prefer_idle),
+ int sync, int need_idle),
- TP_ARGS(p, boost, reason, sync, prefer_idle),
+ TP_ARGS(p, boost, reason, sync, need_idle),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -130,7 +130,7 @@ TRACE_EVENT(sched_task_load,
__field( int, boost )
__field( int, reason )
__field( int, sync )
- __field( int, prefer_idle )
+ __field( int, need_idle )
),
TP_fast_assign(
@@ -143,27 +143,24 @@ TRACE_EVENT(sched_task_load,
__entry->boost = boost;
__entry->reason = reason;
__entry->sync = sync;
- __entry->prefer_idle = prefer_idle;
+ __entry->need_idle = need_idle;
),
- TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u boost=%d reason=%d sync=%d prefer_idle=%d",
+ TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u boost=%d reason=%d sync=%d, need_idle=%d",
__entry->pid, __entry->comm, __entry->sum,
__entry->sum_scaled, __entry->period, __entry->demand,
- __entry->boost, __entry->reason, __entry->sync,
- __entry->prefer_idle)
+ __entry->boost, __entry->reason, __entry->sync, __entry->need_idle)
);
TRACE_EVENT(sched_cpu_load,
- TP_PROTO(struct rq *rq, int idle, int mostly_idle, u64 irqload,
- unsigned int power_cost, int temp),
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
- TP_ARGS(rq, idle, mostly_idle, irqload, power_cost, temp),
+ TP_ARGS(rq, idle, irqload, power_cost, temp),
TP_STRUCT__entry(
__field(unsigned int, cpu )
__field(unsigned int, idle )
- __field(unsigned int, mostly_idle )
__field(unsigned int, nr_running )
__field(unsigned int, nr_big_tasks )
__field(unsigned int, load_scale_factor )
@@ -180,7 +177,6 @@ TRACE_EVENT(sched_cpu_load,
TP_fast_assign(
__entry->cpu = rq->cpu;
__entry->idle = idle;
- __entry->mostly_idle = mostly_idle;
__entry->nr_running = rq->nr_running;
__entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
__entry->load_scale_factor = rq->load_scale_factor;
@@ -194,9 +190,9 @@ TRACE_EVENT(sched_cpu_load,
__entry->temp = temp;
),
- TP_printk("cpu %u idle %d mostly_idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d temp %d",
- __entry->cpu, __entry->idle, __entry->mostly_idle, __entry->nr_running,
- __entry->nr_big_tasks, __entry->load_scale_factor, __entry->capacity,
+ TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d temp %d",
+ __entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
+ __entry->load_scale_factor, __entry->capacity,
__entry->cumulative_runnable_avg, __entry->irqload, __entry->cur_freq,
__entry->max_freq, __entry->power_cost, __entry->cstate, __entry->temp)
);