author     Joonwoo Park <joonwoop@codeaurora.org>   2015-09-15 09:35:53 -0700
committer  David Keitel <dkeitel@codeaurora.org>    2016-03-23 20:02:29 -0700
commit     446beddcd4aeb4246c6a1f391063138bd4e899ee (patch)
tree       7e6e81abd773faa01ffe2d25d104437504ef4f3d /include
parent     809ea3fd1e70d8f572249d90530b7c956b70f8d8 (diff)
sched: account new task load so that governor can apply different policy
Account the amount of load contributed by new tasks within the CPU load so that the governor can apply a different policy when the CPU is loaded by new tasks. To be able to distinguish new task load, a new tunable sched_new_task_windows is also introduced. The tunable defines tasks as new when they have been active for fewer than the configured number of windows.

Change-Id: I2e2e62e4103882f7362154b792ab978b181b9f59
Suggested-by: Saravana Kannan <skannan@codeaurora.org>
[joonwoop@codeaurora.org: Omitted changes for drivers/cpufreq/cpufreq_interactive.c; the cpufreq changes need to be applied separately later. Fixed conflicts in include/linux/sched.h and include/linux/sched/sysctl.h. Omitted changes for qhmp_core.c.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
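The per-window accounting that actually consumes these new fields lives in kernel/sched/ and is not part of this include/-only diff. The following is a rough sketch of the idea under that assumption; the helper names is_new_task() and add_to_busy() are illustrative, while active_windows, sysctl_sched_new_task_windows and the rq nt_*_runnable_sum counters are the names this patch introduces or references:

/*
 * Illustrative sketch only; the real accounting sits in the
 * scheduler's window-based load tracking, outside this diff.
 */
static inline int is_new_task(struct task_struct *p)
{
	/* A task is "new" until it has been active for at least
	 * sysctl_sched_new_task_windows full windows. */
	return p->ravg.active_windows < sysctl_sched_new_task_windows;
}

static void add_to_busy(struct rq *rq, struct task_struct *p, u64 delta)
{
	rq->curr_runnable_sum += delta;
	/* New-task demand is also accumulated separately so the
	 * governor can see how much of the load comes from new tasks. */
	if (is_new_task(p))
		rq->nt_curr_runnable_sum += delta;
}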
Diffstat (limited to 'include')
-rw-r--r--   include/linux/sched.h          10
-rw-r--r--   include/linux/sched/sysctl.h    3
-rw-r--r--   include/trace/events/sched.h   31
3 files changed, 35 insertions(+), 9 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77d3b4c106cd..fc0f5db45791 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1289,6 +1289,7 @@ struct ravg {
u32 sum_history[RAVG_HIST_SIZE_MAX];
#ifdef CONFIG_SCHED_FREQ_INPUT
u32 curr_window, prev_window;
+ u16 active_windows;
#endif
};
@@ -2125,10 +2126,15 @@ static inline cputime_t task_gtime(struct task_struct *t)
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+struct sched_load {
+ unsigned long prev_load;
+ unsigned long new_task_load;
+};
+
#if defined(CONFIG_SCHED_FREQ_INPUT)
extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
-extern void sched_get_cpus_busy(unsigned long *busy,
+extern void sched_get_cpus_busy(struct sched_load *busy,
const struct cpumask *query_cpus);
extern void sched_set_io_is_busy(int val);
int sched_update_freq_max_load(const cpumask_t *cpumask);
@@ -2141,6 +2147,8 @@ static inline unsigned long sched_get_busy(int cpu)
{
return 0;
}
+static inline void sched_get_cpus_busy(struct sched_load *busy,
+ const struct cpumask *query_cpus) {};
static inline void sched_set_io_is_busy(int val) {};
static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
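The cpufreq_interactive consumer of the reworked sched_get_cpus_busy() interface was deliberately left out of this patch, so the following is only a hypothetical caller showing how a governor might use struct sched_load; governor_sample() and the pr_debug reporting are made up for illustration:

/* Hypothetical governor-side sampling; not part of this patch. */
static void governor_sample(const struct cpumask *cpus)
{
	struct sched_load *busy;
	int i = 0, cpu;

	busy = kcalloc(cpumask_weight(cpus), sizeof(*busy), GFP_KERNEL);
	if (!busy)
		return;

	sched_get_cpus_busy(busy, cpus);

	for_each_cpu(cpu, cpus) {
		/* new_task_load lets the governor treat demand from
		 * freshly started tasks differently from steady load. */
		pr_debug("cpu%d: prev_load %lu new_task_load %lu\n",
			 cpu, busy[i].prev_load, busy[i].new_task_load);
		i++;
	}

	kfree(busy);
}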
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 5db0256590c6..0fb660306a9f 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -69,6 +69,9 @@ extern unsigned int sysctl_sched_powerband_limit_pct;
extern unsigned int sysctl_sched_lowspill_freq;
extern unsigned int sysctl_sched_pack_freq;
extern unsigned int sysctl_sched_boost;
+#if defined(CONFIG_SCHED_FREQ_INPUT)
+extern unsigned int sysctl_sched_new_task_windows;
+#endif
#else /* CONFIG_SCHED_HMP */
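The ctl_table entry that exposes the new tunable is registered outside of include/ and is not shown in this diff; a typical registration would look roughly like the sketch below (the table name and exact handler are assumptions):

/* Sketch of a sysctl registration for the new tunable; the actual
 * entry in this tree may differ. */
static struct ctl_table hmp_new_task_table[] = {
	{
		.procname	= "sched_new_task_windows",
		.data		= &sysctl_sched_new_task_windows,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

Once wired into the kernel sysctl table, the threshold would be tunable from userspace, typically via /proc/sys/kernel/sched_new_task_windows.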
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 69aede209948..ae04e2095389 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -244,6 +244,9 @@ TRACE_EVENT(sched_update_task_ravg,
__field( u64, ps )
__field( u32, curr_window )
__field( u32, prev_window )
+ __field( u64, nt_cs )
+ __field( u64, nt_ps )
+ __field( u32, active_windows )
#endif
),
@@ -267,12 +270,15 @@ TRACE_EVENT(sched_update_task_ravg,
__entry->ps = rq->prev_runnable_sum;
__entry->curr_window = p->ravg.curr_window;
__entry->prev_window = p->ravg.prev_window;
+ __entry->nt_cs = rq->nt_curr_runnable_sum;
+ __entry->nt_ps = rq->nt_prev_runnable_sum;
+ __entry->active_windows = p->ravg.active_windows;
#endif
),
TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu"
#ifdef CONFIG_SCHED_FREQ_INPUT
- " cs %llu ps %llu cur_window %u prev_window %u"
+ " cs %llu ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u"
#endif
, __entry->wallclock, __entry->win_start, __entry->delta,
task_event_names[__entry->evt], __entry->cpu,
@@ -282,7 +288,9 @@ TRACE_EVENT(sched_update_task_ravg,
__entry->sum, __entry->irqtime
#ifdef CONFIG_SCHED_FREQ_INPUT
, __entry->cs, __entry->ps, __entry->curr_window,
- __entry->prev_window
+ __entry->prev_window,
+ __entry->nt_cs, __entry->nt_ps,
+ __entry->active_windows
#endif
)
);
@@ -374,37 +382,44 @@ TRACE_EVENT(sched_migration_update_sum,
__field(int, pid )
__field( u64, cs )
__field( u64, ps )
+ __field( s64, nt_cs )
+ __field( s64, nt_ps )
),
TP_fast_assign(
__entry->cpu = cpu_of(rq);
__entry->cs = rq->curr_runnable_sum;
__entry->ps = rq->prev_runnable_sum;
+ __entry->nt_cs = (s64)rq->nt_curr_runnable_sum;
+ __entry->nt_ps = (s64)rq->nt_prev_runnable_sum;
__entry->pid = p->pid;
),
- TP_printk("cpu %d: cs %llu ps %llu pid %d", __entry->cpu,
- __entry->cs, __entry->ps, __entry->pid)
+ TP_printk("cpu %d: cs %llu ps %llu nt_cs %lld nt_ps %lld pid %d",
+ __entry->cpu, __entry->cs, __entry->ps,
+ __entry->nt_cs, __entry->nt_ps, __entry->pid)
);
TRACE_EVENT(sched_get_busy,
- TP_PROTO(int cpu, u64 load),
+ TP_PROTO(int cpu, u64 load, u64 nload),
- TP_ARGS(cpu, load),
+ TP_ARGS(cpu, load, nload),
TP_STRUCT__entry(
__field( int, cpu )
__field( u64, load )
+ __field( u64, nload )
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->load = load;
+ __entry->nload = nload;
),
- TP_printk("cpu %d load %lld",
- __entry->cpu, __entry->load)
+ TP_printk("cpu %d load %lld new_task_load %lld",
+ __entry->cpu, __entry->load, __entry->nload)
);
TRACE_EVENT(sched_freq_alert,
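The call site that supplies the extra nload argument to the extended sched_get_busy tracepoint lives in kernel/sched/ and is not part of this diff; a hypothetical emitter would simply pass the new-task portion alongside the total busy time:

/* Hypothetical call site; everything except trace_sched_get_busy()
 * is illustrative. */
static void report_busy(int cpu, u64 load, u64 new_task_load)
{
	trace_sched_get_busy(cpu, load, new_task_load);
}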