author    Linux Build Service Account <lnxbuild@localhost>    2016-08-26 22:22:38 -0700
committer Gerrit - the friendly Code Review server <code-review@localhost>    2016-08-26 22:22:38 -0700
commit    1b7819036eaa2ba0c93bcc65fbbc1e72fa9b824a (patch)
tree      a08a77e7e4a5c5bd332a7a98637516dd65b217d3 /include/linux
parent    373c6ba99fddd8071aa8eebd889c73426760fa3c (diff)
parent    2552980f79e476b99d9f489c265a836dd61a2102 (diff)
Merge "sched: handle frequency alert notifications better"
Diffstat (limited to 'include/linux')
-rw-r--r--    include/linux/sched.h           120
-rw-r--r--    include/linux/sched/sysctl.h     18
2 files changed, 67 insertions(+), 71 deletions(-)
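
At a high level, this merge retires the separate CONFIG_SCHED_FREQ_INPUT guard and folds the frequency-input interfaces (windowed busy statistics, cstate/dstate hints, and the cycle counter callback) under CONFIG_SCHED_HMP. As a minimal sketch of the windowed-statistics API whose guards change below, a governor-side caller might start the window like this; the calling context, the 20 ms window, and the nanosecond units are assumptions for illustration, not part of this patch:

	/* Hypothetical governor init: align the scheduler's busy-tracking
	 * window with the governor's sampling period (units assumed ns). */
	u64 window_start = sched_ktime_clock();
	int ret = sched_set_window(window_start, 20000000); /* assumed 20 ms */

	if (ret)
		pr_err("sched_set_window failed: %d\n", ret);
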
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 06dd540192c7..74b2a11b1d1c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1232,9 +1232,6 @@ struct sched_avg {
u64 last_update_time, load_sum;
u32 util_sum, period_contrib;
unsigned long load_avg, util_avg;
-#ifdef CONFIG_SCHED_HMP
- u32 runnable_avg_sum_scaled;
-#endif
};
#ifdef CONFIG_SCHEDSTATS
@@ -1308,12 +1305,10 @@ struct ravg {
u64 mark_start;
u32 sum, demand;
u32 sum_history[RAVG_HIST_SIZE_MAX];
-#ifdef CONFIG_SCHED_FREQ_INPUT
u32 curr_window, prev_window;
u16 active_windows;
u32 pred_demand;
u8 busy_buckets[NUM_BUSY_BUCKETS];
-#endif
};
struct sched_entity {
@@ -2155,32 +2150,6 @@ static inline cputime_t task_gtime(struct task_struct *t)
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
-struct sched_load {
- unsigned long prev_load;
- unsigned long new_task_load;
- unsigned long predicted_load;
-};
-
-#if defined(CONFIG_SCHED_FREQ_INPUT)
-extern int sched_set_window(u64 window_start, unsigned int window_size);
-extern unsigned long sched_get_busy(int cpu);
-extern void sched_get_cpus_busy(struct sched_load *busy,
- const struct cpumask *query_cpus);
-extern void sched_set_io_is_busy(int val);
-#else
-static inline int sched_set_window(u64 window_start, unsigned int window_size)
-{
- return -EINVAL;
-}
-static inline unsigned long sched_get_busy(int cpu)
-{
- return 0;
-}
-static inline void sched_get_cpus_busy(struct sched_load *busy,
- const struct cpumask *query_cpus) {};
-static inline void sched_set_io_is_busy(int val) {};
-#endif
-
/*
* Per process flags
*/
@@ -2349,10 +2318,6 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
-extern void sched_set_cpu_cstate(int cpu, int cstate,
- int wakeup_energy, int wakeup_latency);
-extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
- int wakeup_energy, int wakeup_latency);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -2365,24 +2330,27 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return -EINVAL;
return 0;
}
-static inline void
-sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
-{
-}
-
-static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
- int dstate, int wakeup_energy, int wakeup_latency)
-{
-}
#endif
+struct sched_load {
+ unsigned long prev_load;
+ unsigned long new_task_load;
+ unsigned long predicted_load;
+};
+
extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
extern u32 sched_get_wake_up_idle(struct task_struct *p);
-extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
-extern unsigned int sched_get_group_id(struct task_struct *p);
-#ifdef CONFIG_SCHED_HMP
+struct cpu_cycle_counter_cb {
+ u64 (*get_cpu_cycle_counter)(int cpu);
+};
+#ifdef CONFIG_SCHED_HMP
+extern int sched_set_window(u64 window_start, unsigned int window_size);
+extern unsigned long sched_get_busy(int cpu);
+extern void sched_get_cpus_busy(struct sched_load *busy,
+ const struct cpumask *query_cpus);
+extern void sched_set_io_is_busy(int val);
extern int sched_set_boost(int enable);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
@@ -2391,9 +2359,42 @@ extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
extern int sched_update_freq_max_load(const cpumask_t *cpumask);
-extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32
- fmax);
-#else
+extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+ u32 fmin, u32 fmax);
+extern void sched_set_cpu_cstate(int cpu, int cstate,
+ int wakeup_energy, int wakeup_latency);
+extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
+ int wakeup_energy, int wakeup_latency);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern u64 sched_ktime_clock(void);
+extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
+extern unsigned int sched_get_group_id(struct task_struct *p);
+
+#else /* CONFIG_SCHED_HMP */
+static inline u64 sched_ktime_clock(void)
+{
+ return 0;
+}
+
+static inline int
+register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+ return 0;
+}
+
+static inline int sched_set_window(u64 window_start, unsigned int window_size)
+{
+ return -EINVAL;
+}
+static inline unsigned long sched_get_busy(int cpu)
+{
+ return 0;
+}
+static inline void sched_get_cpus_busy(struct sched_load *busy,
+ const struct cpumask *query_cpus) {};
+
+static inline void sched_set_io_is_busy(int val) {};
+
static inline int sched_set_boost(int enable)
{
return -EINVAL;
@@ -2406,7 +2407,17 @@ static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
u32 fmin, u32 fmax) { }
-#endif
+
+static inline void
+sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
+{
+}
+
+static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
+ int dstate, int wakeup_energy, int wakeup_latency)
+{
+}
+#endif /* CONFIG_SCHED_HMP */
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
@@ -2441,8 +2452,6 @@ extern u64 local_clock(void);
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);
-extern u64 sched_ktime_clock(void);
-
extern void sched_clock_init(void);
extern int sched_clock_initialized(void);
@@ -3379,9 +3388,4 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
-struct cpu_cycle_counter_cb {
- u64 (*get_cpu_cycle_counter)(int cpu);
-};
-int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
-
#endif
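
The sched.h hunks above also move struct cpu_cycle_counter_cb and register_cpu_cycle_counter_cb under the same CONFIG_SCHED_HMP guard. A hedged sketch, assuming a hypothetical platform driver (the demo_* names, the counter source, and the init context are not from this patch), of how the callback registration and the per-CPU busy query could be wired together:

	#include <linux/sched.h>

	/* Assumed platform hook: return a raw cycle count for @cpu. */
	static u64 demo_read_cycles(int cpu)
	{
		return 0; /* platform-specific counter read goes here */
	}

	static struct cpu_cycle_counter_cb demo_cb = {
		.get_cpu_cycle_counter = demo_read_cycles,
	};

	static int __init demo_init(void)
	{
		struct sched_load load;
		int ret;

		/* Give the scheduler a cycle source for demand accounting. */
		ret = register_cpu_cycle_counter_cb(&demo_cb);
		if (ret)
			return ret;

		/* One struct sched_load entry per CPU in the query mask. */
		sched_get_cpus_busy(&load, cpumask_of(0));
		pr_info("cpu0 prev_load=%lu predicted_load=%lu\n",
			load.prev_load, load.predicted_load);
		return 0;
	}
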
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 3e5fd5619367..68a9bdde6604 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -40,21 +40,14 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_wake_to_idle;
-extern unsigned int sysctl_sched_wakeup_load_threshold;
+
+#ifdef CONFIG_SCHED_HMP
+extern int sysctl_sched_freq_inc_notify;
+extern int sysctl_sched_freq_dec_notify;
extern unsigned int sysctl_sched_window_stats_policy;
extern unsigned int sysctl_sched_ravg_hist_size;
extern unsigned int sysctl_sched_cpu_high_irqload;
-
-#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
extern unsigned int sysctl_sched_init_task_load_pct;
-#endif
-
-#ifdef CONFIG_SCHED_FREQ_INPUT
-extern int sysctl_sched_freq_inc_notify;
-extern int sysctl_sched_freq_dec_notify;
-#endif
-
-#ifdef CONFIG_SCHED_HMP
extern unsigned int sysctl_sched_spill_nr_run;
extern unsigned int sysctl_sched_spill_load_pct;
extern unsigned int sysctl_sched_upmigrate_pct;
@@ -66,11 +59,10 @@ extern unsigned int sysctl_sched_big_waker_task_load_pct;
extern unsigned int sysctl_sched_select_prev_cpu_us;
extern unsigned int sysctl_sched_enable_colocation;
extern unsigned int sysctl_sched_restrict_cluster_spill;
-#if defined(CONFIG_SCHED_FREQ_INPUT)
extern unsigned int sysctl_sched_new_task_windows;
extern unsigned int sysctl_sched_pred_alert_freq;
extern unsigned int sysctl_sched_freq_aggregate;
-#endif
+extern unsigned int sysctl_sched_enable_thread_grouping;
#else /* CONFIG_SCHED_HMP */
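
The sysctl.h hunk moves the frequency alert thresholds (sysctl_sched_freq_inc_notify, sysctl_sched_freq_dec_notify) and the prediction knobs under CONFIG_SCHED_HMP and exports sysctl_sched_enable_thread_grouping. A small userspace sketch of tuning the alert thresholds; the /proc/sys/kernel paths follow the usual sched_* naming convention but are an assumption, since the sysctl table registration is outside this diff:

	#include <stdio.h>

	/* Assumed proc paths; the sysctl registration is not in this diff. */
	static int write_sysctl(const char *path, int val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fprintf(f, "%d\n", val);
		return fclose(f);
	}

	int main(void)
	{
		/* Raise both thresholds so fewer frequency alert
		 * notifications fire (kHz values assumed for illustration). */
		write_sysctl("/proc/sys/kernel/sched_freq_inc_notify", 400000);
		write_sysctl("/proc/sys/kernel/sched_freq_dec_notify", 400000);
		return 0;
	}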