Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h  |  31 +++++++++++++++++++++++++++----
1 file changed, 27 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 27b28369440d..471dc9faab35 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -351,13 +351,23 @@ struct cfs_bandwidth { };
#ifdef CONFIG_SCHED_HMP
+#define NUM_TRACKED_WINDOWS 2
+#define NUM_LOAD_INDICES 1000
+
struct hmp_sched_stats {
int nr_big_tasks;
u64 cumulative_runnable_avg;
u64 pred_demands_sum;
};
+struct load_subtractions {
+ u64 window_start;
+ u64 subs;
+ u64 new_subs;
+};
+
struct sched_cluster {
+ raw_spinlock_t load_lock;
struct list_head list;
struct cpumask cpus;
int id;
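
A note on the new structures: load_subtractions records, per tracked window (keyed by window_start), load that must later be subtracted from a CPU's runnable sums, e.g. when a task migrates away; subs and new_subs accumulate the regular-task and new-task portions separately. The per-cluster load_lock serializes these updates, and the array itself lives on struct rq (next hunk), with NUM_TRACKED_WINDOWS = 2 covering the current and previous window. A minimal sketch of recording a subtraction follows; the helper name and the slot-matching policy are assumptions, not part of this patch:

/* Hypothetical sketch: fold a subtraction into the slot whose
 * window_start matches; one slot exists per tracked window. */
static void record_load_subtraction(struct rq *rq, u64 window_start,
                                    u64 subs, u64 new_subs)
{
        int i;

        for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
                if (rq->load_subs[i].window_start == window_start) {
                        rq->load_subs[i].subs += subs;
                        rq->load_subs[i].new_subs += new_subs;
                        return;
                }
        }
}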
@@ -742,6 +752,13 @@ struct rq {
u64 prev_runnable_sum;
u64 nt_curr_runnable_sum;
u64 nt_prev_runnable_sum;
+ struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
+ DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
+                     NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES);
+ u8 *top_tasks[NUM_TRACKED_WINDOWS];
+ u8 curr_table;
+ int prev_top;
+ int curr_top;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
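
Each rq now carries two top-task tables, one per tracked window: top_tasks[i] counts, per load bucket (NUM_LOAD_INDICES buckets), how many tasks currently fall in that bucket, while top_tasks_bitmap mirrors the non-empty buckets so the highest occupied one can be located with a bitmap search. curr_table selects the current window's table, and curr_top/prev_top cache the top bucket indices. A sketch of the bookkeeping when a task's load enters a bucket; the helper name and the reversed bit order (so a forward bitmap search finds the top bucket first) are assumptions:

/* Hypothetical sketch: account a load bucket in the current
 * window's table and keep bitmap and cached top index in sync. */
static void top_task_arrive(struct rq *rq, int index)
{
        u8 *table = rq->top_tasks[rq->curr_table];
        unsigned long *bitmap = rq->top_tasks_bitmap[rq->curr_table];

        if (table[index]++ == 0)
                __set_bit(NUM_LOAD_INDICES - index - 1, bitmap);
        if (index > rq->curr_top)
                rq->curr_top = index;
}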
@@ -1017,6 +1034,10 @@ static inline void sched_ttwu_pending(void) { }
#define WINDOW_STATS_AVG 3
#define WINDOW_STATS_INVALID_POLICY 4
+#define FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK 0
+#define FREQ_REPORT_CPU_LOAD 1
+#define FREQ_REPORT_TOP_TASK 2
+
#define MAJOR_TASK_PCT 85
#define SCHED_UPMIGRATE_MIN_NICE 15
#define EXITING_TASK_MARKER 0xdeaddead
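
The FREQ_REPORT_* values select what load the scheduler reports to the cpufreq governor: the maximum of the aggregate CPU load and the top task's load (index 0, presumably the default), the CPU load alone, or the top-task load alone. A sketch of applying the policy; the helper is illustrative, and the policy value would come from a tunable not shown in this hunk:

/* Hypothetical sketch: choose the load to report under the
 * configured frequency-reporting policy. */
static inline u64 load_to_report(u64 cpu_load, u64 top_task_load,
                                 unsigned int policy)
{
        switch (policy) {
        case FREQ_REPORT_CPU_LOAD:
                return cpu_load;
        case FREQ_REPORT_TOP_TASK:
                return top_task_load;
        case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
        default:
                return max(cpu_load, top_task_load);
        }
}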
@@ -1056,8 +1077,9 @@ extern unsigned int __read_mostly sched_spill_load;
extern unsigned int __read_mostly sched_upmigrate;
extern unsigned int __read_mostly sched_downmigrate;
extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
+extern unsigned int __read_mostly sched_load_granule;
-extern void init_new_task_load(struct task_struct *p);
+extern void init_new_task_load(struct task_struct *p, bool idle_task);
extern u64 sched_ktime_clock(void);
extern int got_boost_kick(void);
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
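
sched_load_granule is the bucket width for the top-task tables: a task's windowed load divided by the granule gives its bucket index, so the granule is presumably the window length divided by NUM_LOAD_INDICES. The extra idle_task flag on init_new_task_load() presumably lets the idle task skip the default initial demand. A sketch of the load-to-bucket mapping; the helper name and the clamp are assumptions:

/* Hypothetical sketch: map a windowed load onto one of the
 * NUM_LOAD_INDICES buckets, clamping to the last bucket. */
static inline int load_to_index(u32 load)
{
        int index = load / sched_load_granule;

        return min(index, NUM_LOAD_INDICES - 1);
}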
@@ -1401,6 +1423,7 @@ extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
struct cftype *cft, u64 upmigrate_discourage);
extern void sched_hmp_parse_dt(void);
extern void init_sched_hmp_boost_policy(void);
+extern void clear_top_tasks_bitmap(unsigned long *bitmap);
#else /* CONFIG_SCHED_HMP */
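
The clear_top_tasks_bitmap() declared above resets a window's bucket bitmap when that window is recycled. One plausible implementation is sketched below; the real one may additionally set a sentinel bit past the last index so bitmap searches terminate, which is an assumption here:

/* Hypothetical sketch: clear every bucket bit for a recycled
 * window's top-tasks bitmap. */
void clear_top_tasks_bitmap(unsigned long *bitmap)
{
        bitmap_zero(bitmap, NUM_LOAD_INDICES);
}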
@@ -1503,7 +1526,9 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
return NULL;
}
-static inline void init_new_task_load(struct task_struct *p) { }
+static inline void init_new_task_load(struct task_struct *p, bool idle_task)
+{
+}
static inline u64 scale_load_to_cpu(u64 load, int cpu)
{
@@ -1570,8 +1595,6 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
static inline void add_new_task_to_grp(struct task_struct *new) {}
#define sched_enable_hmp 0
-#define sched_freq_legacy_mode 1
-#define sched_migration_fixup 0
#define PRED_DEMAND_DELTA (0)
static inline void