Diffstat (limited to 'kernel/sched/sched.h')
 kernel/sched/sched.h | 69
 1 file changed, 61 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 07a3cd3c6fbc..fa4d0ab014b1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -33,8 +33,10 @@ extern long calc_load_fold_active(struct rq *this_rq);
#ifdef CONFIG_SMP
extern void update_cpu_load_active(struct rq *this_rq);
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
#else
static inline void update_cpu_load_active(struct rq *this_rq) { }
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
#endif
/*
@@ -226,9 +228,8 @@ struct cfs_bandwidth {
ktime_t period;
u64 quota, runtime;
s64 hierarchical_quota;
- u64 runtime_expires;
- int idle, period_active;
+ short idle, period_active;
struct hrtimer period_timer, slack_timer;
struct list_head throttled_cfs_rq;
@@ -430,7 +431,6 @@ struct related_thread_group {
};
extern struct list_head cluster_head;
-extern int num_clusters;
extern struct sched_cluster *sched_cluster[NR_CPUS];
struct cpu_cycle {
@@ -441,6 +441,7 @@ struct cpu_cycle {
#define for_each_sched_cluster(cluster) \
list_for_each_entry_rcu(cluster, &cluster_head, list)
+extern unsigned int sched_disable_window_stats;
#endif /* CONFIG_SCHED_HMP */
/* CFS-related fields in a runqueue */
@@ -511,6 +512,10 @@ struct cfs_rq {
struct list_head leaf_cfs_rq_list;
struct task_group *tg; /* group that "owns" this runqueue */
+#ifdef CONFIG_SCHED_WALT
+ u64 cumulative_runnable_avg;
+#endif
+
#ifdef CONFIG_CFS_BANDWIDTH
#ifdef CONFIG_SCHED_HMP
@@ -518,7 +523,6 @@ struct cfs_rq {
#endif
int runtime_enabled;
- u64 runtime_expires;
s64 runtime_remaining;
u64 throttled_clock, throttled_clock_task;
@@ -793,6 +797,7 @@ struct rq {
int cstate, wakeup_latency, wakeup_energy;
u64 window_start;
+ u64 load_reported_window;
unsigned long hmp_flags;
u64 cur_irqload;
@@ -818,6 +823,7 @@ struct rq {
#endif
#ifdef CONFIG_SCHED_WALT
+ unsigned int cur_freq;
u64 cumulative_runnable_avg;
u64 window_start;
u64 curr_runnable_sum;
@@ -1466,7 +1472,6 @@ static inline bool is_short_burst_task(struct task_struct *p)
p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
}
-extern void check_for_migration(struct rq *rq, struct task_struct *p);
extern void pre_big_task_count_change(const struct cpumask *cpus);
extern void post_big_task_count_change(const struct cpumask *cpus);
extern void set_hmp_defaults(void);
@@ -1726,7 +1731,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
return 1;
}
-static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
static inline void pre_big_task_count_change(void) { }
static inline void post_big_task_count_change(void) { }
static inline void set_hmp_defaults(void) { }
@@ -1853,7 +1857,7 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
-#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
extern struct static_key_false sched_numa_balancing;
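
Note on the hunk above: the added !! makes the non-jump-label sched_feat() evaluate to exactly 0 or 1 instead of the raw masked bit, which matters if the result is compared against 1 or truncated into a narrower type (a feature bit above bit 31 would otherwise be lost in a 32-bit conversion). A user-space sketch of the difference, with hypothetical feature indices and the stand-in macros feat_raw()/feat_bool():

#include <stdio.h>

/* Hypothetical feature bits standing in for __SCHED_FEAT_* indices. */
#define FEAT_A 0
#define FEAT_B 3

static unsigned long features = (1UL << FEAT_A) | (1UL << FEAT_B);

/* Old form: evaluates to the raw masked value (0, 1, 8, ...). */
#define feat_raw(x)  (features & (1UL << (x)))
/* New form: !! collapses any non-zero mask to exactly 1. */
#define feat_bool(x) (!!(features & (1UL << (x))))

int main(void)
{
	printf("raw  FEAT_B = %lu\n", feat_raw(FEAT_B));  /* prints 8 */
	printf("bool FEAT_B = %d\n",  feat_bool(FEAT_B)); /* prints 1 */
	return 0;
}
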
@@ -2026,6 +2030,7 @@ static const u32 prio_to_wmult[40] = {
#define DEQUEUE_SLEEP 0x01
#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE 0x04 /* matches ENQUEUE_MOVE */
+#define DEQUEUE_IDLE 0x80 /* The last dequeue before IDLE */
#define ENQUEUE_WAKEUP 0x01
#define ENQUEUE_RESTORE 0x02
@@ -2352,6 +2357,26 @@ unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
}
#endif
+#ifndef arch_scale_max_freq_capacity
+static __always_inline
+unsigned long arch_scale_max_freq_capacity(struct sched_domain *sd, int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
+
+#ifndef arch_scale_min_freq_capacity
+static __always_inline
+unsigned long arch_scale_min_freq_capacity(struct sched_domain *sd, int cpu)
+{
+ /*
+ * Multiplied with any capacity value, this scale factor will return
+ * 0, which represents an un-capped state
+ */
+ return 0;
+}
+#endif
+
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
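
Note on the two new hooks above: they follow the fixed-point convention of the other arch_scale_*() helpers, where SCHED_CAPACITY_SCALE (1024) means "no scaling" and callers multiply then shift right by SCHED_CAPACITY_SHIFT; the default min-capacity stub returns 0 so that, as the patch comment says, any capacity multiplied by it reads as un-capped. A standalone sketch of that convention only; scale_max_freq_capacity()/scale_min_freq_capacity() below are stand-ins, and how scheduler callers actually combine these values is not part of this hunk:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1UL << SCHED_CAPACITY_SHIFT)

/* Default stubs, mirroring the patch: max cap means "no limit",
 * min cap of 0 means "un-capped". */
static unsigned long scale_max_freq_capacity(void) { return SCHED_CAPACITY_SCALE; }
static unsigned long scale_min_freq_capacity(void) { return 0; }

int main(void)
{
	unsigned long cpu_cap = 760;  /* example raw capacity */

	unsigned long max_capped = (cpu_cap * scale_max_freq_capacity())
					>> SCHED_CAPACITY_SHIFT;
	unsigned long min_floor  = (cpu_cap * scale_min_freq_capacity())
					>> SCHED_CAPACITY_SHIFT;

	/* With the default stubs: max_capped == 760, min_floor == 0. */
	printf("max-capped %lu, min floor %lu\n", max_capped, min_floor);
	return 0;
}
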
@@ -2378,6 +2403,19 @@ extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int walt_ravg_window;
extern bool walt_disabled;
+static inline unsigned long task_util(struct task_struct *p)
+{
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_task_util) {
+ unsigned long demand = p->ravg.demand;
+ return (demand << 10) / walt_ravg_window;
+ }
+#endif
+ return p->se.avg.util_avg;
+}
+
+
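
Note on task_util() above: with WALT enabled it converts the task's window-based demand (nanoseconds of the walt_ravg_window) into the 0..1024 capacity scale via the << 10 shift, and otherwise falls back to PELT's se.avg.util_avg. A small numeric sketch of that conversion, using example window/demand values only:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10  /* 1024 == full capacity */

int main(void)
{
	/* Example values: a 20 ms WALT window and 5 ms of demand. */
	unsigned long long walt_ravg_window = 20000000ULL; /* ns */
	unsigned long long demand = 5000000ULL;            /* ns */

	/* Same arithmetic as the patch: (demand << 10) / window. */
	unsigned long util = (unsigned long)((demand << SCHED_CAPACITY_SHIFT)
					     / walt_ravg_window);

	printf("util = %lu of 1024\n", util);  /* 5/20 of 1024 = 256 */
	return 0;
}
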
/*
* cpu_util returns the amount of capacity of a CPU that is used by CFS
* tasks. The unit of the return value must be the one of capacity so we can
@@ -2852,7 +2890,22 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
struct update_util_data *data;
- data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
+#ifdef CONFIG_SCHED_HMP
+ /*
+ * Skip if we've already reported, but not if this is an inter-cluster
+ * migration. Also only allow WALT update sites.
+ */
+ if (!(flags & SCHED_CPUFREQ_WALT))
+ return;
+ if (!sched_disable_window_stats &&
+ (rq->load_reported_window == rq->window_start) &&
+ !(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
+ return;
+ rq->load_reported_window = rq->window_start;
+#endif
+
+ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
+ cpu_of(rq)));
if (data)
data->func(data, rq_clock(rq), flags);
}
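
Note on the cpufreq_update_util() hunk above: under CONFIG_SCHED_HMP it drops any update not tagged SCHED_CPUFREQ_WALT and rate-limits governor callbacks to one report per WALT window per rq, unless window stats are disabled or the update comes from an inter-cluster migration, in which case load_reported_window is refreshed and the callback proceeds. A user-space model of just that gating logic; FLAG_WALT, FLAG_INTERCLUSTER_MIG, struct fake_rq and should_report() are made-up stand-ins for the real flags, struct rq and the inline check:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag values; the real SCHED_CPUFREQ_* flags live elsewhere. */
#define FLAG_WALT             0x1
#define FLAG_INTERCLUSTER_MIG 0x2

struct fake_rq {
	unsigned long long window_start;
	unsigned long long load_reported_window;
};

/* Returns true if this update should reach the cpufreq governor. */
static bool should_report(struct fake_rq *rq, unsigned int flags,
			  bool window_stats_disabled)
{
	if (!(flags & FLAG_WALT))
		return false;                    /* only WALT update sites */
	if (!window_stats_disabled &&
	    rq->load_reported_window == rq->window_start &&
	    !(flags & FLAG_INTERCLUSTER_MIG))
		return false;                    /* already reported this window */
	rq->load_reported_window = rq->window_start;
	return true;
}

int main(void)
{
	struct fake_rq rq = { .window_start = 100, .load_reported_window = 0 };

	printf("%d\n", should_report(&rq, FLAG_WALT, false));  /* 1: first report */
	printf("%d\n", should_report(&rq, FLAG_WALT, false));  /* 0: same window */
	printf("%d\n", should_report(&rq, FLAG_WALT | FLAG_INTERCLUSTER_MIG,
				     false));                   /* 1: migration */
	return 0;
}
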