Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 251
1 file changed, 247 insertions(+), 4 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 145c34cb106e..b1351226b102 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -174,6 +174,9 @@ extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
+extern void sched_update_nr_prod(int cpu, long delta, bool inc);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
+
extern void calc_global_load(unsigned long ticks);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
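The two declarations above export a windowed run-queue statistics interface. A minimal sketch of a consumer (the caller name is hypothetical, and the scaling/semantics of the returned averages are assumptions not visible in this header):

#include <linux/printk.h>
#include <linux/sched.h>

static void example_report_nr_running_avg(void)
{
	int avg, iowait_avg, big_avg;

	/* Fetch the averages accumulated since the last call (exact semantics assumed) */
	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);
	pr_info("nr_running avg=%d iowait=%d big=%d\n",
		avg, iowait_avg, big_avg);
}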
@@ -314,6 +317,25 @@ extern char ___assert_task_state[1 - 2*!!(
/* Task command name length */
#define TASK_COMM_LEN 16
+extern const char *sched_window_reset_reasons[];
+
+enum task_event {
+ PUT_PREV_TASK = 0,
+ PICK_NEXT_TASK = 1,
+ TASK_WAKE = 2,
+ TASK_MIGRATE = 3,
+ TASK_UPDATE = 4,
+ IRQ_UPDATE = 5,
+};
+
+/* Note: this needs to be kept in sync with the migrate_type_names array */
+enum migrate_types {
+ GROUP_TO_RQ,
+ RQ_TO_GROUP,
+ RQ_TO_RQ,
+ GROUP_TO_GROUP,
+};
+
#include <linux/spinlock.h>
/*
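The comment on enum migrate_types requires a matching migrate_type_names array elsewhere. A hedged sketch of how such an array could be kept in sync by indexing it with the enum values (the exact strings are assumptions):

static const char * const migrate_type_names[] = {
	[GROUP_TO_RQ]	 = "GROUP_TO_RQ",
	[RQ_TO_GROUP]	 = "RQ_TO_GROUP",
	[RQ_TO_RQ]	 = "RQ_TO_RQ",
	[GROUP_TO_GROUP] = "GROUP_TO_GROUP",
};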
@@ -334,13 +356,48 @@ extern int lockdep_tasklist_lock_is_held(void);
extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
-extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu, bool hotplug);
extern void init_idle_bootup_task(struct task_struct *idle);
extern cpumask_var_t cpu_isolated_map;
extern int runqueue_is_locked(int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_isolate_count(const cpumask_t *mask, bool include_offline);
+extern int sched_isolate_cpu(int cpu);
+extern int sched_unisolate_cpu(int cpu);
+extern int sched_unisolate_cpu_unlocked(int cpu);
+#else
+static inline int sched_isolate_count(const cpumask_t *mask,
+ bool include_offline)
+{
+ cpumask_t count_mask;
+
+ if (include_offline)
+ cpumask_andnot(&count_mask, mask, cpu_online_mask);
+ else
+ return 0;
+
+ return cpumask_weight(&count_mask);
+}
+
+static inline int sched_isolate_cpu(int cpu)
+{
+ return 0;
+}
+
+static inline int sched_unisolate_cpu(int cpu)
+{
+ return 0;
+}
+
+static inline int sched_unisolate_cpu_unlocked(int cpu)
+{
+ return 0;
+}
+#endif
+
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
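A brief usage sketch for the CPU isolation API declared above (the function name is hypothetical; the 0-on-success convention and the locking requirements of the _unlocked variant are assumptions):

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/sched.h>

static int example_isolate_and_restore(int cpu)
{
	int ret;

	ret = sched_isolate_cpu(cpu);	/* assumed to return 0 on success */
	if (ret)
		return ret;

	/* Count isolated (and, with 'true', also offline) CPUs in a mask */
	pr_info("%d CPUs unavailable\n",
		sched_isolate_count(cpu_possible_mask, true));

	return sched_unisolate_cpu(cpu);
}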
@@ -377,6 +434,7 @@ extern void scheduler_tick(void);
extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_LOCKUP_DETECTOR
+extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
@@ -386,7 +444,12 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
+extern void watchdog_enable(unsigned int cpu);
+extern void watchdog_disable(unsigned int cpu);
#else
+static inline void touch_softlockup_watchdog_sched(void)
+{
+}
static inline void touch_softlockup_watchdog(void)
{
}
@@ -399,6 +462,12 @@ static inline void touch_all_softlockup_watchdogs(void)
static inline void lockup_detector_init(void)
{
}
+static inline void watchdog_enable(unsigned int cpu)
+{
+}
+static inline void watchdog_disable(unsigned int cpu)
+{
+}
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
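A hedged sketch of how the newly exported per-CPU watchdog hooks might bracket work that keeps a CPU away from the scheduler; whether the kernel itself pairs them this way is an assumption:

static void example_quiesce_watchdog(unsigned int cpu)
{
	watchdog_disable(cpu);
	/* ... long-running per-CPU work that would otherwise trip the softlockup detector ... */
	watchdog_enable(cpu);
}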
@@ -1243,6 +1312,52 @@ struct sched_statistics {
};
#endif
+#define RAVG_HIST_SIZE_MAX 5
+#define NUM_BUSY_BUCKETS 10
+
+/* ravg represents frequency scaled cpu-demand of tasks */
+struct ravg {
+ /*
+ * 'mark_start' marks the beginning of an event (task waking up, task
+ * starting to execute, task being preempted) within a window
+ *
+ * 'sum' represents how runnable a task has been within current
+ * window. It incorporates both running time and wait time and is
+ * frequency scaled.
+ *
+ * 'sum_history' keeps track of history of 'sum' seen over previous
+ * RAVG_HIST_SIZE windows. Windows where task was entirely sleeping are
+ * ignored.
+ *
+ * 'demand' represents maximum sum seen over previous
+ * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
+ * demand for tasks.
+ *
+ * 'curr_window_cpu' represents task's contribution to cpu busy time on
+ * various CPUs in the current window
+ *
+ * 'prev_window_cpu' represents task's contribution to cpu busy time on
+ * various CPUs in the previous window
+ *
+ * 'curr_window' represents the sum of all entries in curr_window_cpu
+ *
+ * 'prev_window' represents the sum of all entries in prev_window_cpu
+ *
+ * 'pred_demand' represents task's current predicted cpu busy time
+ *
+ * 'busy_buckets' groups historical busy time into different buckets
+ * used for prediction
+ */
+ u64 mark_start;
+ u32 sum, demand;
+ u32 sum_history[RAVG_HIST_SIZE_MAX];
+ u32 *curr_window_cpu, *prev_window_cpu;
+ u32 curr_window, prev_window;
+ u16 active_windows;
+ u32 pred_demand;
+ u8 busy_buckets[NUM_BUSY_BUCKETS];
+};
+
struct sched_entity {
struct load_weight load; /* for load-balancing */
struct rb_node run_node;
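Per the comment block, 'demand' tracks the maximum 'sum' over the last sysctl_sched_ravg_hist_size windows. A minimal reconstruction of that policy (illustrative only; the real update also handles window rollover and other policies):

#include <linux/kernel.h>
#include <linux/sched.h>

static u32 example_update_demand(struct ravg *ravg, unsigned int hist_size)
{
	u32 demand = 0;
	unsigned int i;

	/* hist_size is assumed to be at most RAVG_HIST_SIZE_MAX */
	for (i = 0; i < hist_size; i++)
		demand = max(demand, ravg->sum_history[i]);

	ravg->demand = demand;
	return demand;
}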
@@ -1400,6 +1515,20 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+#ifdef CONFIG_SCHED_HMP
+ struct ravg ravg;
+ /*
+ * 'init_load_pct' represents the initial task load assigned to children
+ * of this task
+ */
+ u32 init_load_pct;
+ u64 last_wake_ts;
+ u64 last_switch_out_ts;
+ u64 last_cpu_selected_ts;
+ struct related_thread_group *grp;
+ struct list_head grp_list;
+ u64 cpu_cycles;
+#endif
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
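A hedged sketch of how 'init_load_pct' could seed a child's initial windowed demand at fork time; the percentage-of-window scaling shown here is an assumption for illustration:

#include <linux/math64.h>

static inline u64 example_initial_demand(u32 init_load_pct, u64 window_size_ns)
{
	/* e.g. init_load_pct = 15 with a 20 ms window gives a 3 ms demand */
	return div64_u64((u64)init_load_pct * window_size_ns, 100);
}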
@@ -1632,6 +1761,9 @@ struct task_struct {
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
#endif
+#ifdef CONFIG_UBSAN
+ unsigned int in_ubsan;
+#endif
/* journalling filesystem info */
void *journal_info;
@@ -2068,6 +2200,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
* Per process flags
*/
+#define PF_WAKE_UP_IDLE 0x00000002 /* try to wake up on an idle CPU */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
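PF_WAKE_UP_IDLE behaves like any other PF_* bit on task->flags. A trivial, hypothetical predicate a wakeup path could use (whether the scheduler tests the flag exactly this way is an assumption):

static inline bool example_prefers_idle_cpu(struct task_struct *p)
{
	return !!(p->flags & PF_WAKE_UP_IDLE);
}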
@@ -2246,6 +2379,93 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
}
#endif
+struct sched_load {
+ unsigned long prev_load;
+ unsigned long new_task_load;
+ unsigned long predicted_load;
+};
+
+extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
+extern u32 sched_get_wake_up_idle(struct task_struct *p);
+
+struct cpu_cycle_counter_cb {
+ u64 (*get_cpu_cycle_counter)(int cpu);
+};
+
+#ifdef CONFIG_SCHED_HMP
+extern int sched_set_window(u64 window_start, unsigned int window_size);
+extern unsigned long sched_get_busy(int cpu);
+extern void sched_get_cpus_busy(struct sched_load *busy,
+ const struct cpumask *query_cpus);
+extern void sched_set_io_is_busy(int val);
+extern int sched_set_boost(int enable);
+extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
+extern u32 sched_get_init_task_load(struct task_struct *p);
+extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
+extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
+extern int sched_update_freq_max_load(const cpumask_t *cpumask);
+extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+ u32 fmin, u32 fmax);
+extern void sched_set_cpu_cstate(int cpu, int cstate,
+ int wakeup_energy, int wakeup_latency);
+extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
+ int wakeup_energy, int wakeup_latency);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern u64 sched_ktime_clock(void);
+extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
+extern unsigned int sched_get_group_id(struct task_struct *p);
+
+#else /* CONFIG_SCHED_HMP */
+static inline u64 sched_ktime_clock(void)
+{
+ return 0;
+}
+
+static inline int
+register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+ return 0;
+}
+
+static inline int sched_set_window(u64 window_start, unsigned int window_size)
+{
+ return -EINVAL;
+}
+static inline unsigned long sched_get_busy(int cpu)
+{
+ return 0;
+}
+static inline void sched_get_cpus_busy(struct sched_load *busy,
+ const struct cpumask *query_cpus) {};
+
+static inline void sched_set_io_is_busy(int val) {};
+
+static inline int sched_set_boost(int enable)
+{
+ return -EINVAL;
+}
+
+static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
+{
+ return 0;
+}
+
+static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+ u32 fmin, u32 fmax) { }
+
+static inline void
+sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
+{
+}
+
+static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
+ int dstate, int wakeup_energy, int wakeup_latency)
+{
+}
+#endif /* CONFIG_SCHED_HMP */
+
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
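A governor-style sketch of the CONFIG_SCHED_HMP busy-time query interface declared above. The 20 ms window size, the nanosecond unit, and the interpretation of the sched_load fields are assumptions for illustration:

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void example_sample_cpu_load(int cpu)
{
	struct sched_load busy;

	/* One-time setup in practice: align windows and pick a size (20 ms assumed) */
	if (sched_set_window(sched_ktime_clock(), 20000000))
		return;

	sched_get_cpus_busy(&busy, cpumask_of(cpu));
	pr_info("cpu%d prev=%lu new_task=%lu predicted=%lu\n", cpu,
		busy.prev_load, busy.new_task_load, busy.predicted_load);
}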
@@ -2254,6 +2474,14 @@ static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
+static inline void set_wake_up_idle(bool enabled)
+{
+ if (enabled)
+ current->flags |= PF_WAKE_UP_IDLE;
+ else
+ current->flags &= ~PF_WAKE_UP_IDLE;
+}
+
/*
* Do not use outside of architecture code which knows its limitations.
*
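A short usage sketch for the set_wake_up_idle() helper defined above: a latency-sensitive thread can hint, for a bounded section, that it may be woken on an idle CPU.

static void example_latency_sensitive_section(void)
{
	set_wake_up_idle(true);		/* sets PF_WAKE_UP_IDLE on current */

	/* ... latency-critical work ... */

	set_wake_up_idle(false);	/* drop the hint again */
}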
@@ -2271,8 +2499,8 @@ extern u64 local_clock(void);
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);
-
extern void sched_clock_init(void);
+extern int sched_clock_initialized(void);
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
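A hedged sketch for the new sched_clock_initialized() predicate; the non-zero-once-ready convention is an assumption based on the name alone:

static u64 example_safe_timestamp(void)
{
	/* Fall back to 0 until the scheduler clock is usable */
	return sched_clock_initialized() ? sched_clock() : 0;
}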
@@ -2319,7 +2547,7 @@ extern unsigned long long
task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP)
extern void sched_exec(void);
#else
#define sched_exec() {}
@@ -2453,6 +2681,7 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_process_no_notif(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
@@ -2461,6 +2690,11 @@ extern void wake_up_new_task(struct task_struct *tsk);
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
+#ifdef CONFIG_SCHED_HMP
+extern void sched_exit(struct task_struct *p);
+#else
+static inline void sched_exit(struct task_struct *p) { }
+#endif
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
@@ -2583,7 +2817,7 @@ static inline void mmdrop(struct mm_struct * mm)
}
/* mmput gets rid of the mappings and all user-space */
-extern void mmput(struct mm_struct *);
+extern int mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
@@ -3110,6 +3344,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+extern struct atomic_notifier_head migration_notifier_head;
+struct migration_notify_data {
+ int src_cpu;
+ int dest_cpu;
+ int load;
+};
+
+extern struct atomic_notifier_head load_alert_notifier_head;
+
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
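The migration and load-alert notifier heads above use the standard atomic notifier machinery. A hedged registration sketch, assuming the migration chain passes a struct migration_notify_data pointer as its data argument:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/sched.h>

static int example_migration_cb(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct migration_notify_data *mnd = data;

	pr_debug("migrated %d -> %d (load %d)\n",
		 mnd->src_cpu, mnd->dest_cpu, mnd->load);
	return NOTIFY_OK;
}

static struct notifier_block example_migration_nb = {
	.notifier_call = example_migration_cb,
};

static void example_register_migration_notifier(void)
{
	atomic_notifier_chain_register(&migration_notifier_head,
				       &example_migration_nb);
}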