Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  263
1 file changed, 250 insertions(+), 13 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b78769af4591..8f6894c6e83c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -178,6 +178,28 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern u64 nr_running_integral(unsigned int cpu);
#endif
+extern void sched_update_nr_prod(int cpu, long delta, bool inc);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+ unsigned int *max_nr,
+ unsigned int *big_max_nr);
+extern u64 sched_get_cpu_last_busy_time(int cpu);
+
+#ifdef CONFIG_SMP
+extern u32 sched_get_wake_up_idle(struct task_struct *p);
+extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
+#else
+static inline u32 sched_get_wake_up_idle(struct task_struct *p)
+{
+ return 0;
+}
+
+static inline int sched_set_wake_up_idle(struct task_struct *p,
+ int wake_up_idle)
+{
+ return 0;
+}
+#endif /* CONFIG_SMP */
+
extern void calc_global_load(unsigned long ticks);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
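
For illustration, a governor-style consumer of the averaged run-queue
statistics declared above might look like the following sketch. The work
item, polling interval, and printout are hypothetical; only
sched_get_nr_running_avg() comes from this patch, and the scaling of the
returned averages is defined by the implementation.

    #include <linux/sched.h>
    #include <linux/workqueue.h>

    static void poll_nr_running(struct work_struct *work);
    static DECLARE_DELAYED_WORK(poll_work, poll_nr_running);

    static void poll_nr_running(struct work_struct *work)
    {
        int avg, iowait_avg, big_avg;
        unsigned int max_nr, big_max_nr;

        sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
                                 &max_nr, &big_max_nr);

        /* react to the averaged run-queue depth */
        pr_debug("avg=%d iowait=%d big=%d max=%u big_max=%u\n",
                 avg, iowait_avg, big_avg, max_nr, big_max_nr);

        schedule_delayed_work(&poll_work, msecs_to_jiffies(20));
    }
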
@@ -319,6 +341,8 @@ extern char ___assert_task_state[1 - 2*!!(
/* Task command name length */
#define TASK_COMM_LEN 16
+extern const char *sched_window_reset_reasons[];
+
enum task_event {
PUT_PREV_TASK = 0,
PICK_NEXT_TASK = 1,
@@ -328,6 +352,12 @@ enum task_event {
IRQ_UPDATE = 5,
};
+/* Note: this needs to be in sync with the migrate_type_names array */
+enum migrate_types {
+ GROUP_TO_RQ,
+ RQ_TO_GROUP,
+};
+
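
The note above implies a parallel string table elsewhere in the
scheduler. Purely as an illustration of the sync requirement (the real
migrate_type_names definition is not part of this header), designated
initializers keep each name pinned to its enumerator:

    static const char *migrate_type_names[] = {
        [GROUP_TO_RQ] = "GROUP_TO_RQ",
        [RQ_TO_GROUP] = "RQ_TO_GROUP",
    };
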
#include <linux/spinlock.h>
/*
@@ -348,13 +378,48 @@ extern int lockdep_tasklist_lock_is_held(void);
extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
-extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu, bool hotplug);
extern void init_idle_bootup_task(struct task_struct *idle);
extern cpumask_var_t cpu_isolated_map;
extern int runqueue_is_locked(int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_isolate_count(const cpumask_t *mask, bool include_offline);
+extern int sched_isolate_cpu(int cpu);
+extern int sched_unisolate_cpu(int cpu);
+extern int sched_unisolate_cpu_unlocked(int cpu);
+#else
+static inline int sched_isolate_count(const cpumask_t *mask,
+ bool include_offline)
+{
+ cpumask_t count_mask;
+
+ if (include_offline)
+ cpumask_andnot(&count_mask, mask, cpu_online_mask);
+ else
+ return 0;
+
+ return cpumask_weight(&count_mask);
+}
+
+static inline int sched_isolate_cpu(int cpu)
+{
+ return 0;
+}
+
+static inline int sched_unisolate_cpu(int cpu)
+{
+ return 0;
+}
+
+static inline int sched_unisolate_cpu_unlocked(int cpu)
+{
+ return 0;
+}
+#endif
+
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
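
For illustration, a caller might bracket latency-critical work with the
isolation API declared above; the helper below is a hypothetical sketch,
and exactly what sched_isolate_cpu() drains from the CPU is up to the
implementation.

    /* minimal sketch; assumes fn() is affined to @cpu by the caller */
    static int run_isolated(int cpu, void (*fn)(void))
    {
        int ret;

        ret = sched_isolate_cpu(cpu);
        if (ret)
            return ret;

        fn();   /* latency-critical work */

        return sched_unisolate_cpu(cpu);
    }
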
@@ -391,6 +456,7 @@ extern void scheduler_tick(void);
extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_LOCKUP_DETECTOR
+extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
@@ -400,7 +466,13 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
+extern void watchdog_enable(unsigned int cpu);
+extern void watchdog_disable(unsigned int cpu);
+extern bool watchdog_configured(unsigned int cpu);
#else
+static inline void touch_softlockup_watchdog_sched(void)
+{
+}
static inline void touch_softlockup_watchdog(void)
{
}
@@ -413,6 +485,20 @@ static inline void touch_all_softlockup_watchdogs(void)
static inline void lockup_detector_init(void)
{
}
+static inline void watchdog_enable(unsigned int cpu)
+{
+}
+static inline void watchdog_disable(unsigned int cpu)
+{
+}
+static inline bool watchdog_configured(unsigned int cpu)
+{
+ /*
+ * Pretend the watchdog is always configured.
+ * We will be waiting for the watchdog to be enabled in core isolation.
+ */
+ return true;
+}
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
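
The stub's comment hints at how these hooks pair up with core isolation.
A hedged sketch of that pattern, with the helper name being hypothetical:

    static void isolation_quiesce_watchdog(unsigned int cpu)
    {
        /*
         * Wait until the per-CPU watchdog has been configured before
         * disabling it; the !CONFIG_LOCKUP_DETECTOR stub above returns
         * true, so this loop falls through immediately there.
         */
        while (!watchdog_configured(cpu))
            cpu_relax();

        watchdog_disable(cpu);
    }
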
@@ -1361,8 +1447,8 @@ struct sched_statistics {
};
#endif
-#ifdef CONFIG_SCHED_WALT
#define RAVG_HIST_SIZE_MAX 5
+#define NUM_BUSY_BUCKETS 10
/* ravg represents frequency scaled cpu-demand of tasks */
struct ravg {
@@ -1382,19 +1468,31 @@ struct ravg {
* sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
* demand for tasks.
*
- * 'curr_window' represents task's contribution to cpu busy time
- * statistics (rq->curr_runnable_sum) in current window
+ * 'curr_window_cpu' represents task's contribution to cpu busy time on
+ * various CPUs in the current window
+ *
+ * 'prev_window_cpu' represents task's contribution to cpu busy time on
+ * various CPUs in the previous window
*
- * 'prev_window' represents task's contribution to cpu busy time
- * statistics (rq->prev_runnable_sum) in previous window
+ * 'curr_window' represents the sum of all entries in curr_window_cpu
+ *
+ * 'prev_window' represents the sum of all entries in prev_window_cpu
+ *
+ * 'pred_demand' represents task's current predicted cpu busy time
+ *
+ * 'busy_buckets' groups historical busy time into different buckets
+ * used for prediction
*/
u64 mark_start;
u32 sum, demand;
u32 sum_history[RAVG_HIST_SIZE_MAX];
+ u32 *curr_window_cpu, *prev_window_cpu;
u32 curr_window, prev_window;
+ u64 curr_burst, avg_burst, avg_sleep_time;
u16 active_windows;
+ u32 pred_demand;
+ u8 busy_buckets[NUM_BUSY_BUCKETS];
};
-#endif
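
The comment block documents curr_window as the sum of the per-CPU
entries. A minimal sketch of that invariant, assuming curr_window_cpu is
indexed by CPU id (consistent with free_task_load_ptrs() existing to
release the arrays):

    static u32 ravg_curr_window_sum(const struct ravg *ravg)
    {
        u32 sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
            sum += ravg->curr_window_cpu[cpu];

        return sum;     /* documented to equal ravg->curr_window */
    }
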
struct sched_entity {
struct load_weight load; /* for load-balancing */
@@ -1433,6 +1531,8 @@ struct sched_rt_entity {
unsigned long timeout;
unsigned long watchdog_stamp;
unsigned int time_slice;
+ unsigned short on_rq;
+ unsigned short on_list;
/* Accesses for these must be guarded by rq->lock of the task's rq */
bool schedtune_enqueued;
@@ -1568,16 +1668,21 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
-#ifdef CONFIG_SCHED_WALT
+#ifdef CONFIG_SCHED_HMP
struct ravg ravg;
/*
* 'init_load_pct' represents the initial task load assigned to children
* of this task
*/
u32 init_load_pct;
+ u64 last_wake_ts;
+ u64 last_switch_out_ts;
+ u64 last_cpu_selected_ts;
+ struct related_thread_group *grp;
+ struct list_head grp_list;
+ u64 cpu_cycles;
u64 last_sleep_ts;
#endif
-
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
@@ -1819,6 +1924,9 @@ struct task_struct {
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
#endif
+#ifdef CONFIG_UBSAN
+ unsigned int in_ubsan;
+#endif
/* journalling filesystem info */
void *journal_info;
@@ -2131,8 +2239,8 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
return tsk->tgid;
}
-
static inline int pid_alive(const struct task_struct *p);
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
struct pid_namespace *ns)
@@ -2269,6 +2377,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
* Per process flags
*/
+#define PF_WAKE_UP_IDLE 0x00000002 /* try to wake up on an idle CPU */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
@@ -2442,6 +2551,7 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
+extern bool cpupri_check_rt(void);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -2454,8 +2564,103 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return -EINVAL;
return 0;
}
+static inline bool cpupri_check_rt(void)
+{
+ return false;
+}
#endif
+struct sched_load {
+ unsigned long prev_load;
+ unsigned long new_task_load;
+ unsigned long predicted_load;
+};
+
+struct cpu_cycle_counter_cb {
+ u64 (*get_cpu_cycle_counter)(int cpu);
+};
+
+#define MAX_NUM_CGROUP_COLOC_ID 20
+
+#ifdef CONFIG_SCHED_HMP
+extern void free_task_load_ptrs(struct task_struct *p);
+extern int sched_set_window(u64 window_start, unsigned int window_size);
+extern unsigned long sched_get_busy(int cpu);
+extern void sched_get_cpus_busy(struct sched_load *busy,
+ const struct cpumask *query_cpus);
+extern void sched_set_io_is_busy(int val);
+extern int sched_set_boost(int enable);
+extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
+extern u32 sched_get_init_task_load(struct task_struct *p);
+extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
+extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
+extern int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle);
+extern unsigned int sched_get_cluster_wake_idle(int cpu);
+extern int sched_update_freq_max_load(const cpumask_t *cpumask);
+extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+ u32 fmin, u32 fmax);
+extern void sched_set_cpu_cstate(int cpu, int cstate,
+ int wakeup_energy, int wakeup_latency);
+extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
+ int wakeup_energy, int wakeup_latency);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern u64 sched_ktime_clock(void);
+extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
+extern unsigned int sched_get_group_id(struct task_struct *p);
+
+#else /* CONFIG_SCHED_HMP */
+static inline void free_task_load_ptrs(struct task_struct *p) { }
+
+static inline u64 sched_ktime_clock(void)
+{
+ return 0;
+}
+
+static inline int
+register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+ return 0;
+}
+
+static inline int sched_set_window(u64 window_start, unsigned int window_size)
+{
+ return -EINVAL;
+}
+static inline unsigned long sched_get_busy(int cpu)
+{
+ return 0;
+}
+static inline void sched_get_cpus_busy(struct sched_load *busy,
+			const struct cpumask *query_cpus) { }
+
+static inline void sched_set_io_is_busy(int val) { }
+
+static inline int sched_set_boost(int enable)
+{
+ return -EINVAL;
+}
+
+static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
+{
+ return 0;
+}
+
+static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+ u32 fmin, u32 fmax) { }
+
+static inline void
+sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
+{
+}
+
+static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
+ int dstate, int wakeup_energy, int wakeup_latency)
+{
+}
+#endif /* CONFIG_SCHED_HMP */
+
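
For illustration, a platform driver could feed cycle counts to the
scheduler through the callback structure declared above; the counter
source below is a hypothetical stub.

    static u64 demo_get_cpu_cycle_counter(int cpu)
    {
        return 0;       /* read a per-CPU cycle source here */
    }

    static struct cpu_cycle_counter_cb demo_cb = {
        .get_cpu_cycle_counter = demo_get_cpu_cycle_counter,
    };

    static int __init demo_register(void)
    {
        return register_cpu_cycle_counter_cb(&demo_cb);
    }
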
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
@@ -2464,6 +2669,14 @@ static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
+static inline void set_wake_up_idle(bool enabled)
+{
+ if (enabled)
+ current->flags |= PF_WAKE_UP_IDLE;
+ else
+ current->flags &= ~PF_WAKE_UP_IDLE;
+}
+
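
A latency-sensitive kthread could opt in to idle-CPU wakeups with this
helper; the thread body below is a placeholder sketch.

    #include <linux/kthread.h>

    static int latency_thread_fn(void *data)
    {
        set_wake_up_idle(true);     /* sets PF_WAKE_UP_IDLE on current */

        while (!kthread_should_stop())
            schedule_timeout_interruptible(HZ);

        set_wake_up_idle(false);
        return 0;
    }
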
/*
* Do not use outside of architecture code which knows its limitations.
*
@@ -2481,8 +2694,8 @@ extern u64 local_clock(void);
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);
-
extern void sched_clock_init(void);
+extern int sched_clock_initialized(void);
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
@@ -2529,7 +2742,7 @@ extern unsigned long long
task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP)
extern void sched_exec(void);
#else
#define sched_exec() {}
@@ -2665,6 +2878,7 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_process_no_notif(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
@@ -2673,6 +2887,11 @@ extern void wake_up_new_task(struct task_struct *tsk);
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
+#ifdef CONFIG_SCHED_HMP
+extern void sched_exit(struct task_struct *p);
+#else
+static inline void sched_exit(struct task_struct *p) { }
+#endif
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
@@ -2795,7 +3014,7 @@ static inline void mmdrop(struct mm_struct * mm)
}
/* mmput gets rid of the mappings and all user-space */
-extern void mmput(struct mm_struct *);
+extern int mmput(struct mm_struct *);
/* same as above but performs the slow path from the async context. Can
* be called from the atomic context as well
*/
@@ -3215,6 +3434,15 @@ static inline void cond_resched_rcu(void)
#endif
}
+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+ return p->preempt_disable_ip;
+#else
+ return 0;
+#endif
+}
+
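
A debug path could report the recorded site with the %pS printk format;
the helper below is an illustrative sketch only.

    static void report_preempt_site(struct task_struct *p)
    {
        unsigned long ip = get_preempt_disable_ip(p);

        if (ip)
            pr_err("preemption disabled at: %pS\n", (void *)ip);
    }
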
/*
* Does a critical section need to be broken due to another
* task waiting?: (technically does not depend on CONFIG_PREEMPT,
@@ -3370,6 +3598,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+extern struct atomic_notifier_head migration_notifier_head;
+struct migration_notify_data {
+ int src_cpu;
+ int dest_cpu;
+ int load;
+};
+
+extern struct atomic_notifier_head load_alert_notifier_head;
+
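
A subsystem interested in task placement could subscribe to the chain
above; the callback and its registration below are an illustrative
sketch, and the meaning of the notifier's action argument is left to the
implementation.

    #include <linux/notifier.h>

    static int demo_migration_cb(struct notifier_block *nb,
                                 unsigned long action, void *data)
    {
        struct migration_notify_data *mnd = data;

        pr_debug("task moved %d -> %d (load %d)\n",
                 mnd->src_cpu, mnd->dest_cpu, mnd->load);
        return NOTIFY_OK;
    }

    static struct notifier_block demo_migration_nb = {
        .notifier_call = demo_migration_cb,
    };

    static int __init demo_init(void)
    {
        atomic_notifier_chain_register(&migration_notifier_head,
                                       &demo_migration_nb);
        return 0;
    }
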
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);