Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 262
1 file changed, 250 insertions, 12 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0af3977aeda2..5f59b7fc2a13 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -178,6 +178,28 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 extern u64 nr_running_integral(unsigned int cpu);
 #endif

+extern void sched_update_nr_prod(int cpu, long delta, bool inc);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+				     unsigned int *max_nr,
+				     unsigned int *big_max_nr);
+extern u64 sched_get_cpu_last_busy_time(int cpu);
+
+#ifdef CONFIG_SMP
+extern u32 sched_get_wake_up_idle(struct task_struct *p);
+extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
+#else
+static inline u32 sched_get_wake_up_idle(struct task_struct *p)
+{
+	return 0;
+}
+
+static inline int sched_set_wake_up_idle(struct task_struct *p,
+					 int wake_up_idle)
+{
+	return 0;
+}
+#endif /* CONFIG_SMP */
+
 extern void calc_global_load(unsigned long ticks);

 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
@@ -319,6 +341,8 @@ extern char ___assert_task_state[1 - 2*!!(
 /* Task command name length */
 #define TASK_COMM_LEN 16

+extern const char *sched_window_reset_reasons[];
+
 enum task_event {
 	PUT_PREV_TASK   = 0,
 	PICK_NEXT_TASK  = 1,
@@ -328,6 +352,12 @@ enum task_event {
 	IRQ_UPDATE	= 5,
 };

+/* Note: this needs to be in sync with the migrate_type_names array */
+enum migrate_types {
+	GROUP_TO_RQ,
+	RQ_TO_GROUP,
+};
+
 #include <linux/spinlock.h>

 /*
@@ -355,6 +385,41 @@ extern cpumask_var_t cpu_isolated_map;

 extern int runqueue_is_locked(int cpu);

+#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_isolate_count(const cpumask_t *mask, bool include_offline);
+extern int sched_isolate_cpu(int cpu);
+extern int sched_unisolate_cpu(int cpu);
+extern int sched_unisolate_cpu_unlocked(int cpu);
+#else
+static inline int sched_isolate_count(const cpumask_t *mask,
+				      bool include_offline)
+{
+	cpumask_t count_mask;
+
+	if (include_offline)
+		cpumask_andnot(&count_mask, mask, cpu_online_mask);
+	else
+		return 0;
+
+	return cpumask_weight(&count_mask);
+}
+
+static inline int sched_isolate_cpu(int cpu)
+{
+	return 0;
+}
+
+static inline int sched_unisolate_cpu(int cpu)
+{
+	return 0;
+}
+
+static inline int sched_unisolate_cpu_unlocked(int cpu)
+{
+	return 0;
+}
+#endif
+
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void nohz_balance_enter_idle(int cpu);
 extern void set_cpu_sd_state_idle(void);
@@ -391,6 +456,7 @@ extern void scheduler_tick(void);
 extern void sched_show_task(struct task_struct *p);

 #ifdef CONFIG_LOCKUP_DETECTOR
+extern void touch_softlockup_watchdog_sched(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
@@ -400,7 +466,13 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
 extern unsigned int softlockup_panic;
 extern unsigned int hardlockup_panic;
 void lockup_detector_init(void);
+extern void watchdog_enable(unsigned int cpu);
+extern void watchdog_disable(unsigned int cpu);
+extern bool watchdog_configured(unsigned int cpu);
 #else
+static inline void touch_softlockup_watchdog_sched(void)
+{
+}
 static inline void touch_softlockup_watchdog(void)
 {
 }
@@ -413,6 +485,20 @@ static inline void touch_all_softlockup_watchdogs(void)
 static inline void lockup_detector_init(void)
 {
 }
+static inline void watchdog_enable(unsigned int cpu)
+{
+}
+static inline void watchdog_disable(unsigned int cpu)
+{
+}
+static inline bool watchdog_configured(unsigned int cpu)
+{
+	/*
+	 * Pretend the watchdog is always configured.
+	 * We will be waiting for the watchdog to be enabled in core isolation.
+	 */
+	return true;
+}
 #endif

 #ifdef CONFIG_DETECT_HUNG_TASK
@@ -1361,8 +1447,8 @@ struct sched_statistics {
 };
 #endif

-#ifdef CONFIG_SCHED_WALT
 #define RAVG_HIST_SIZE_MAX 5
+#define NUM_BUSY_BUCKETS 10

 /* ravg represents frequency scaled cpu-demand of tasks */
 struct ravg {
@@ -1382,19 +1468,31 @@ struct ravg {
 	 * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
 	 * demand for tasks.
 	 *
-	 * 'curr_window' represents task's contribution to cpu busy time
-	 * statistics (rq->curr_runnable_sum) in current window
+	 * 'curr_window_cpu' represents task's contribution to cpu busy time on
+	 * various CPUs in the current window
+	 *
+	 * 'prev_window_cpu' represents task's contribution to cpu busy time on
+	 * various CPUs in the previous window
 	 *
-	 * 'prev_window' represents task's contribution to cpu busy time
-	 * statistics (rq->prev_runnable_sum) in previous window
+	 * 'curr_window' represents the sum of all entries in curr_window_cpu
+	 *
+	 * 'prev_window' represents the sum of all entries in prev_window_cpu
+	 *
+	 * 'pred_demand' represents task's current predicted cpu busy time
+	 *
+	 * 'busy_buckets' groups historical busy time into different buckets
+	 * used for prediction
 	 */
 	u64 mark_start;
 	u32 sum, demand;
 	u32 sum_history[RAVG_HIST_SIZE_MAX];
+	u32 *curr_window_cpu, *prev_window_cpu;
 	u32 curr_window, prev_window;
+	u64 curr_burst, avg_burst, avg_sleep_time;
 	u16 active_windows;
+	u32 pred_demand;
+	u8 busy_buckets[NUM_BUSY_BUCKETS];
 };
-#endif

 struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
@@ -1433,6 +1531,8 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	unsigned long watchdog_stamp;
 	unsigned int time_slice;
+	unsigned short on_rq;
+	unsigned short on_list;

 	/* Accesses for these must be guarded by rq->lock of the task's rq */
 	bool schedtune_enqueued;
@@ -1568,16 +1668,21 @@ struct task_struct {
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
-#ifdef CONFIG_SCHED_WALT
+#ifdef CONFIG_SCHED_HMP
 	struct ravg ravg;
 	/*
 	 * 'init_load_pct' represents the initial task load assigned to children
 	 * of this task
 	 */
 	u32 init_load_pct;
+	u64 last_wake_ts;
+	u64 last_switch_out_ts;
+	u64 last_cpu_selected_ts;
+	struct related_thread_group *grp;
+	struct list_head grp_list;
+	u64 cpu_cycles;
 	u64 last_sleep_ts;
 #endif
-
 #ifdef CONFIG_CGROUP_SCHED
 	struct task_group *sched_task_group;
 #endif
@@ -1819,6 +1924,9 @@ struct task_struct {
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
 	gfp_t lockdep_reclaim_gfp;
 #endif
+#ifdef CONFIG_UBSAN
+	unsigned int in_ubsan;
+#endif

 /* journalling filesystem info */
 	void *journal_info;
@@ -2131,8 +2239,8 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
 	return tsk->tgid;
 }

-
 static inline int pid_alive(const struct task_struct *p);
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
 					struct pid_namespace *ns)
@@ -2269,6 +2377,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 /*
  * Per process flags
  */
+#define PF_WAKE_UP_IDLE	0x00000002	/* try to wake up on an idle CPU */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -2451,6 +2560,7 @@ extern void do_set_cpus_allowed(struct task_struct *p,
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
+extern bool cpupri_check_rt(void);
 #else
 static inline void do_set_cpus_allowed(struct task_struct *p,
 				       const struct cpumask *new_mask)
 {
@@ -2463,8 +2573,103 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 		return -EINVAL;
 	return 0;
 }
+static inline bool cpupri_check_rt(void)
+{
+	return false;
+}
 #endif

+struct sched_load {
+	unsigned long prev_load;
+	unsigned long new_task_load;
+	unsigned long predicted_load;
+};
+
+struct cpu_cycle_counter_cb {
+	u64 (*get_cpu_cycle_counter)(int cpu);
+};
+
+#define MAX_NUM_CGROUP_COLOC_ID	20
+
+#ifdef CONFIG_SCHED_HMP
+extern void free_task_load_ptrs(struct task_struct *p);
+extern int sched_set_window(u64 window_start, unsigned int window_size);
+extern unsigned long sched_get_busy(int cpu);
+extern void sched_get_cpus_busy(struct sched_load *busy,
+				const struct cpumask *query_cpus);
+extern void sched_set_io_is_busy(int val);
+extern int sched_set_boost(int enable);
+extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
+extern u32 sched_get_init_task_load(struct task_struct *p);
+extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
+extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
+extern int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle);
+extern unsigned int sched_get_cluster_wake_idle(int cpu);
+extern int sched_update_freq_max_load(const cpumask_t *cpumask);
+extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+					  u32 fmin, u32 fmax);
+extern void sched_set_cpu_cstate(int cpu, int cstate,
+				 int wakeup_energy, int wakeup_latency);
+extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
+				     int wakeup_energy, int wakeup_latency);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern u64 sched_ktime_clock(void);
+extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
+extern unsigned int sched_get_group_id(struct task_struct *p);
+
+#else /* CONFIG_SCHED_HMP */
+static inline void free_task_load_ptrs(struct task_struct *p) { }
+
+static inline u64 sched_ktime_clock(void)
+{
+	return 0;
+}
+
+static inline int
+register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+	return 0;
+}
+
+static inline int sched_set_window(u64 window_start, unsigned int window_size)
+{
+	return -EINVAL;
+}
+static inline unsigned long sched_get_busy(int cpu)
+{
+	return 0;
+}
+static inline void sched_get_cpus_busy(struct sched_load *busy,
+				       const struct cpumask *query_cpus) {};
+
+static inline void sched_set_io_is_busy(int val) {};
+
+static inline int sched_set_boost(int enable)
+{
+	return -EINVAL;
+}
+
+static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
+{
+	return 0;
+}
+
+static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+						 u32 fmin, u32 fmax) { }
+
+static inline void
+sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
+{
+}
+
+static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
+		int dstate, int wakeup_energy, int wakeup_latency)
+{
+}
+#endif /* CONFIG_SCHED_HMP */
+
 #ifdef CONFIG_NO_HZ_COMMON
 void calc_load_enter_idle(void);
 void calc_load_exit_idle(void);
@@ -2473,6 +2678,14 @@ static inline void calc_load_enter_idle(void) { }
 static inline void calc_load_exit_idle(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */

+static inline void set_wake_up_idle(bool enabled)
+{
+	if (enabled)
+		current->flags |= PF_WAKE_UP_IDLE;
+	else
+		current->flags &= ~PF_WAKE_UP_IDLE;
+}
+
 /*
  * Do not use outside of architecture code which knows its limitations.
  *
@@ -2490,8 +2703,8 @@ extern u64 local_clock(void);
 extern u64 running_clock(void);
 extern u64 sched_clock_cpu(int cpu);

-
 extern void sched_clock_init(void);
+extern int sched_clock_initialized(void);

 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
@@ -2538,7 +2751,7 @@ extern unsigned long long task_sched_runtime(struct task_struct *task);

 /* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP)
 extern void sched_exec(void);
 #else
 #define sched_exec() {}
 #endif
@@ -2674,6 +2887,7 @@ extern void xtime_update(unsigned long ticks);

 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_process_no_notif(struct task_struct *tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 #ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
@@ -2682,6 +2896,11 @@ extern void wake_up_new_task(struct task_struct *tsk);
 #endif
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
+#ifdef CONFIG_SCHED_HMP
+extern void sched_exit(struct task_struct *p);
+#else
+static inline void sched_exit(struct task_struct *p) { }
+#endif

 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
@@ -2809,7 +3028,7 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
 }

 /* mmput gets rid of the mappings and all user-space */
-extern void mmput(struct mm_struct *);
+extern int mmput(struct mm_struct *);

 /* same as above but performs the slow path from the async kontext. Can
  * be called from the atomic context as well
  */
@@ -3234,6 +3453,15 @@ static inline void cond_resched_rcu(void)
 #endif
 }

+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+	return p->preempt_disable_ip;
+#else
+	return 0;
+#endif
+}
+
 /*
  * Does a critical section need to be broken due to another
  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
@@ -3389,6 +3617,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)

 #endif /* CONFIG_SMP */

+extern struct atomic_notifier_head migration_notifier_head;
+struct migration_notify_data {
+	int src_cpu;
+	int dest_cpu;
+	int load;
+};
+
+extern struct atomic_notifier_head load_alert_notifier_head;
+
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

@@ -3482,6 +3719,7 @@ static inline unsigned long rlimit_max(unsigned int limit)
 #define SCHED_CPUFREQ_RT	(1U << 0)
 #define SCHED_CPUFREQ_DL	(1U << 1)
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
+#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)

 #ifdef CONFIG_CPU_FREQ
 struct update_util_data {
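
The hunk against lines 355/385 exports a core-isolation interface (sched_isolate_cpu(), sched_unisolate_cpu(), sched_isolate_count()), with no-op stubs when CONFIG_HOTPLUG_CPU is disabled. A minimal sketch of a caller follows; only the declarations added above are assumed, and the wrapper function itself is hypothetical.

/* Sketch only: assumes the sched_isolate_*() declarations added above. */
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static int isolate_and_release_cpu(int cpu)
{
	int ret;

	/* Take the CPU out of regular scheduling; 0 means success. */
	ret = sched_isolate_cpu(cpu);
	if (ret)
		return ret;

	/*
	 * Count how many possible CPUs are currently isolated,
	 * without treating offline CPUs as isolated.
	 */
	pr_info("isolated CPUs: %d\n",
		sched_isolate_count(cpu_possible_mask, false));

	/* Put the CPU back into service. */
	return sched_unisolate_cpu(cpu);
}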
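The CONFIG_SCHED_HMP block declares struct cpu_cycle_counter_cb and register_cpu_cycle_counter_cb(), which let a platform driver feed per-CPU cycle counts into the window-based load accounting. Below is a hedged sketch of a registration; the read callback is a placeholder, and a real driver would read a hardware counter.

/* Sketch only: demo_* names are hypothetical. */
#include <linux/init.h>
#include <linux/sched.h>

static u64 demo_read_cpu_cycles(int cpu)
{
	/* A real implementation would return a monotonic per-CPU cycle count. */
	return 0;
}

static struct cpu_cycle_counter_cb demo_cycle_cb = {
	.get_cpu_cycle_counter = demo_read_cpu_cycles,
};

static int __init demo_register_cycle_counter(void)
{
	/*
	 * Returns 0 on success; the !CONFIG_SCHED_HMP stub above also
	 * returns 0 and ignores the callback.
	 */
	return register_cpu_cycle_counter_cb(&demo_cycle_cb);
}
early_initcall(demo_register_cycle_counter);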
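PF_WAKE_UP_IDLE together with the set_wake_up_idle() inline helper and the sched_get_wake_up_idle()/sched_set_wake_up_idle() accessors lets a task ask to be woken on an idle CPU. A small sketch, assuming only those helpers; the function and task pointer are illustrative.

/* Sketch only: uses the PF_WAKE_UP_IDLE helpers declared above. */
#include <linux/sched.h>

static void hint_idle_cpu_wakeups(struct task_struct *worker)
{
	/* For the current task, the inline helper flips PF_WAKE_UP_IDLE. */
	set_wake_up_idle(true);

	/*
	 * For another task, use the exported setter; the !CONFIG_SMP stub
	 * returns 0 and the hint becomes a no-op.
	 */
	sched_set_wake_up_idle(worker, 1);
}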
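The last hunks export migration_notifier_head and load_alert_notifier_head together with struct migration_notify_data. A sketch of a listener is below; the assumption that the chain passes a struct migration_notify_data pointer as the notifier data is suggested by the header but not spelled out here, so treat it as unverified.

/* Sketch only: assumes the chain delivers a struct migration_notify_data *. */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/sched.h>

static int demo_migration_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct migration_notify_data *mnd = data;

	pr_debug("task load %d migrated: CPU%d -> CPU%d\n",
		 mnd->load, mnd->src_cpu, mnd->dest_cpu);
	return NOTIFY_OK;
}

static struct notifier_block demo_migration_nb = {
	.notifier_call = demo_migration_notify,
};

/*
 * Registration, e.g. from module init:
 *	atomic_notifier_chain_register(&migration_notifier_head,
 *				       &demo_migration_nb);
 */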