Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/cpufreq.h      |  3
-rw-r--r--  include/linux/init_task.h    |  1
-rw-r--r--  include/linux/irq.h          | 29
-rw-r--r--  include/linux/mm_types.h     |  2
-rw-r--r--  include/linux/sched.h        | 11
-rw-r--r--  include/linux/sched/sysctl.h |  6
-rw-r--r--  include/linux/time.h         |  1
7 files changed, 53 insertions(+), 0 deletions(-)
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 9302d016b89f..8e9d08dfbd18 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -123,6 +123,9 @@ struct cpufreq_policy {
unsigned int up_transition_delay_us;
unsigned int down_transition_delay_us;
+ /* Boost switch for tasks with p->in_iowait set */
+ bool iowait_boost_enable;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
int cached_resolved_idx;
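[Editor's note: a minimal sketch of how a governor might consult the new per-policy switch before applying an I/O-wait boost. The helper name and its call site are assumptions, not part of this patch; only policy->iowait_boost_enable and SCHED_CPUFREQ_IOWAIT come from this series.]

#include <linux/cpufreq.h>
#include <linux/sched.h>	/* SCHED_CPUFREQ_IOWAIT */

/*
 * Hypothetical helper: honour the per-policy boost switch before
 * treating an update as an I/O-wait boost request.
 */
static bool want_iowait_boost(struct cpufreq_policy *policy,
			      unsigned int sched_flags)
{
	if (!policy->iowait_boost_enable)
		return false;	/* boost disabled for this policy */

	return sched_flags & SCHED_CPUFREQ_IOWAIT;
}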
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 021b1e9ff6cd..8aed56931361 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -208,6 +208,7 @@ extern struct task_group root_task_group;
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
.nr_cpus_allowed= NR_CPUS, \
+ .cpus_requested = CPU_MASK_ALL, \
.mm = NULL, \
.active_mm = &init_mm, \
.restart_block = { \
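[Editor's note: cpus_requested is initialised to CPU_MASK_ALL alongside cpus_allowed; a plausible use, not shown in this diff, is to remember the affinity a task last asked for so it can be reapplied later. The helper below is purely illustrative.]

#include <linux/sched.h>
#include <linux/cpumask.h>

/*
 * Hypothetical: record the user-requested mask when affinity is set,
 * so a later restriction of cpus_allowed can be undone from it.
 */
static void record_requested_affinity(struct task_struct *p,
				      const struct cpumask *new_mask)
{
	cpumask_copy(&p->cpus_requested, new_mask);
}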
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 8da001eb82aa..0e57f41bde84 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -136,6 +136,9 @@ struct irq_domain;
* @node: node index useful for balancing
* @handler_data: per-IRQ data for the irq_chip methods
* @affinity: IRQ affinity on SMP
+ * @effective_affinity: The effective IRQ affinity on SMP as some irq
+ * chips do not allow multi CPU destinations.
+ * A subset of @affinity.
* @msi_desc: MSI descriptor
*/
struct irq_common_data {
@@ -146,6 +149,9 @@ struct irq_common_data {
void *handler_data;
struct msi_desc *msi_desc;
cpumask_var_t affinity;
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ cpumask_var_t effective_affinity;
+#endif
};
/**
@@ -690,6 +696,29 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
return d->common->affinity;
}
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+static inline
+struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+{
+ return d->common->effective_affinity;
+}
+static inline void irq_data_update_effective_affinity(struct irq_data *d,
+ const struct cpumask *m)
+{
+ cpumask_copy(d->common->effective_affinity, m);
+}
+#else
+static inline void irq_data_update_effective_affinity(struct irq_data *d,
+ const struct cpumask *m)
+{
+}
+static inline
+struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+{
+ return d->common->affinity;
+}
+#endif
+
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
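[Editor's note: a usage sketch for the new accessors. An irqchip that can only route an interrupt to a single CPU records the CPU it actually programmed; the callback and the commented-out hardware write are placeholders, only the irq_data_update_effective_affinity() call reflects this patch.]

#include <linux/irq.h>
#include <linux/cpumask.h>

/*
 * Illustrative ->irq_set_affinity() callback for a chip that targets
 * exactly one CPU. The chip-specific register write is omitted.
 */
static int demo_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	unsigned int cpu = cpumask_first(mask_val);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* demo_hw_route_irq(d, cpu); -- chip-specific, omitted */

	/* Record the single CPU that was actually programmed. */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}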
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 29c17fae9bbf..1019e8d3c88f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -442,6 +442,8 @@ struct mm_struct {
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */
unsigned long stack_vm; /* VM_GROWSUP/DOWN */
unsigned long def_flags;
+
+ spinlock_t arg_lock; /* protect the below fields */
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
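[Editor's note: the new arg_lock serialises updates to the argument/environment boundary fields that follow it. A minimal sketch of the intended locking pattern; the function name is hypothetical.]

#include <linux/mm_types.h>
#include <linux/spinlock.h>

/*
 * Hypothetical updater: arg_start/arg_end are among "the below fields"
 * that arg_lock is documented to protect.
 */
static void demo_set_arg_range(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	spin_lock(&mm->arg_lock);
	mm->arg_start = start;
	mm->arg_end   = end;
	spin_unlock(&mm->arg_lock);
}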
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 70c1f7f9e4fa..4e212132a274 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1668,6 +1668,15 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+#ifdef CONFIG_SCHED_WALT
+ struct ravg ravg;
+ /*
+ * 'init_load_pct' represents the initial task load assigned to children
+ * of this task
+ */
+ u32 init_load_pct;
+ u64 last_sleep_ts;
+#endif
#ifdef CONFIG_SCHED_HMP
struct ravg ravg;
/*
@@ -1700,6 +1709,7 @@ struct task_struct {
unsigned int policy;
int nr_cpus_allowed;
cpumask_t cpus_allowed;
+ cpumask_t cpus_requested;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -3723,6 +3733,7 @@ static inline unsigned long rlimit_max(unsigned int limit)
#define SCHED_CPUFREQ_DL (1U << 1)
#define SCHED_CPUFREQ_IOWAIT (1U << 2)
#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
+#define SCHED_CPUFREQ_WALT (1U << 4)
#ifdef CONFIG_CPU_FREQ
struct update_util_data {
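[Editor's note: SCHED_CPUFREQ_WALT extends the flag bits passed to the cpufreq update hook. The callback below sketches how a governor's update_util_data::func might test it; the callback body is illustrative, only the flag definitions come from this tree.]

#include <linux/sched.h>

/*
 * Illustrative update_util hook that distinguishes WALT-originated
 * utilization updates from I/O-wait boost requests.
 */
static void demo_update_util(struct update_util_data *data, u64 time,
			     unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_WALT) {
		/* utilization was produced by the WALT signal */
	}

	if (flags & SCHED_CPUFREQ_IOWAIT) {
		/* task woke from I/O wait; a governor may boost here */
	}
}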
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 1e1fcb8791a7..c85fe9872d07 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -41,6 +41,12 @@ extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_cstate_aware;
+#ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int sysctl_sched_use_walt_task_util;
+extern unsigned int sysctl_sched_walt_init_task_load_pct;
+extern unsigned int sysctl_sched_walt_cpu_high_irqload;
+#endif
#ifdef CONFIG_SCHED_HMP
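[Editor's note: tunables declared this way are normally exposed through a ctl_table. The entry below is a sketch of how one of the new WALT knobs might be wired up under CONFIG_SCHED_WALT; the table itself is not part of this patch.]

#include <linux/sysctl.h>
#include <linux/sched/sysctl.h>

/* Illustrative sysctl entry for one of the new WALT tunables. */
static struct ctl_table demo_walt_table[] = {
	{
		.procname	= "sched_walt_init_task_load_pct",
		.data		= &sysctl_sched_walt_init_task_load_pct,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};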
diff --git a/include/linux/time.h b/include/linux/time.h
index 62cc50700004..cbb55e004342 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -9,6 +9,7 @@
extern struct timezone sys_tz;
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
+#define TIMER_LOCK_TIGHT_LOOP_DELAY_NS 350
static inline int timespec_equal(const struct timespec *a,
const struct timespec *b)
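[Editor's note: TIMER_LOCK_TIGHT_LOOP_DELAY_NS reads like a nanosecond back-off for a tight lock-retry loop, but its consumer is not visible in this diff. The snippet below is a hypothetical use; the lock and loop structure are assumptions.]

#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/time.h>

/*
 * Hypothetical: back off briefly between trylock attempts using the
 * new constant instead of spinning flat out.
 */
static void demo_tight_loop_lock(spinlock_t *lock)
{
	while (!spin_trylock(lock))
		ndelay(TIMER_LOCK_TIGHT_LOOP_DELAY_NS);
}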