author		Syed Rameez Mustafa <rameezmustafa@codeaurora.org>	2016-11-01 18:13:36 -0700
committer	Syed Rameez Mustafa <rameezmustafa@codeaurora.org>	2016-11-07 14:46:21 -0800
commit		576259be4abfbd8f7bee476b48c3ce2eee05cfb4 (patch)
tree		8946d71e37fd74204274b423ef18514fe41e1921 /kernel/sched/sched.h
parent		ecd8f7800fc4aa3b62cfb01773d8e99d5bee228d (diff)
sched/hmp: Use GFP_KERNEL for top task memory allocations
Task load structure allocations can consume a lot of memory as the number of tasks grows. They can also exhaust the atomic memory pool fairly quickly if a workload spawns many threads in a short amount of time, increasing the likelihood of failed allocations. Move the call to init_new_task_load() outside atomic context and start using GFP_KERNEL for these allocations; there is no need for them to happen in atomic context.

Change-Id: I357772e10bf8958804d9cd0c78eda27139054b21
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
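For readers unfamiliar with the GFP flags involved, here is a minimal sketch of the pattern the message describes: once the allocation site is no longer atomic, it can use GFP_KERNEL instead of drawing from the atomic reserves. The helper name and buffer shape below are illustrative assumptions, not code from this tree.

/*
 * Illustrative sketch only (not part of this patch). The helper name and
 * the per-CPU window buffer are assumptions for illustration.
 */
#include <linux/sched.h>
#include <linux/slab.h>

static u32 *hypothetical_alloc_top_task_window(void)
{
        /*
         * GFP_KERNEL may sleep, so this must run in process context with
         * no spinlocks held (e.g. early in fork, before the new task is
         * visible to the scheduler). GFP_ATOMIC, by contrast, cannot
         * sleep and draws from a limited reserve that a thread-spawning
         * workload can exhaust, making allocation failures more likely.
         */
        return kcalloc(NR_CPUS, sizeof(u32), GFP_KERNEL);
}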
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4289bf6cd642..de29c926379b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1079,6 +1079,7 @@ extern unsigned int __read_mostly sched_downmigrate;
 extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
 extern unsigned int __read_mostly sched_load_granule;
+extern void free_task_load_ptrs(struct task_struct *p);
 extern void init_new_task_load(struct task_struct *p, bool idle_task);
 extern u64 sched_ktime_clock(void);
 extern int got_boost_kick(void);
@@ -1527,6 +1528,8 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
 	return NULL;
 }
+static inline void free_task_load_ptrs(struct task_struct *p) { }
+
 static inline void init_new_task_load(struct task_struct *p, bool idle_task)
 {
 }
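The inline stub added in the second hunk covers !CONFIG_SCHED_HMP builds; the HMP-enabled definition is provided elsewhere in the scheduler code. A plausible sketch of such a counterpart is shown below, under the assumption that the per-task load pointers live in p->ravg; the field names are illustrative, not taken from this patch.

/*
 * Illustrative sketch only: a plausible HMP-enabled counterpart to the
 * inline stub declared above.
 */
void free_task_load_ptrs(struct task_struct *p)
{
        /* Assumed field names; the real layout is defined by the HMP code. */
        kfree(p->ravg.curr_window_cpu);
        kfree(p->ravg.prev_window_cpu);

        p->ravg.curr_window_cpu = NULL;
        p->ravg.prev_window_cpu = NULL;
}

With init_new_task_load() now free to use GFP_KERNEL, the fork path can call it before taking any runqueue locks and, if a later step of process creation fails, unwind the allocations with free_task_load_ptrs(); the empty inline keeps !CONFIG_SCHED_HMP call sites free of #ifdefs.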