summaryrefslogtreecommitdiff
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
authorSyed Rameez Mustafa <rameezmustafa@codeaurora.org>2016-11-01 18:13:36 -0700
committerSyed Rameez Mustafa <rameezmustafa@codeaurora.org>2016-11-07 14:46:21 -0800
commit576259be4abfbd8f7bee476b48c3ce2eee05cfb4 (patch)
tree8946d71e37fd74204274b423ef18514fe41e1921 /kernel/sched/core.c
parentecd8f7800fc4aa3b62cfb01773d8e99d5bee228d (diff)
sched/hmp: Use GFP_KERNEL for top task memory allocations
Task load structure allocations can consume a lot of memory as the number of tasks begins to increase. They might also exhaust the atomic memory pool pretty quickly if a workload starts spawning lots of threads in a short amount of time, thus increasing the possibility of failed allocations. Move the call to init_new_task_load() outside atomic context and start using GFP_KERNEL for allocations. There is no need for this allocation to be in atomic context. Change-Id: I357772e10bf8958804d9cd0c78eda27139054b21 Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--kernel/sched/core.c18
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f3b1688b3be7..6e39de2836ea 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2269,17 +2269,7 @@ void sched_exit(struct task_struct *p)
reset_task_stats(p);
p->ravg.mark_start = wallclock;
p->ravg.sum_history[0] = EXITING_TASK_MARKER;
-
- kfree(p->ravg.curr_window_cpu);
- kfree(p->ravg.prev_window_cpu);
-
- /*
- * update_task_ravg() can be called for exiting tasks. While the
- * function itself ensures correct behavior, the corresponding
- * trace event requires that these pointers be NULL.
- */
- p->ravg.curr_window_cpu = NULL;
- p->ravg.prev_window_cpu = NULL;
+ free_task_load_ptrs(p);
enqueue_task(rq, p, 0);
clear_ed_task(p, rq);
@@ -2384,10 +2374,12 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
unsigned long flags;
- int cpu = get_cpu();
+ int cpu;
- __sched_fork(clone_flags, p);
init_new_task_load(p, false);
+ cpu = get_cpu();
+
+ __sched_fork(clone_flags, p);
/*
* We mark the process as running here. This guarantees that
* nobody will actually run it, and a signal or other external