Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched/core.c  | 11
-rw-r--r--   kernel/sched/hmp.c   |  7
-rw-r--r--   kernel/sched/sched.h |  2
3 files changed, 17 insertions, 3 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1d91c012b5d8..13a64488c3d2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2325,11 +2325,11 @@ void sched_exit(struct task_struct *p)
 	reset_task_stats(p);
 	p->ravg.mark_start = wallclock;
 	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
-	free_task_load_ptrs(p);
 	enqueue_task(rq, p, 0);
 	clear_ed_task(p, rq);
 	task_rq_unlock(rq, p, &flags);
+	free_task_load_ptrs(p);
 }
 #endif /* CONFIG_SCHED_HMP */
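
The move of free_task_load_ptrs() in sched_exit() is a lock-ordering fix: kfree() can wake kswapd, and a wakeup may itself need to take a runqueue lock, so freeing the load-tracking buffers while rq->lock is held risks deadlock. A schematic sketch of the pattern, where buf is a stand-in for any allocation whose free path can trigger a wakeup:

	/* BAD: kfree() under rq->lock can deadlock via a kswapd wakeup */
	raw_spin_lock_irqsave(&rq->lock, flags);
	kfree(buf);
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	/* GOOD: do only scheduler bookkeeping under the lock,
	 * drop it, then free */
	raw_spin_lock_irqsave(&rq->lock, flags);
	/* ... scheduler state updates ... */
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	kfree(buf);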
@@ -4914,6 +4914,15 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
+
+	/*
+	 * Userspace tasks are forbidden to run on isolated
+	 * CPUs, so exclude isolated CPUs from the mask
+	 * returned by getaffinity.
+	 */
+	if (!(p->flags & PF_KTHREAD))
+		cpumask_andnot(mask, mask, cpu_isolated_mask);
+
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 out_unlock:
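
For userspace, the effect is that sched_getaffinity(2) no longer reports isolated CPUs for ordinary (non-kthread) tasks. A small standalone check, assuming a kernel with this patch and at least one isolated CPU; on other systems it simply lists all active CPUs:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		cpu_set_t set;
		int cpu;

		if (sched_getaffinity(0, sizeof(set), &set) != 0) {
			perror("sched_getaffinity");
			return 1;
		}
		/* Isolated CPUs should be absent from this list
		 * for a normal task after the change above. */
		for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
			if (CPU_ISSET(cpu, &set))
				printf("cpu %d\n", cpu);
		return 0;
	}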
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index ae6876e62c0f..ea066ab8376b 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1526,6 +1526,10 @@ unsigned int cpu_temp(int cpu)
 	return 0;
 }
 
+/*
+ * kfree() may wake up kswapd, so this function must NOT be
+ * called with any CPU's rq->lock held.
+ */
 void free_task_load_ptrs(struct task_struct *p)
 {
 	kfree(p->ravg.curr_window_cpu);
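
For context, the hunk shows only the first kfree(); in the HMP code this function frees both per-task window arrays and clears the pointers, presumably along these lines (reconstructed, not part of this diff):

	void free_task_load_ptrs(struct task_struct *p)
	{
		kfree(p->ravg.curr_window_cpu);
		kfree(p->ravg.prev_window_cpu);

		/* Guard against double-free on a later call. */
		p->ravg.curr_window_cpu = NULL;
		p->ravg.prev_window_cpu = NULL;
	}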
@@ -2608,7 +2612,8 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 	p->cpu_cycles = cur_cycles;
 
-	trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
+	trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles,
+					rq->cc.time, p);
 }
 
 static int
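
Passing p to the tracepoint implies the event definition (in the HMP trace header, not shown in this diff) gained a task argument, so the event can log per-task identity alongside the cycle counts. A sketch of what that definition plausibly looks like; the logged fields here are illustrative assumptions:

	TRACE_EVENT(sched_get_task_cpu_cycles,

		TP_PROTO(int cpu, int event, u64 cycles, u64 time,
			 struct task_struct *p),

		TP_ARGS(cpu, event, cycles, time, p),

		TP_STRUCT__entry(
			__field(int,	cpu)
			__field(int,	event)
			__field(u64,	cycles)
			__field(u64,	time)
			__array(char,	comm, TASK_COMM_LEN)
			__field(pid_t,	pid)
		),

		TP_fast_assign(
			__entry->cpu	= cpu;
			__entry->event	= event;
			__entry->cycles	= cycles;
			__entry->time	= time;
			memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
			__entry->pid	= p->pid;
		),

		TP_printk("cpu=%d event=%d cycles=%llu time=%llu comm=%s pid=%d",
			  __entry->cpu, __entry->event, __entry->cycles,
			  __entry->time, __entry->comm, __entry->pid)
	);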
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7ff14b7f598e..7b31973c6db3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1247,7 +1247,7 @@ static inline int cpu_min_power_cost(int cpu)
 	return cpu_rq(cpu)->cluster->min_power_cost;
 }
 
-static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
+static inline u32 cpu_cycles_to_freq(u64 cycles, u64 period)
{
 	return div64_u64(cycles, period);
 }
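
The u32 -> u64 widening of period prevents silent truncation at the call boundary: a caller passing a 64-bit period would have its high bits dropped before div64_u64() ever ran, inflating the computed frequency. A standalone userspace illustration with made-up numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t cycles = 8ULL << 32;		/* 8 * 2^32 cycles */
		uint64_t period = (1ULL << 32) + 1000;	/* needs > 32 bits */
		uint32_t truncated = (uint32_t)period;	/* old u32 param:
							   high bits lost */

		printf("correct freq:   %llu\n",	/* prints ~8 */
		       (unsigned long long)(cycles / period));
		printf("truncated freq: %llu\n",	/* prints ~3.4e7 */
		       (unsigned long long)(cycles / truncated));
		return 0;
	}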