summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorLinux Build Service Account <lnxbuild@localhost>2016-11-30 12:00:47 -0700
committerLinux Build Service Account <lnxbuild@localhost>2016-11-30 12:00:48 -0700
commitfe5ce2a6bc4fd8b58f74731ca2d13452ee776d43 (patch)
tree290a61fedf56009db6bb0dfbe8e4f86adff697b5 /kernel
parent277e2eecbe3cd254fbf92c16d7fd988e1a9f9f50 (diff)
parentbde539edca7035e2f1f19c0d93ea02356b38ed48 (diff)
Promotion of kernel.lnx.4.4-161129.2.
CRs Change ID Subject -------------------------------------------------------------------------------------------------------------- 1091566 I0cd6ca6679c19bbdf4ad980a22d9976396028316 qpnp-fg-gen3: Add support to configure auto recharge vol 1085699 I545771f9a84ecc9c2f641c17e057673a32b1f305 usb: gadget: Notify QTI upon cable connect for DPL 1094456 Ifefcbe4741ddd046755ecc24c3f2d619566c2823 clk: fix critical clock locking 1094461 I6f0f8f742d6e1b3ff735dcbeabd54ef101329cdf sched: Disable interrupts while holding related_thread_g 1094434 I69ae0d8589a0878b9758619893848afc272179c5 Crash due to mutex genl_lock called from RCU context 1094456 I2bf824bd2446ca87baabd31c166119d6c5c90643 clk: Provide OF helper to mark clocks as CRITICAL Change-Id: If9a65e4592e6051e11e3501ecefc79a792097a3f CRs-Fixed: 1085699, 1094434, 1094461, 1091566, 1094456
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/hmp.c17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index a9ccb63c8e23..5ff7a11d043f 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1771,20 +1771,20 @@ static int send_notification(struct rq *rq, int check_pred, int check_groups)
if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
return 0;
} else {
- read_lock(&related_thread_group_lock);
+ read_lock_irqsave(&related_thread_group_lock, flags);
/*
* Protect from concurrent update of rq->prev_runnable_sum and
* group cpu load
*/
- raw_spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock(&rq->lock);
if (check_groups)
_group_load_in_cpu(cpu_of(rq), &group_load, NULL);
new_load = rq->prev_runnable_sum + group_load;
new_load = freq_policy_load(rq, new_load);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- read_unlock(&related_thread_group_lock);
+ raw_spin_unlock(&rq->lock);
+ read_unlock_irqrestore(&related_thread_group_lock, flags);
cur_freq = load_to_freq(rq, rq->old_busy_time);
freq_required = load_to_freq(rq, new_load);
@@ -3206,14 +3206,16 @@ void sched_get_cpus_busy(struct sched_load *busy,
if (unlikely(cpus == 0))
return;
+ local_irq_save(flags);
+
+ read_lock(&related_thread_group_lock);
+
/*
* This function could be called in timer context, and the
* current task may have been executing for a long time. Ensure
* that the window stats are current by doing an update.
*/
- read_lock(&related_thread_group_lock);
- local_irq_save(flags);
for_each_cpu(cpu, query_cpus)
raw_spin_lock(&cpu_rq(cpu)->lock);
@@ -3313,10 +3315,11 @@ skip_early:
for_each_cpu(cpu, query_cpus)
raw_spin_unlock(&(cpu_rq(cpu))->lock);
- local_irq_restore(flags);
read_unlock(&related_thread_group_lock);
+ local_irq_restore(flags);
+
i = 0;
for_each_cpu(cpu, query_cpus) {
rq = cpu_rq(cpu);