From 9209b5556f6acc6b2c0c29135db247f90a3eb78e Mon Sep 17 00:00:00 2001
From: Pavankumar Kondeti
Date: Tue, 4 Dec 2018 12:04:59 +0530
Subject: power: qos: Use effective affinity mask

A PM_QOS_REQ_AFFINE_IRQ request is supposed to apply the QoS vote to
the CPU(s) on which the attached interrupt arrives. Currently the QoS
vote is applied to all CPUs present in the IRQ affinity mask, i.e.
desc->irq_data.common->affinity. However, some chips configure only a
single CPU from this affinity mask to receive the IRQ. This information
is available in the IRQ's effective affinity mask. Start using it so
that the QoS vote is not applied to CPUs that are present in the
affinity mask but never actually receive the IRQ.

Change-Id: If26aa23bebe4a7d07ffedb5ff833ccdb4f4fb6ea
Signed-off-by: Pavankumar Kondeti
---
 kernel/power/qos.c | 28 +++++++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)

(limited to 'kernel/power/qos.c')

diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index e6eceb0aa496..49dc710d4a3a 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -545,19 +545,29 @@ static void pm_qos_irq_release(struct kref *ref)
 }
 
 static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
-		const cpumask_t *mask)
+		const cpumask_t *unused_mask)
 {
 	unsigned long flags;
 	struct pm_qos_request *req = container_of(notify,
 			struct pm_qos_request, irq_notify);
 	struct pm_qos_constraints *c =
 		pm_qos_array[req->pm_qos_class]->constraints;
+	struct irq_desc *desc = irq_to_desc(req->irq);
+	struct cpumask *new_affinity =
+		irq_data_get_effective_affinity_mask(&desc->irq_data);
+	bool affinity_changed = false;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
-	cpumask_copy(&req->cpus_affine, mask);
+	if (!cpumask_equal(&req->cpus_affine, new_affinity)) {
+		cpumask_copy(&req->cpus_affine, new_affinity);
+		affinity_changed = true;
+	}
+
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
-	pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, req->node.prio);
+	if (affinity_changed)
+		pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ,
+				req->node.prio);
 }
 #endif
 
@@ -601,9 +611,17 @@ void pm_qos_add_request(struct pm_qos_request *req,
 
 			if (!desc)
 				return;
-			mask = desc->irq_data.common->affinity;
 
-			/* Get the current affinity */
+			/*
+			 * If the IRQ is not started, the effective affinity
+			 * won't be set. So fallback to the default affinity.
+			 */
+			mask = irq_data_get_effective_affinity_mask(
+					&desc->irq_data);
+			if (cpumask_empty(mask))
+				mask = irq_data_get_affinity_mask(
+						&desc->irq_data);
+
 			cpumask_copy(&req->cpus_affine, mask);
 			req->irq_notify.irq = req->irq;
 			req->irq_notify.notify = pm_qos_irq_notify;
-- 
cgit v1.2.3
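For context, a driver on kernels that carry this out-of-tree PM QoS extension
attaches a QoS vote to an interrupt roughly as sketched below. This is a
minimal sketch, not code from the patch: the foo_* names and the 100us value
are made up, while pm_qos_add_request(), pm_qos_remove_request(),
PM_QOS_CPU_DMA_LATENCY and the type/irq request fields are the interfaces the
diff above operates on.

#include <linux/pm_qos.h>

/* Hypothetical driver state; only the QoS handling is of interest here. */
static struct pm_qos_request foo_qos_req;

static void foo_request_qos(unsigned int irq)
{
	/*
	 * Tie the request to the IRQ: the QoS vote then applies only to
	 * the CPU(s) that can receive this interrupt, which the patch
	 * above narrows to the effective affinity mask.
	 */
	foo_qos_req.type = PM_QOS_REQ_AFFINE_IRQ;
	foo_qos_req.irq = irq;

	/* 100us is an arbitrary example latency tolerance. */
	pm_qos_add_request(&foo_qos_req, PM_QOS_CPU_DMA_LATENCY, 100);
}

static void foo_release_qos(void)
{
	pm_qos_remove_request(&foo_qos_req);
}

With such a request in place, the vote lands only on the CPU(s) in the IRQ's
effective affinity mask rather than on every CPU listed in the configured
affinity mask.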
From 629bfed360f99e297f7d8042955710aadbde2123 Mon Sep 17 00:00:00 2001
From: Raghavendra Kakarla
Date: Fri, 1 Jun 2018 19:06:53 +0530
Subject: kernel: power: qos: remove core isolation check for cluster LPMs

When all cores in a cluster are isolated, the PM QoS latency constraint
set by the clock driver while switching the PLL is ignored. The cluster
then enters L2PC, and the SPM tries to disable the PLL at the same time
as the clock driver is switching the PLL from the other cluster, which
leads to synchronization issues. Fix this by honoring PM QoS requests
for cluster LPMs even when all cores in the cluster are isolated.

Change-Id: I4296e16ef4e9046d1fbe3b7378e9f61a2f11c74d
Signed-off-by: Raghavendra Kakarla
---
 kernel/power/qos.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'kernel/power/qos.c')

diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 49dc710d4a3a..3e3ae5ed8100 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -477,8 +477,6 @@ int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask)
 		val = c->default_value;
 
 	for_each_cpu(cpu, mask) {
-		if (cpu_isolated(cpu))
-			continue;
 
 		switch (c->type) {
 		case PM_QOS_MIN:
-- 
cgit v1.2.3
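To show where the removed check mattered, a cluster low-power-mode (LPM)
driver typically gates a cluster idle state on the aggregated QoS vote of the
cluster's CPUs, roughly as sketched below. The helper, its parameters and the
latency comparison are hypothetical; only pm_qos_request_for_cpumask() and
PM_QOS_CPU_DMA_LATENCY come from the code this patch touches.

#include <linux/cpumask.h>
#include <linux/pm_qos.h>
#include <linux/types.h>

/*
 * Hypothetical check in a cluster LPM driver. With the cpu_isolated()
 * filter removed above, the aggregated value now also reflects QoS votes
 * tied to isolated CPUs in the cluster, so the PLL-switch constraint set
 * by the clock driver is honored before the cluster enters L2PC.
 */
static bool cluster_lpm_allowed(struct cpumask *cluster_cpus,
				int mode_exit_latency_us)
{
	int latency_req;

	/* Aggregate the QoS votes of every CPU in the cluster. */
	latency_req = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
						 cluster_cpus);

	return mode_exit_latency_us <= latency_req;
}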