| author | Pavankumar Kondeti <pkondeti@codeaurora.org> | 2018-12-04 12:04:59 +0530 |
|---|---|---|
| committer | Georg Veichtlbauer <georg@vware.at> | 2023-07-16 12:47:42 +0200 |
| commit | 9209b5556f6acc6b2c0c29135db247f90a3eb78e (patch) | |
| tree | 1f08afe86d3e79c3ecf97679311242ffeae88ffc /kernel/power/qos.c | |
| parent | f31078b5825f71499fa95b85babf6ac8c776c37d (diff) | |
power: qos: Use effective affinity mask
A PM_QOS_REQ_AFFINE_IRQ request is supposed to apply the QoS vote
to the CPU(s) on which the attached interrupt arrives. Currently
the QoS vote is applied to all CPUs present in the IRQ
affinity mask, i.e. desc->irq_data.common->affinity. However,
some chips configure only a single CPU from this affinity mask
to receive the IRQ. This information is available in the IRQ's
effective affinity mask. Start using it so that the QoS vote is
not applied to CPUs that are present in the affinity mask but
never receive the IRQ.
Change-Id: If26aa23bebe4a7d07ffedb5ff833ccdb4f4fb6ea
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
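For context, here is a minimal sketch of the affinity lookup the change relies on: prefer the IRQ's effective affinity mask, and fall back to the configured affinity mask when the effective mask is still empty (i.e. the IRQ has not been started yet). The helper name `resolve_irq_qos_cpus()` is illustrative and not part of the patch; `irq_to_desc()`, `irq_data_get_effective_affinity_mask()` and `irq_data_get_affinity_mask()` are the stock kernel APIs the diff below uses.

```c
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/cpumask.h>

/*
 * Illustrative helper (not part of the patch): resolve the set of
 * CPUs a QoS vote should target for a given IRQ. Mirrors the fallback
 * logic the patch adds to pm_qos_add_request(): prefer the effective
 * affinity (the CPU(s) actually programmed to receive the interrupt)
 * and fall back to the configured affinity mask if the IRQ has not
 * been started yet and the effective mask is therefore still empty.
 */
static void resolve_irq_qos_cpus(unsigned int irq, struct cpumask *out)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct cpumask *mask;

	if (!desc) {
		cpumask_clear(out);
		return;
	}

	mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
	if (cpumask_empty(mask))
		mask = irq_data_get_affinity_mask(&desc->irq_data);

	cpumask_copy(out, mask);
}
```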
Diffstat (limited to 'kernel/power/qos.c')
| -rw-r--r-- | kernel/power/qos.c | 28 |
1 file changed, 23 insertions, 5 deletions
```diff
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index e6eceb0aa496..49dc710d4a3a 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -545,19 +545,29 @@ static void pm_qos_irq_release(struct kref *ref)
 }
 
 static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
-		const cpumask_t *mask)
+		const cpumask_t *unused_mask)
 {
 	unsigned long flags;
 	struct pm_qos_request *req = container_of(notify,
 			struct pm_qos_request, irq_notify);
 	struct pm_qos_constraints *c =
 		pm_qos_array[req->pm_qos_class]->constraints;
+	struct irq_desc *desc = irq_to_desc(req->irq);
+	struct cpumask *new_affinity =
+		irq_data_get_effective_affinity_mask(&desc->irq_data);
+	bool affinity_changed = false;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
-	cpumask_copy(&req->cpus_affine, mask);
+	if (!cpumask_equal(&req->cpus_affine, new_affinity)) {
+		cpumask_copy(&req->cpus_affine, new_affinity);
+		affinity_changed = true;
+	}
+
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
-	pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, req->node.prio);
+	if (affinity_changed)
+		pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ,
+				req->node.prio);
 }
 #endif
 
@@ -601,9 +611,17 @@ void pm_qos_add_request(struct pm_qos_request *req,
 		if (!desc)
 			return;
 
-		mask = desc->irq_data.common->affinity;
-		/* Get the current affinity */
+		/*
+		 * If the IRQ is not started, the effective affinity
+		 * won't be set. So fallback to the default affinity.
+		 */
+		mask = irq_data_get_effective_affinity_mask(
+				&desc->irq_data);
+		if (cpumask_empty(mask))
+			mask = irq_data_get_affinity_mask(
+				&desc->irq_data);
+
 		cpumask_copy(&req->cpus_affine, mask);
 
 		req->irq_notify.irq = req->irq;
 		req->irq_notify.notify = pm_qos_irq_notify;
```
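A hedged sketch of how a driver might consume this, assuming the msm/CAF PM QoS extensions present in this tree: the PM_QOS_REQ_AFFINE_IRQ request type and the `irq` field of `struct pm_qos_request`, both of which the diff above references (`req->irq`, `req->cpus_affine`). The `type` field name, the device IRQ number and the 100 us latency value are illustrative assumptions, not taken from the patch.

```c
#include <linux/pm_qos.h>

/* Illustrative driver-side user of an IRQ-affine PM QoS request. */
static struct pm_qos_request my_qos_req;

static void my_driver_request_low_latency(int my_device_irq)
{
	/* Assumed field names from the msm/CAF pm_qos extensions. */
	my_qos_req.type = PM_QOS_REQ_AFFINE_IRQ;
	my_qos_req.irq = my_device_irq;

	/*
	 * With this patch, the 100 us vote is applied only to the CPU(s)
	 * in the IRQ's effective affinity mask, not to every CPU listed
	 * in the configured affinity mask.
	 */
	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 100);
}
```

With the patch applied, the vote follows the CPU(s) the interrupt is actually steered to, and pm_qos_irq_notify() re-evaluates the QoS target only when the effective affinity really changes.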
