summaryrefslogtreecommitdiff
path: root/drivers/cpuidle/lpm-levels.c
diff options
context:
space:
mode:
author: Davide Garberi <dade.garberi@gmail.com> 2023-08-06 15:31:30 +0200
committer: Davide Garberi <dade.garberi@gmail.com> 2023-08-06 15:31:30 +0200
commit: cc57cb4ee3b7918b74d30604735d353b9a5fa23b (patch)
tree: 0be483b86472eaf1c74f747ecbaf6300f3998a1a /drivers/cpuidle/lpm-levels.c
parent: 44be99a74546fb018cbf2049602a5fd2889a0089 (diff)
parent: 7d11b1a7a11c598a07687f853ded9eca97d89043 (diff)
Merge lineage-20 of git@github.com:LineageOS/android_kernel_qcom_msm8998.git into lineage-20
7d11b1a7a11c Revert "sched: cpufreq: Use sched_clock instead of rq_clock when updating schedutil" daaa5da96a74 sched: Take irq_sparse lock during the isolation 217ab2d0ef91 rcu: Speed up calling of RCU tasks callbacks 997b726bc092 kernel: power: Workaround for sensor ipc message causing high power consume b933e4d37bc0 sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices 82d3f23d6dc5 sched/fair: Fix bandwidth timer clock drift condition 629bfed360f9 kernel: power: qos: remove check for core isolation while cluster LPMs 891a63210e1d sched/fair: Fix issue where frequency update not skipped b775cb29f663 ANDROID: Move schedtune en/dequeue before schedutil update triggers ebdb82f7b34a sched/fair: Skip frequency updates if CPU about to idle ff383d94478a FROMLIST: sched: Make iowait_boost optional in schedutil 9539942cb065 FROMLIST: cpufreq: Make iowait boost a policy option b65c91c9aa14 ARM: dts: msm: add HW CPU's busy-cost-data for additional freqs 72f13941085b ARM: dts: msm: fix CPU's idle-cost-data ab88411382f7 ARM: dts: msm: fix EM to be monotonically increasing 83dcbae14782 ARM: dts: msm: Fix EAS idle-cost-data property length 33d3b17bfdfb ARM: dts: msm: Add msm8998 energy model c0fa7577022c sched/walt: Re-add code to allow WALT to function d5cd35f38616 FROMGIT: binder: use EINTR for interrupted wait for work db74739c86de sched: Don't fail isolation request for an already isolated CPU aee7a16e347b sched: WALT: increase WALT minimum window size to 20ms 4dbe44554792 sched: cpufreq: Use per_cpu_ptr instead of this_cpu_ptr when reporting load ef3fb04c7df4 sched: cpufreq: Use sched_clock instead of rq_clock when updating schedutil c7128748614a sched/cpupri: Exclude isolated CPUs from the lowest_mask 6adb092856e8 sched: cpufreq: Limit governor updates to WALT changes alone 0fa652ee00f5 sched: walt: Correct WALT window size initialization 41cbb7bc59fb sched: walt: fix window misalignment when HZ=300 43cbf9d6153d sched/tune: Increase 
the cgroup limit to 6 c71b8fffe6b3 drivers: cpuidle: lpm-levels: Fix KW issues with idle state idx < 0 938e42ca699f drivers: cpuidle: lpm-levels: Correctly check for list empty 8d8a48aecde5 sched/fair: Fix load_balance() affinity redo path eccc8acbe705 sched/fair: Avoid unnecessary active load balance 0ffdb886996b BACKPORT: sched/core: Fix rules for running on online && !active CPUs c9999f04236e sched/core: Allow kthreads to fall back to online && !active cpus b9b6bc6ea3c0 sched: Allow migrating kthreads into online but inactive CPUs a9314f9d8ad4 sched/fair: Allow load bigger task load balance when nr_running is 2 c0b317c27d44 pinctrl: qcom: Clear status bit on irq_unmask 45df1516d04a UPSTREAM: mm: fix misplaced unlock_page in do_wp_page() 899def5edcd4 UPSTREAM: mm/ksm: Remove reuse_ksm_page() 46c6fbdd185a BACKPORT: mm: do_wp_page() simplification 90dccbae4c04 UPSTREAM: mm: reuse only-pte-mapped KSM page in do_wp_page() ebf270d24640 sched/fair: vruntime should normalize when switching from fair cbe0b37059c9 mm: introduce arg_lock to protect arg_start|end and env_start|end in mm_struct 12d40f1995b4 msm: mdss: Fix indentation 620df03a7229 msm: mdss: Treat polling_en as the bool that it is 12af218146a6 msm: mdss: add idle state node 13e661759656 cpuset: Restore tasks affinity while moving across cpusets 602bf4096dab genirq: Honour IRQ's affinity hint during migration 9209b5556f6a power: qos: Use effective affinity mask f31078b5825f genirq: Introduce effective affinity mask 58c453484f7e sched/cputime: Mitigate performance regression in times()/clock_gettime() 400383059868 kernel: time: Add delay after cpu_relax() in tight loops 1daa7ea39076 pinctrl: qcom: Update irq handle for GPIO pins 07f7c9961c7c power: smb-lib: Fix mutex acquisition deadlock on PD hard reset 094b738f46c8 power: qpnp-smb2: Implement battery charging_enabled node d6038d6da57f ASoC: msm-pcm-q6-v2: Add dsp buf check 0d7a6c301af8 qcacld-3.0: Fix OOB in wma_scan_roam.c Change-Id: 
Ia2e189e37daad6e99bdb359d1204d9133a7916f4
Diffstat (limited to 'drivers/cpuidle/lpm-levels.c')
-rw-r--r-- drivers/cpuidle/lpm-levels.c | 17
1 file changed, 3 insertions(+), 14 deletions(-)
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 1eaef20e5ed5..dca59eadc6c2 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -187,7 +187,7 @@ static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
uint32_t latency = 0;
int i;
- if (!cluster->list.next) {
+ if (list_empty(&cluster->list)) {
for (i = 0; i < cluster->nlevels; i++) {
level = &cluster->levels[i];
pwr_params = &level->pwr;
@@ -691,7 +691,7 @@ static void update_history(struct cpuidle_device *dev, int idx);
static int cpu_power_select(struct cpuidle_device *dev,
struct lpm_cpu *cpu)
{
- int best_level = -1;
+ int best_level = 0;
uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
dev->cpu);
s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
@@ -705,8 +705,6 @@ static int cpu_power_select(struct cpuidle_device *dev,
uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);
- if (!cpu)
- return -EINVAL;
if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
return 0;
@@ -1536,17 +1534,11 @@ static int lpm_cpuidle_select(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
- int idx;
if (!cluster)
return 0;
- idx = cpu_power_select(dev, cluster->cpu);
-
- if (idx < 0)
- return -EPERM;
-
- return idx;
+ return cpu_power_select(dev, cluster->cpu);
}
static void update_history(struct cpuidle_device *dev, int idx)
@@ -1591,9 +1583,6 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
int64_t start_time = ktime_to_ns(ktime_get()), end_time;
struct power_params *pwr_params;
- if (idx < 0)
- return -EINVAL;
-
pwr_params = &cluster->cpu->levels[idx].pwr;
sched_set_cpu_cstate(smp_processor_id(), idx + 1,
pwr_params->energy_overhead, pwr_params->latency_us);