Diffstat (limited to 'drivers/cpuidle/lpm-levels-of.c')
-rw-r--r--  drivers/cpuidle/lpm-levels-of.c | 210
1 file changed, 198 insertions(+), 12 deletions(-)
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index f4ae70ac9315..b40231dd8dd1 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -38,34 +38,138 @@ static const struct lpm_type_str lpm_types[] = {
{SUSPEND, "suspend_enabled"},
};
+static DEFINE_PER_CPU(uint32_t *, max_residency);
+static DEFINE_PER_CPU(uint32_t *, min_residency);
static struct lpm_level_avail *cpu_level_available[NR_CPUS];
static struct platform_device *lpm_pdev;
-static void *get_avail_val(struct kobject *kobj, struct kobj_attribute *attr)
+static void *get_enabled_ptr(struct kobj_attribute *attr,
+ struct lpm_level_avail *avail)
{
void *arg = NULL;
+
+ if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+ arg = (void *) &avail->idle_enabled;
+ else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+ arg = (void *) &avail->suspend_enabled;
+
+ return arg;
+}
+
+static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
+ struct kobj_attribute *attr)
+{
struct lpm_level_avail *avail = NULL;
- if (!strcmp(attr->attr.name, lpm_types[IDLE].str)) {
+ if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
avail = container_of(attr, struct lpm_level_avail,
idle_enabled_attr);
- arg = (void *) &avail->idle_enabled;
- } else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str)) {
+ else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
avail = container_of(attr, struct lpm_level_avail,
suspend_enabled_attr);
- arg = (void *) &avail->suspend_enabled;
+
+ return avail;
+}
+
+static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
+ bool probe_time)
+{
+ int i, j;
+ bool mode_avail;
+ uint32_t *maximum_residency = per_cpu(max_residency, cpu_id);
+ uint32_t *minimum_residency = per_cpu(min_residency, cpu_id);
+
+ for (i = 0; i < cpu->nlevels; i++) {
+ struct power_params *pwr = &cpu->levels[i].pwr;
+
+ mode_avail = probe_time ||
+ lpm_cpu_mode_allow(cpu_id, i, true);
+
+ if (!mode_avail) {
+ maximum_residency[i] = 0;
+ minimum_residency[i] = 0;
+ continue;
+ }
+
+ maximum_residency[i] = ~0;
+ for (j = i + 1; j < cpu->nlevels; j++) {
+ mode_avail = probe_time ||
+ lpm_cpu_mode_allow(cpu_id, j, true);
+
+ if (mode_avail &&
+ (maximum_residency[i] > pwr->residencies[j]) &&
+ (pwr->residencies[j] != 0))
+ maximum_residency[i] = pwr->residencies[j];
+ }
+
+ minimum_residency[i] = pwr->time_overhead_us;
+ for (j = i-1; j >= 0; j--) {
+ if (probe_time || lpm_cpu_mode_allow(cpu_id, j, true)) {
+ minimum_residency[i] = maximum_residency[j] + 1;
+ break;
+ }
+ }
}
+}
- return arg;
+static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
+ bool probe_time)
+{
+ int i, j;
+ bool mode_avail;
+
+ for (i = 0; i < cluster->nlevels; i++) {
+ struct power_params *pwr = &cluster->levels[i].pwr;
+
+ mode_avail = probe_time ||
+ lpm_cluster_mode_allow(cluster, i,
+ true);
+
+ if (!mode_avail) {
+ pwr->max_residency = 0;
+ pwr->min_residency = 0;
+ continue;
+ }
+
+ pwr->max_residency = ~0;
+ for (j = i+1; j < cluster->nlevels; j++) {
+ mode_avail = probe_time ||
+ lpm_cluster_mode_allow(cluster, j,
+ true);
+ if (mode_avail &&
+ (pwr->max_residency > pwr->residencies[j]) &&
+ (pwr->residencies[j] != 0))
+ pwr->max_residency = pwr->residencies[j];
+ }
+
+ pwr->min_residency = pwr->time_overhead_us;
+ for (j = i-1; j >= 0; j--) {
+ if (probe_time ||
+ lpm_cluster_mode_allow(cluster, j, true)) {
+ pwr->min_residency =
+ cluster->levels[j].pwr.max_residency + 1;
+ break;
+ }
+ }
+ }
}
+uint32_t *get_per_cpu_max_residency(int cpu)
+{
+ return per_cpu(max_residency, cpu);
+}
+
+uint32_t *get_per_cpu_min_residency(int cpu)
+{
+ return per_cpu(min_residency, cpu);
+}
ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int ret = 0;
struct kernel_param kp;
- kp.arg = get_avail_val(kobj, attr);
+ kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr));
ret = param_get_bool(buf, &kp);
if (ret > 0) {
strlcat(buf, "\n", PAGE_SIZE);
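
The new set_optimum_cpu_residency()/set_optimum_cluster_residency() helpers above turn the pairwise break-even table (pwr->residencies[]) into a usable sleep-length window per level: the upper bound is the smallest non-zero break-even time of any allowed deeper level, and the lower bound is one microsecond past the window of the nearest allowed shallower level, falling back to the level's own time_overhead_us. A minimal standalone sketch of that selection logic follows; residency_window(), allowed[], residency[][] and overhead[] are illustrative names for this example only, not identifiers from the driver.

#include <stdbool.h>
#include <stdint.h>

#define NLEVELS 3

/* Derive the [min, max] residency window for each low power level. */
static void residency_window(const bool allowed[NLEVELS],
			     const uint32_t residency[NLEVELS][NLEVELS],
			     const uint32_t overhead[NLEVELS],
			     uint32_t min_res[NLEVELS],
			     uint32_t max_res[NLEVELS])
{
	int i, j;

	for (i = 0; i < NLEVELS; i++) {
		if (!allowed[i]) {
			/* Disabled level: zeroed window, skipped by users. */
			min_res[i] = 0;
			max_res[i] = 0;
			continue;
		}

		/* Upper bound: smallest non-zero break-even time of any
		 * allowed deeper level; ~0 if there is none. */
		max_res[i] = ~0U;
		for (j = i + 1; j < NLEVELS; j++)
			if (allowed[j] && residency[i][j] != 0 &&
			    residency[i][j] < max_res[i])
				max_res[i] = residency[i][j];

		/* Lower bound: just past the nearest allowed shallower
		 * level's window, or this level's own entry/exit cost. */
		min_res[i] = overhead[i];
		for (j = i - 1; j >= 0; j--) {
			if (allowed[j]) {
				min_res[i] = max_res[j] + 1;
				break;
			}
		}
	}
}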
@@ -80,15 +184,25 @@ ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
{
int ret = 0;
struct kernel_param kp;
+ struct lpm_level_avail *avail;
- kp.arg = get_avail_val(kobj, attr);
+ avail = get_avail_ptr(kobj, attr);
+ if (WARN_ON(!avail))
+ return -EINVAL;
+ kp.arg = get_enabled_ptr(attr, avail);
ret = param_set_bool(buf, &kp);
+ if (avail->cpu_node)
+ set_optimum_cpu_residency(avail->data, avail->idx, false);
+ else
+ set_optimum_cluster_residency(avail->data, false);
+
return ret ? ret : len;
}
static int create_lvl_avail_nodes(const char *name,
- struct kobject *parent, struct lpm_level_avail *avail)
+ struct kobject *parent, struct lpm_level_avail *avail,
+ void *data, int index, bool cpu_node)
{
struct attribute_group *attr_group = NULL;
struct attribute **attr = NULL;
@@ -139,6 +253,9 @@ static int create_lvl_avail_nodes(const char *name,
avail->idle_enabled = true;
avail->suspend_enabled = true;
avail->kobj = kobj;
+ avail->data = data;
+ avail->idx = index;
+ avail->cpu_node = cpu_node;
return ret;
@@ -181,7 +298,8 @@ static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
for (i = 0; i < p->cpu->nlevels; i++) {
ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
- cpu_kobj[cpu_idx], &level_list[i]);
+ cpu_kobj[cpu_idx], &level_list[i],
+ (void *)p->cpu, cpu, true);
if (ret)
goto release_kobj;
}
@@ -215,7 +333,8 @@ int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
for (i = 0; i < p->nlevels; i++) {
ret = create_lvl_avail_nodes(p->levels[i].level_name,
- cluster_kobj, &p->levels[i].available);
+ cluster_kobj, &p->levels[i].available,
+ (void *)p, 0, false);
if (ret)
return ret;
}
@@ -421,6 +540,9 @@ static int parse_power_params(struct device_node *node,
key = "qcom,time-overhead";
ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
+ if (ret)
+ goto fail;
+
fail:
if (ret)
pr_err("%s(): %s Error reading %s\n", __func__, node->name,
@@ -615,11 +737,31 @@ static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
return 0;
}
+static int calculate_residency(struct power_params *base_pwr,
+ struct power_params *next_pwr)
+{
+ int32_t residency = (int32_t)(next_pwr->energy_overhead -
+ base_pwr->energy_overhead) -
+ ((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
+ - (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));
+
+ residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);
+
+ if (residency < 0) {
+ __WARN_printf("%s: Incorrect power attributes for LPM\n",
+ __func__);
+ return next_pwr->time_overhead_us;
+ }
+
+ return residency < next_pwr->time_overhead_us ?
+ next_pwr->time_overhead_us : residency;
+}
+
static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
{
struct device_node *n;
int ret = -ENOMEM;
- int i;
+ int i, j;
char *key;
c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
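
calculate_residency() above encodes a break-even computation between a shallower level b and a deeper level n, under the usual reading of the power parameters: entering a level costs energy_overhead E once, takes time_overhead T, and the remainder of a sleep of length t runs at steady-state power P (ss_power). Equating the two energy costs and solving for t reproduces the expression in the code (this derivation is a reconstruction, not text from the patch):

E_b + P_b\,(t - T_b) \;=\; E_n + P_n\,(t - T_n)
\quad\Longrightarrow\quad
t \;=\; \frac{(E_n - E_b) - (P_n T_n - P_b T_b)}{P_b - P_n}

The integer result is clamped to at least next_pwr->time_overhead_us, and a negative value (only possible with inconsistent devicetree power/energy numbers) triggers the warning and the same clamp. With illustrative values E_b = 0, P_b = 500, T_b = 1 and E_n = 300000, P_n = 100, T_n = 100 (units as in the qcom,* properties), t = (300000 - (10000 - 500)) / 400 = 726, so the deeper level only pays off for expected sleeps longer than about 726 us. The division also relies on P_b > P_n, i.e. a deeper level drawing strictly less steady-state power; equal ss-power values would divide by zero.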
@@ -676,6 +818,22 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
else if (ret)
goto failed;
}
+ for (i = 0; i < c->cpu->nlevels; i++) {
+ for (j = 0; j < c->cpu->nlevels; j++) {
+ if (i >= j) {
+ c->cpu->levels[i].pwr.residencies[j] = 0;
+ continue;
+ }
+
+ c->cpu->levels[i].pwr.residencies[j] =
+ calculate_residency(&c->cpu->levels[i].pwr,
+ &c->cpu->levels[j].pwr);
+
+ pr_err("%s: idx %d %u\n", __func__, j,
+ c->cpu->levels[i].pwr.residencies[j]);
+ }
+ }
+
return 0;
failed:
for (i = 0; i < c->cpu->nlevels; i++) {
@@ -732,6 +890,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
struct device_node *n;
char *key;
int ret = 0;
+ int i, j;
c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
if (!c)
@@ -789,6 +948,22 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
goto failed_parse_cluster;
c->aff_level = 1;
+
+ for_each_cpu(i, &c->child_cpus) {
+ per_cpu(max_residency, i) = devm_kzalloc(
+ &lpm_pdev->dev,
+ sizeof(uint32_t) * c->cpu->nlevels,
+ GFP_KERNEL);
+ if (!per_cpu(max_residency, i))
+ return ERR_PTR(-ENOMEM);
+ per_cpu(min_residency, i) = devm_kzalloc(
+ &lpm_pdev->dev,
+ sizeof(uint32_t) * c->cpu->nlevels,
+ GFP_KERNEL);
+ if (!per_cpu(min_residency, i))
+ return ERR_PTR(-ENOMEM);
+ set_optimum_cpu_residency(c->cpu, i, true);
+ }
}
}
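
The hunk above sizes the per-CPU max_residency/min_residency arrays to the number of CPU low power levels and seeds them once at probe time (probe_time = true, so every level is treated as allowed); lpm_enable_store() later recomputes them whenever a level is toggled through sysfs. A hypothetical consumer of the exported get_per_cpu_{min,max}_residency() windows might look like the sketch below; pick_level() and its selection policy are assumptions for illustration, not the governor in lpm-levels.c.

/* Pick the deepest enabled level whose window contains the predicted
 * sleep length; a zeroed window marks a level disabled via sysfs. */
static int pick_level(int cpu, uint32_t predicted_us, int nlevels)
{
	uint32_t *min_res = get_per_cpu_min_residency(cpu);
	uint32_t *max_res = get_per_cpu_max_residency(cpu);
	int i, best = 0;

	for (i = 0; i < nlevels; i++) {
		if (!max_res[i])
			continue;
		if (predicted_us >= min_res[i] && predicted_us <= max_res[i])
			best = i;
	}

	return best;
}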
@@ -797,6 +972,17 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
else
c->last_level = c->nlevels-1;
+ for (i = 0; i < c->nlevels; i++) {
+ for (j = 0; j < c->nlevels; j++) {
+ if (i >= j) {
+ c->levels[i].pwr.residencies[j] = 0;
+ continue;
+ }
+ c->levels[i].pwr.residencies[j] = calculate_residency(
+ &c->levels[i].pwr, &c->levels[j].pwr);
+ }
+ }
+ set_optimum_cluster_residency(c, true);
return c;
failed_parse_cluster: