Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Makefile                    2
-rw-r--r--  drivers/base/core.c                     13
-rw-r--r--  drivers/base/cpu.c                     175
-rw-r--r--  drivers/base/dd.c                       41
-rw-r--r--  drivers/base/dma-mapping.c              17
-rw-r--r--  drivers/base/dma-removed.c             452
-rw-r--r--  drivers/base/firmware_class.c          483
-rw-r--r--  drivers/base/platform.c                 20
-rw-r--r--  drivers/base/power/main.c               12
-rw-r--r--  drivers/base/power/opp/Makefile          1
-rw-r--r--  drivers/base/power/opp/core.c         1229
-rw-r--r--  drivers/base/power/opp/cpu.c            25
-rw-r--r--  drivers/base/power/opp/debugfs.c       218
-rw-r--r--  drivers/base/power/opp/opp.h           107
-rw-r--r--  drivers/base/power/qos.c                15
-rw-r--r--  drivers/base/regmap/Kconfig             14
-rw-r--r--  drivers/base/regmap/Makefile             1
-rw-r--r--  drivers/base/regmap/internal.h           7
-rw-r--r--  drivers/base/regmap/regcache.c          54
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c    75
-rw-r--r--  drivers/base/regmap/regmap-spmi.c        2
-rw-r--r--  drivers/base/regmap/regmap-swr.c       216
-rw-r--r--  drivers/base/regmap/regmap.c             2
23 files changed, 2770 insertions, 411 deletions
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 6b2a84e7f2be..60362d8ec7f2 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o dma-removed.o
obj-$(CONFIG_ISA) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/core.c b/drivers/base/core.c
index afe045792796..5a56a8e9f006 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -72,6 +72,11 @@ int lock_device_hotplug_sysfs(void)
return restart_syscall();
}
+void lock_device_hotplug_assert(void)
+{
+ lockdep_assert_held(&device_hotplug_lock);
+}
+
#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
@@ -1111,7 +1116,13 @@ int device_add(struct device *dev)
error = dpm_sysfs_add(dev);
if (error)
goto DPMError;
- device_pm_add(dev);
+ if ((dev->pm_domain) || (dev->type && dev->type->pm)
+ || (dev->class && (dev->class->pm || dev->class->resume))
+ || (dev->bus && (dev->bus->pm || dev->bus->resume)) ||
+ (dev->driver && dev->driver->pm)) {
+ device_pm_add(dev);
+ }
+
if (MAJOR(dev->devt)) {
error = device_create_file(dev, &dev_attr_dev);
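The hunk above makes device_add() register a device on the dpm_list only when some layer can actually service PM callbacks; drivers/base/power/main.c further down applies the same test, negated, before list moves. A minimal sketch of that predicate as a hypothetical helper (device_has_pm_callbacks is not part of the patch, shown only to make the repeated condition readable):

	#include <linux/device.h>

	/* Hypothetical helper: true when the PM domain, type, class, bus
	 * or driver of the device supplies PM callbacks. */
	static bool device_has_pm_callbacks(struct device *dev)
	{
		return dev->pm_domain ||
		       (dev->type && dev->type->pm) ||
		       (dev->class && (dev->class->pm || dev->class->resume)) ||
		       (dev->bus && (dev->bus->pm || dev->bus->resume)) ||
		       (dev->driver && dev->driver->pm);
	}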
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 91bbb1959d8d..51a08995442d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -180,10 +180,177 @@ static struct attribute_group crash_note_cpu_attr_group = {
};
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+
+static ssize_t isolate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ ssize_t rc;
+ int cpuid = cpu->dev.id;
+ unsigned int isolated = cpu_isolated(cpuid);
+
+ rc = snprintf(buf, PAGE_SIZE-2, "%d\n", isolated);
+
+ return rc;
+}
+
+static DEVICE_ATTR_RO(isolate);
+
+static struct attribute *cpu_isolated_attrs[] = {
+ &dev_attr_isolate.attr,
+ NULL
+};
+
+static struct attribute_group cpu_isolated_attr_group = {
+ .attrs = cpu_isolated_attrs,
+};
+
+#endif
+
+#ifdef CONFIG_SCHED_HMP
+
+static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ ssize_t rc;
+ int cpuid = cpu->dev.id;
+ unsigned int pwr_cost;
+
+ pwr_cost = sched_get_static_cpu_pwr_cost(cpuid);
+
+ rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
+
+ return rc;
+}
+
+static ssize_t __ref store_sched_static_cpu_pwr_cost(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int err;
+ int cpuid = cpu->dev.id;
+ unsigned int pwr_cost;
+
+ err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
+ if (err)
+ return err;
+
+ err = sched_set_static_cpu_pwr_cost(cpuid, pwr_cost);
+
+ if (err >= 0)
+ err = count;
+
+ return err;
+}
+
+static ssize_t show_sched_static_cluster_pwr_cost(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ ssize_t rc;
+ int cpuid = cpu->dev.id;
+ unsigned int pwr_cost;
+
+ pwr_cost = sched_get_static_cluster_pwr_cost(cpuid);
+
+ rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
+
+ return rc;
+}
+
+static ssize_t __ref store_sched_static_cluster_pwr_cost(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int err;
+ int cpuid = cpu->dev.id;
+ unsigned int pwr_cost;
+
+ err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
+ if (err)
+ return err;
+
+ err = sched_set_static_cluster_pwr_cost(cpuid, pwr_cost);
+
+ if (err >= 0)
+ err = count;
+
+ return err;
+}
+
+static ssize_t show_sched_cluster_wake_idle(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ ssize_t rc;
+ int cpuid = cpu->dev.id;
+ unsigned int wake_up_idle;
+
+ wake_up_idle = sched_get_cluster_wake_idle(cpuid);
+
+ rc = scnprintf(buf, PAGE_SIZE-2, "%d\n", wake_up_idle);
+
+ return rc;
+}
+
+static ssize_t __ref store_sched_cluster_wake_idle(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int err;
+ int cpuid = cpu->dev.id;
+ unsigned int wake_up_idle;
+
+ err = kstrtouint(strstrip((char *)buf), 0, &wake_up_idle);
+ if (err)
+ return err;
+
+ err = sched_set_cluster_wake_idle(cpuid, wake_up_idle);
+
+ if (err >= 0)
+ err = count;
+
+ return err;
+}
+
+static DEVICE_ATTR(sched_static_cpu_pwr_cost, 0644,
+ show_sched_static_cpu_pwr_cost,
+ store_sched_static_cpu_pwr_cost);
+static DEVICE_ATTR(sched_static_cluster_pwr_cost, 0644,
+ show_sched_static_cluster_pwr_cost,
+ store_sched_static_cluster_pwr_cost);
+static DEVICE_ATTR(sched_cluster_wake_up_idle, 0644,
+ show_sched_cluster_wake_idle,
+ store_sched_cluster_wake_idle);
+
+static struct attribute *hmp_sched_cpu_attrs[] = {
+ &dev_attr_sched_static_cpu_pwr_cost.attr,
+ &dev_attr_sched_static_cluster_pwr_cost.attr,
+ &dev_attr_sched_cluster_wake_up_idle.attr,
+ NULL
+};
+
+static struct attribute_group sched_hmp_cpu_attr_group = {
+ .attrs = hmp_sched_cpu_attrs,
+};
+
+#endif /* CONFIG_SCHED_HMP */
+
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
+#ifdef CONFIG_SCHED_HMP
+ &sched_hmp_cpu_attr_group,
+#endif
+#ifdef CONFIG_HOTPLUG_CPU
+ &cpu_isolated_attr_group,
+#endif
NULL
};
@@ -191,6 +358,12 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
+#ifdef CONFIG_SCHED_HMP
+ &sched_hmp_cpu_attr_group,
+#endif
+#ifdef CONFIG_HOTPLUG_CPU
+ &cpu_isolated_attr_group,
+#endif
NULL
};
@@ -220,6 +393,7 @@ static struct cpu_attr cpu_attrs[] = {
_CPU_ATTR(online, &cpu_online_mask),
_CPU_ATTR(possible, &cpu_possible_mask),
_CPU_ATTR(present, &cpu_present_mask),
+ _CPU_ATTR(core_ctl_isolated, &cpu_isolated_mask),
};
/*
@@ -454,6 +628,7 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[0].attr.attr,
&cpu_attrs[1].attr.attr,
&cpu_attrs[2].attr.attr,
+ &cpu_attrs[3].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
&dev_attr_isolated.attr,
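The new attributes appear as ordinary sysfs files, e.g. /sys/devices/system/cpu/cpuN/isolate for the per-CPU isolation flag added above. A minimal userspace sketch, assuming that path from the attribute names in this diff:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/isolate", "r");
		int isolated = -1;

		if (!f)
			return 1;
		if (fscanf(f, "%d", &isolated) == 1)
			printf("cpu0 isolated: %d\n", isolated); /* 0 or 1 */
		fclose(f);
		return 0;
	}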
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9e425fbf83cb..0dd6379ac215 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -171,26 +171,49 @@ static void driver_deferred_probe_trigger(void)
queue_work(deferred_wq, &deferred_probe_work);
}
+static void enable_trigger_defer_cycle(void)
+{
+ driver_deferred_probe_enable = true;
+ driver_deferred_probe_trigger();
+ /*
+ * Sort as many dependencies as possible before the next initcall
+ * level
+ */
+ flush_workqueue(deferred_wq);
+}
+
/**
* deferred_probe_initcall() - Enable probing of deferred devices
*
* We don't want to get in the way when the bulk of drivers are getting probed.
* Instead, this initcall makes sure that deferred probing is delayed until
- * late_initcall time.
+ * all the registered initcall functions at a particular level are completed.
+ * This function is invoked at every *_initcall_sync level.
*/
static int deferred_probe_initcall(void)
{
- deferred_wq = create_singlethread_workqueue("deferwq");
- if (WARN_ON(!deferred_wq))
- return -ENOMEM;
+ if (!deferred_wq) {
+ deferred_wq = create_singlethread_workqueue("deferwq");
+ if (WARN_ON(!deferred_wq))
+ return -ENOMEM;
+ }
- driver_deferred_probe_enable = true;
- driver_deferred_probe_trigger();
- /* Sort as many dependencies as possible before exiting initcalls */
- flush_workqueue(deferred_wq);
+ enable_trigger_defer_cycle();
+ driver_deferred_probe_enable = false;
+ return 0;
+}
+arch_initcall_sync(deferred_probe_initcall);
+subsys_initcall_sync(deferred_probe_initcall);
+fs_initcall_sync(deferred_probe_initcall);
+device_initcall_sync(deferred_probe_initcall);
+
+static int deferred_probe_enable_fn(void)
+{
+ /* Enable deferred probing for all time */
+ enable_trigger_defer_cycle();
return 0;
}
-late_initcall(deferred_probe_initcall);
+late_initcall(deferred_probe_enable_fn);
static void driver_bound(struct device *dev)
{
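With this change the deferred-probe workqueue is flushed at every *_initcall_sync level instead of once at late_initcall, so dependency chains resolve earlier in boot. Drivers opt in exactly as before, by returning -EPROBE_DEFER when a resource is not ready; a short illustrative sketch (foo_probe and its clock are hypothetical):

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, NULL);

		/* Provider not bound yet: the core queues this device on the
		 * deferred list and retries it at the next deferwq flush. */
		if (IS_ERR(clk))
			return PTR_ERR(clk); /* may be -EPROBE_DEFER */

		return 0;
	}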
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index a00f7b79202b..7bc0d4a24c22 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -309,7 +309,12 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
void *ptr;
unsigned long pfn;
- pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+ pages = kmalloc(sizeof(struct page *) << get_order(size),
+ GFP_KERNEL | __GFP_NOWARN);
+
+ if (!pages)
+ pages = vmalloc(sizeof(struct page *) << get_order(size));
+
if (!pages)
return NULL;
@@ -318,20 +323,24 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);
- kfree(pages);
+ kvfree(pages);
return ptr;
}
+ area->pages = pages;
+
/*
 * unmaps a range previously mapped by dma_common_*_remap
*/
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
+ bool no_warn)
{
struct vm_struct *area = find_vm_area(cpu_addr);
if (!area || (area->flags & vm_flags) != vm_flags) {
- WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ WARN(!no_warn, "trying to free invalid coherent area: %p\n",
+ cpu_addr);
return;
}
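The kmalloc-then-vmalloc fallback above is the usual pattern for a page-pointer array whose size scales with the buffer and may exceed what the page allocator can satisfy; kvfree() releases either kind. A standalone sketch of the same pattern (alloc_page_array is illustrative, not part of the patch):

	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static struct page **alloc_page_array(size_t count)
	{
		struct page **pages;

		/* quiet slab attempt first, then fall back to vmalloc */
		pages = kmalloc(count * sizeof(*pages),
				GFP_KERNEL | __GFP_NOWARN);
		if (!pages)
			pages = vmalloc(count * sizeof(*pages));
		return pages; /* release with kvfree() */
	}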
diff --git a/drivers/base/dma-removed.c b/drivers/base/dma-removed.c
new file mode 100644
index 000000000000..5fa3c6bdeea0
--- /dev/null
+++ b/drivers/base/dma-removed.c
@@ -0,0 +1,452 @@
+/*
+ *
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2000-2004 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <asm/dma-contiguous.h>
+#include <asm/tlbflush.h>
+
+struct removed_region {
+ phys_addr_t base;
+ int nr_pages;
+ unsigned long *bitmap;
+ int fixup;
+ struct mutex lock;
+};
+
+#define NO_KERNEL_MAPPING_DUMMY 0x2222
+
+static int dma_init_removed_memory(phys_addr_t phys_addr, size_t size,
+ struct removed_region **mem)
+{
+ struct removed_region *dma_mem = NULL;
+ int pages = size >> PAGE_SHIFT;
+ int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+ dma_mem = kzalloc(sizeof(struct removed_region), GFP_KERNEL);
+ if (!dma_mem)
+ goto out;
+ dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!dma_mem->bitmap)
+ goto free1_out;
+
+ dma_mem->base = phys_addr;
+ dma_mem->nr_pages = pages;
+ mutex_init(&dma_mem->lock);
+
+ *mem = dma_mem;
+
+ return 0;
+
+free1_out:
+ kfree(dma_mem);
+out:
+ return -ENOMEM;
+}
+
+static int dma_assign_removed_region(struct device *dev,
+ struct removed_region *mem)
+{
+ if (dev->removed_mem)
+ return -EBUSY;
+
+ dev->removed_mem = mem;
+ return 0;
+}
+
+static void adapt_iomem_resource(unsigned long base_pfn, unsigned long end_pfn)
+{
+ struct resource *res, *conflict;
+ resource_size_t cstart, cend;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return;
+
+ res->name = "System RAM";
+ res->start = __pfn_to_phys(base_pfn);
+ res->end = __pfn_to_phys(end_pfn) - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ conflict = request_resource_conflict(&iomem_resource, res);
+ if (!conflict) {
+ pr_err("Removed memory: no conflict resource found\n");
+ kfree(res);
+ goto done;
+ }
+
+ cstart = conflict->start;
+ cend = conflict->end;
+ if ((cstart == res->start) && (cend == res->end)) {
+ release_resource(conflict);
+ } else if ((res->start >= cstart) && (res->start <= cend)) {
+ if (res->start == cstart) {
+ adjust_resource(conflict, res->end + 1,
+ cend - res->end);
+ } else if (res->end == cend) {
+ adjust_resource(conflict, cstart,
+ res->start - cstart);
+ } else {
+ adjust_resource(conflict, cstart,
+ res->start - cstart);
+ res->start = res->end + 1;
+ res->end = cend;
+ request_resource(&iomem_resource, res);
+ goto done;
+ }
+ } else {
+ pr_err("Removed memory: incorrect resource conflict start=%llx end=%llx\n",
+ (unsigned long long) conflict->start,
+ (unsigned long long) conflict->end);
+ }
+
+ kfree(res);
+done:
+ return;
+}
+
+#ifdef CONFIG_FLATMEM
+static void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+ struct page *start_pg, *end_pg;
+ unsigned long pg, pgend;
+
+ start_pfn = ALIGN(start_pfn, pageblock_nr_pages);
+ end_pfn = round_down(end_pfn, pageblock_nr_pages);
+ /*
+ * Convert start_pfn/end_pfn to a struct page pointer.
+ */
+ start_pg = pfn_to_page(start_pfn - 1) + 1;
+ end_pg = pfn_to_page(end_pfn - 1) + 1;
+
+ /*
+ * Convert to physical addresses, and round start upwards and end
+ * downwards.
+ */
+ pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
+ pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
+
+ /*
+ * If there are free pages between these, free the section of the
+ * memmap array.
+ */
+ if (pg < pgend)
+ free_bootmem_late(pg, pgend - pg);
+}
+#else
+static void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+}
+#endif
+
+static int _clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ pte_clear(&init_mm, addr, pte);
+ return 0;
+}
+
+static void clear_mapping(unsigned long addr, unsigned long size)
+{
+ apply_to_page_range(&init_mm, addr, size, _clear_pte, NULL);
+ /* ensure ptes are updated */
+ mb();
+ flush_tlb_kernel_range(addr, addr + size);
+}
+
+static void removed_region_fixup(struct removed_region *dma_mem, int index)
+{
+ unsigned long fixup_size;
+ unsigned long base_pfn;
+ unsigned long flags;
+
+ if (index > dma_mem->nr_pages)
+ return;
+
+ /* carve-out */
+ flags = memblock_region_resize_late_begin();
+ memblock_free(dma_mem->base, dma_mem->nr_pages * PAGE_SIZE);
+ memblock_remove(dma_mem->base, index * PAGE_SIZE);
+ memblock_region_resize_late_end(flags);
+
+ /* clear page-mappings */
+ base_pfn = dma_mem->base >> PAGE_SHIFT;
+ if (!PageHighMem(pfn_to_page(base_pfn))) {
+ clear_mapping((unsigned long) phys_to_virt(dma_mem->base),
+ index * PAGE_SIZE);
+ }
+
+ /* free page objects */
+ free_memmap(base_pfn, base_pfn + index);
+
+ /* return remaining area to system */
+ fixup_size = (dma_mem->nr_pages - index) * PAGE_SIZE;
+ free_bootmem_late(dma_mem->base + index * PAGE_SIZE, fixup_size);
+
+ /*
+ * release freed resource region so as to show up under iomem resource
+ * list
+ */
+ adapt_iomem_resource(base_pfn, base_pfn + index);
+
+ /* limit the fixup region */
+ dma_mem->nr_pages = index;
+}
+
+void *removed_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs)
+{
+ bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
+ attrs);
+ bool skip_zeroing = dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs);
+ int pageno;
+ unsigned long order;
+ void *addr = NULL;
+ struct removed_region *dma_mem = dev->removed_mem;
+ int nbits;
+ unsigned int align;
+
+ if (!gfpflags_allow_blocking(gfp))
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+ nbits = size >> PAGE_SHIFT;
+ order = get_order(size);
+
+ if (order > get_order(SZ_1M))
+ order = get_order(SZ_1M);
+
+ align = (1 << order) - 1;
+
+
+ mutex_lock(&dma_mem->lock);
+ pageno = bitmap_find_next_zero_area(dma_mem->bitmap, dma_mem->nr_pages,
+ 0, nbits, align);
+
+ if (pageno < dma_mem->nr_pages) {
+ phys_addr_t base = dma_mem->base + pageno * PAGE_SIZE;
+ *handle = base;
+
+ bitmap_set(dma_mem->bitmap, pageno, nbits);
+
+ if (dma_mem->fixup) {
+ removed_region_fixup(dma_mem, pageno + nbits);
+ dma_mem->fixup = 0;
+ }
+
+ if (no_kernel_mapping && skip_zeroing) {
+ addr = (void *)NO_KERNEL_MAPPING_DUMMY;
+ goto out;
+ }
+
+ addr = ioremap(base, size);
+ if (WARN_ON(!addr)) {
+ bitmap_clear(dma_mem->bitmap, pageno, nbits);
+ } else {
+ if (!skip_zeroing)
+ memset_io(addr, 0, size);
+ if (no_kernel_mapping) {
+ iounmap(addr);
+ addr = (void *)NO_KERNEL_MAPPING_DUMMY;
+ }
+ *handle = base;
+ }
+ }
+
+out:
+ mutex_unlock(&dma_mem->lock);
+ return addr;
+}
+
+
+int removed_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ return -ENXIO;
+}
+
+void removed_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
+{
+ bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
+ attrs);
+ struct removed_region *dma_mem = dev->removed_mem;
+
+ size = PAGE_ALIGN(size);
+ if (!no_kernel_mapping)
+ iounmap(cpu_addr);
+ mutex_lock(&dma_mem->lock);
+ bitmap_clear(dma_mem->bitmap, (handle - dma_mem->base) >> PAGE_SHIFT,
+ size >> PAGE_SHIFT);
+ mutex_unlock(&dma_mem->lock);
+}
+
+static dma_addr_t removed_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ return ~(dma_addr_t)0;
+}
+
+static void removed_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ return;
+}
+
+static int removed_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ return 0;
+}
+
+static void removed_unmap_sg(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ return;
+}
+
+static void removed_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ return;
+}
+
+void removed_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ return;
+}
+
+void removed_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ return;
+}
+
+void removed_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ return;
+}
+
+void *removed_remap(struct device *dev, void *cpu_addr, dma_addr_t handle,
+ size_t size, struct dma_attrs *attrs)
+{
+ return ioremap(handle, size);
+}
+
+void removed_unremap(struct device *dev, void *remapped_address, size_t size)
+{
+ iounmap(remapped_address);
+}
+
+struct dma_map_ops removed_dma_ops = {
+ .alloc = removed_alloc,
+ .free = removed_free,
+ .mmap = removed_mmap,
+ .map_page = removed_map_page,
+ .unmap_page = removed_unmap_page,
+ .map_sg = removed_map_sg,
+ .unmap_sg = removed_unmap_sg,
+ .sync_single_for_cpu = removed_sync_single_for_cpu,
+ .sync_single_for_device = removed_sync_single_for_device,
+ .sync_sg_for_cpu = removed_sync_sg_for_cpu,
+ .sync_sg_for_device = removed_sync_sg_for_device,
+ .remap = removed_remap,
+ .unremap = removed_unremap,
+};
+EXPORT_SYMBOL(removed_dma_ops);
+
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+ struct removed_region *mem = rmem->priv;
+ if (!mem && dma_init_removed_memory(rmem->base, rmem->size, &mem)) {
+ pr_info("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+ &rmem->base, (unsigned long)rmem->size / SZ_1M);
+ return -EINVAL;
+ }
+ mem->fixup = rmem->fixup;
+ set_dma_ops(dev, &removed_dma_ops);
+ rmem->priv = mem;
+ dma_assign_removed_region(dev, mem);
+ return 0;
+}
+
+static void rmem_dma_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ dev->dma_mem = NULL;
+}
+
+static const struct reserved_mem_ops removed_mem_ops = {
+ .device_init = rmem_dma_device_init,
+ .device_release = rmem_dma_device_release,
+};
+
+static int __init removed_dma_setup(struct reserved_mem *rmem)
+{
+ unsigned long node = rmem->fdt_node;
+ int nomap, fixup;
+
+ nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+ fixup = of_get_flat_dt_prop(node, "no-map-fixup", NULL) != NULL;
+
+ if (nomap && fixup) {
+ pr_err("Removed memory: nomap & nomap-fixup can't co-exist\n");
+ return -EINVAL;
+ }
+
+ rmem->fixup = fixup;
+ if (rmem->fixup) {
+ /* Architecture specific contiguous memory fixup only for
+ * no-map-fixup to split mappings
+ */
+ dma_contiguous_early_fixup(rmem->base, rmem->size);
+ }
+
+ rmem->ops = &removed_mem_ops;
+ pr_info("Removed memory: created DMA memory pool at %pa, size %ld MiB\n",
+ &rmem->base, (unsigned long)rmem->size / SZ_1M);
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(dma, "removed-dma-pool", removed_dma_setup);
+#endif
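A device whose reserved-memory node carries the removed-dma-pool compatible (with no-map or no-map-fixup) allocates from the carve-out through the normal DMA API; when both DMA_ATTR_NO_KERNEL_MAPPING and DMA_ATTR_SKIP_ZEROING are set, removed_alloc() above returns only a dummy cookie. A hedged sketch against the 4.4-era struct dma_attrs interface (alloc_from_removed_pool and the 1 MiB size are illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	static dma_addr_t alloc_from_removed_pool(struct device *dev)
	{
		DEFINE_DMA_ATTRS(attrs);
		dma_addr_t handle = 0;
		void *cpu;

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);

		/* cpu is NO_KERNEL_MAPPING_DUMMY here, not a usable pointer;
		 * only the physical handle is meaningful to the caller. */
		cpu = dma_alloc_attrs(dev, SZ_1M, &handle, GFP_KERNEL, &attrs);
		return cpu ? handle : 0;
	}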
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index ccfd268148a8..a1696e1d199f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -29,6 +29,7 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
+#include <linux/io.h>
#include <generated/utsrelease.h>
@@ -111,6 +112,7 @@ static inline long firmware_loading_timeout(void)
#define FW_OPT_FALLBACK 0
#endif
#define FW_OPT_NO_WARN (1U << 3)
+#define FW_OPT_NOCACHE (1U << 4)
struct firmware_cache {
/* firmware_buf instance will be added into the below list */
@@ -142,6 +144,11 @@ struct firmware_buf {
unsigned long status;
void *data;
size_t size;
+ phys_addr_t dest_addr;
+ size_t dest_size;
+ void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data);
+ void (*unmap_fw_mem)(void *virt, size_t size, void *data);
+ void *map_data;
#ifdef CONFIG_FW_LOADER_USER_HELPER
bool is_paged_buf;
bool need_uevent;
@@ -163,6 +170,22 @@ struct fw_name_devm {
const char *name;
};
+struct fw_desc {
+ struct work_struct work;
+ const struct firmware **firmware_p;
+ const char *name;
+ struct device *device;
+ unsigned int opt_flags;
+ phys_addr_t dest_addr;
+ size_t dest_size;
+ void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data);
+ void (*unmap_fw_mem)(void *virt, size_t size, void *data);
+ void *map_data;
+ struct module *module;
+ void *context;
+ void (*cont)(const struct firmware *fw, void *context);
+};
+
#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
#define FW_LOADER_NO_CACHE 0
@@ -249,6 +272,9 @@ static void __fw_free_buf(struct kref *ref)
(unsigned int)buf->size);
list_del(&buf->list);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ list_del(&buf->pending_list);
+#endif
spin_unlock(&fwc->lock);
#ifdef CONFIG_FW_LOADER_USER_HELPER
@@ -268,6 +294,11 @@ static void __fw_free_buf(struct kref *ref)
static void fw_free_buf(struct firmware_buf *buf)
{
struct firmware_cache *fwc = buf->fwc;
+ if (!fwc) {
+ kfree_const(buf->fw_id);
+ kfree(buf);
+ return;
+ }
spin_lock(&fwc->lock);
if (!kref_put(&buf->ref, __fw_free_buf))
spin_unlock(&fwc->lock);
@@ -280,7 +311,8 @@ static const char * const fw_path[] = {
"/lib/firmware/updates/" UTS_RELEASE,
"/lib/firmware/updates",
"/lib/firmware/" UTS_RELEASE,
- "/lib/firmware"
+ "/lib/firmware",
+ "/lib64/firmware"
};
/*
@@ -302,7 +334,14 @@ static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
size = i_size_read(file_inode(file));
if (size <= 0)
return -EINVAL;
- buf = vmalloc(size);
+ if (fw_buf->dest_size > 0 && fw_buf->dest_size < size)
+ return -EINVAL;
+
+ if (fw_buf->dest_addr)
+ buf = fw_buf->map_fw_mem(fw_buf->dest_addr,
+ fw_buf->dest_size, fw_buf->map_data);
+ else
+ buf = vmalloc(size);
if (!buf)
return -ENOMEM;
rc = kernel_read(file, 0, buf, size);
@@ -316,14 +355,20 @@ static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
goto fail;
fw_buf->data = buf;
fw_buf->size = size;
+ if (fw_buf->dest_addr)
+ fw_buf->unmap_fw_mem(buf, fw_buf->size, fw_buf->map_data);
return 0;
fail:
- vfree(buf);
+ if (fw_buf->dest_addr)
+ fw_buf->unmap_fw_mem(buf, fw_buf->size, fw_buf->map_data);
+ else
+ vfree(buf);
return rc;
}
static int fw_get_filesystem_firmware(struct device *device,
- struct firmware_buf *buf)
+ struct firmware_buf *buf,
+ phys_addr_t dest_addr, size_t dest_size)
{
int i, len;
int rc = -ENOENT;
@@ -658,6 +703,10 @@ static ssize_t firmware_loading_store(struct device *dev,
case 1:
/* discarding any previous partial load */
if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
+ if (fw_buf->dest_addr) {
+ set_bit(FW_STATUS_LOADING, &fw_buf->status);
+ break;
+ }
for (i = 0; i < fw_buf->nr_pages; i++)
__free_page(fw_buf->pages[i]);
kfree(fw_buf->pages);
@@ -715,6 +764,102 @@ out:
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
+static int __firmware_data_rw(struct firmware_priv *fw_priv, char *buffer,
+ loff_t *offset, size_t count, int read)
+{
+ u8 __iomem *fw_buf;
+ struct firmware_buf *buf = fw_priv->buf;
+ int retval = count;
+
+ if ((*offset + count) > buf->dest_size) {
+ pr_debug("%s: Failed size check.\n", __func__);
+ retval = -EINVAL;
+ goto out;
+ }
+
+ fw_buf = buf->map_fw_mem(buf->dest_addr + *offset, count,
+ buf->map_data);
+ if (!fw_buf) {
+ pr_debug("%s: Failed ioremap.\n", __func__);
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ if (read)
+ memcpy(buffer, fw_buf, count);
+ else
+ memcpy(fw_buf, buffer, count);
+
+ *offset += count;
+ buf->unmap_fw_mem(fw_buf, count, buf->map_data);
+
+out:
+ return retval;
+}
+
+static ssize_t firmware_direct_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ struct firmware *fw;
+ ssize_t ret_count;
+
+ if (!fw_priv->fw)
+ return -ENODEV;
+
+ mutex_lock(&fw_lock);
+ fw = fw_priv->fw;
+
+ if (offset > fw->size) {
+ ret_count = 0;
+ goto out;
+ }
+ if (count > fw->size - offset)
+ count = fw->size - offset;
+
+ if (test_bit(FW_STATUS_DONE, &fw_priv->buf->status)) {
+ ret_count = -ENODEV;
+ goto out;
+ }
+
+ ret_count = __firmware_data_rw(fw_priv, buffer, &offset, count, 1);
+out:
+ mutex_unlock(&fw_lock);
+ return ret_count;
+}
+
+static ssize_t firmware_direct_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ struct firmware *fw;
+ ssize_t retval;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ mutex_lock(&fw_lock);
+ fw = fw_priv->fw;
+ if (!fw || !fw_priv->buf ||
+ test_bit(FW_STATUS_DONE, &fw_priv->buf->status)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ retval = __firmware_data_rw(fw_priv, buffer, &offset, count, 0);
+ if (retval < 0)
+ goto out;
+
+ fw_priv->buf->size = max_t(size_t, offset, fw_priv->buf->size);
+out:
+ mutex_unlock(&fw_lock);
+ return retval;
+}
+
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
@@ -864,6 +1009,13 @@ static struct bin_attribute firmware_attr_data = {
.write = firmware_data_write,
};
+static struct bin_attribute firmware_direct_attr_data = {
+ .attr = { .name = "data", .mode = 0644 },
+ .size = 0,
+ .read = firmware_direct_read,
+ .write = firmware_direct_write,
+};
+
static struct attribute *fw_dev_attrs[] = {
&dev_attr_loading.attr,
NULL
@@ -879,31 +1031,47 @@ static const struct attribute_group fw_dev_attr_group = {
.bin_attrs = fw_dev_bin_attrs,
};
+static struct bin_attribute *fw_dev_direct_bin_attrs[] = {
+ &firmware_direct_attr_data,
+ NULL
+};
+
+static const struct attribute_group fw_dev_direct_attr_group = {
+ .attrs = fw_dev_attrs,
+ .bin_attrs = fw_dev_direct_bin_attrs,
+};
+
static const struct attribute_group *fw_dev_attr_groups[] = {
&fw_dev_attr_group,
NULL
};
+static const struct attribute_group *fw_dev_direct_attr_groups[] = {
+ &fw_dev_direct_attr_group,
+ NULL
+};
+
static struct firmware_priv *
-fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, unsigned int opt_flags)
+fw_create_instance(struct firmware *firmware, struct fw_desc *desc)
{
struct firmware_priv *fw_priv;
struct device *f_dev;
fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
if (!fw_priv) {
+ dev_err(desc->device, "%s: kmalloc failed\n", __func__);
fw_priv = ERR_PTR(-ENOMEM);
goto exit;
}
- fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
+ fw_priv->nowait = !!(desc->opt_flags & FW_OPT_NOWAIT);
fw_priv->fw = firmware;
+
f_dev = &fw_priv->dev;
device_initialize(f_dev);
- dev_set_name(f_dev, "%s", fw_name);
- f_dev->parent = device;
+ dev_set_name(f_dev, "%s", desc->name);
+ f_dev->parent = desc->device;
f_dev->class = &firmware_class;
f_dev->groups = fw_dev_attr_groups;
exit:
@@ -919,7 +1087,10 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
struct firmware_buf *buf = fw_priv->buf;
/* fall back on userspace loading */
- buf->is_paged_buf = true;
+ buf->is_paged_buf = buf->dest_addr ? false : true;
+
+ if (buf->dest_addr)
+ f_dev->groups = fw_dev_direct_attr_groups;
dev_set_uevent_suppress(f_dev, true);
@@ -955,7 +1126,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
if (is_fw_load_aborted(buf))
retval = -EAGAIN;
- else if (!buf->data)
+ else if (!buf->data && buf->is_paged_buf)
retval = -ENOMEM;
device_del(f_dev);
@@ -965,17 +1136,16 @@ err_put_dev:
}
static int fw_load_from_user_helper(struct firmware *firmware,
- const char *name, struct device *device,
- unsigned int opt_flags, long timeout)
+ struct fw_desc *desc, long timeout)
{
struct firmware_priv *fw_priv;
- fw_priv = fw_create_instance(firmware, name, device, opt_flags);
+ fw_priv = fw_create_instance(firmware, desc);
if (IS_ERR(fw_priv))
return PTR_ERR(fw_priv);
fw_priv->buf = firmware->priv;
- return _request_firmware_load(fw_priv, opt_flags, timeout);
+ return _request_firmware_load(fw_priv, desc->opt_flags, timeout);
}
#ifdef CONFIG_PM_SLEEP
@@ -996,9 +1166,8 @@ static void kill_requests_without_uevent(void)
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int
-fw_load_from_user_helper(struct firmware *firmware, const char *name,
- struct device *device, unsigned int opt_flags,
- long timeout)
+fw_load_from_user_helper(struct firmware *firmware,
+ struct fw_desc *desc, long timeout)
{
return -ENOENT;
}
@@ -1037,8 +1206,7 @@ static int sync_cached_firmware_buf(struct firmware_buf *buf)
* or a negative error code
*/
static int
-_request_firmware_prepare(struct firmware **firmware_p, const char *name,
- struct device *device)
+_request_firmware_prepare(struct firmware **firmware_p, struct fw_desc *desc)
{
struct firmware *firmware;
struct firmware_buf *buf;
@@ -1046,17 +1214,31 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
if (!firmware) {
- dev_err(device, "%s: kmalloc(struct firmware) failed\n",
+ dev_err(desc->device, "%s: kmalloc(struct firmware) failed\n",
__func__);
return -ENOMEM;
}
- if (fw_get_builtin_firmware(firmware, name)) {
- dev_dbg(device, "firmware: using built-in firmware %s\n", name);
+ if (fw_get_builtin_firmware(firmware, desc->name)) {
+ dev_dbg(desc->device, "firmware: using built-in firmware %s\n",
+ desc->name);
return 0; /* assigned */
}
- ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
+ if (desc->opt_flags & FW_OPT_NOCACHE) {
+ buf = __allocate_fw_buf(desc->name, NULL);
+ if (!buf)
+ return -ENOMEM;
+ buf->dest_addr = desc->dest_addr;
+ buf->dest_size = desc->dest_size;
+ buf->map_fw_mem = desc->map_fw_mem;
+ buf->unmap_fw_mem = desc->unmap_fw_mem;
+ buf->map_data = desc->map_data;
+ firmware->priv = buf;
+ return 1;
+ }
+
+ ret = fw_lookup_and_allocate_buf(desc->name, &fw_cache, &buf);
/*
* bind with 'buf' now to avoid warning in failure path
@@ -1095,15 +1277,19 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
* device may has been deleted already, but the problem
* should be fixed in devres or driver core.
*/
- /* don't cache firmware handled without uevent */
- if (device && (opt_flags & FW_OPT_UEVENT))
+ /* don't cache firmware handled without uevent, or when explicitly
+ * disabled
+ */
+ if (device && (opt_flags & FW_OPT_UEVENT)
+ && !(opt_flags & FW_OPT_NOCACHE))
fw_add_devm_name(device, buf->fw_id);
/*
* After caching firmware image is started, let it piggyback
* on request firmware.
*/
- if (buf->fwc->state == FW_LOADER_START_CACHE) {
+ if (!(opt_flags & FW_OPT_NOCACHE)
+ && (buf->fwc->state == FW_LOADER_START_CACHE)) {
if (fw_cache_piggyback_on_request(buf->fw_id))
kref_get(&buf->ref);
}
@@ -1115,58 +1301,56 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
}
/* called from request_firmware() and request_firmware_work_func() */
-static int
-_request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device, unsigned int opt_flags)
+static int _request_firmware(struct fw_desc *desc)
{
struct firmware *fw;
long timeout;
int ret;
- if (!firmware_p)
+ if (!desc->firmware_p)
return -EINVAL;
- if (!name || name[0] == '\0')
+ if (!desc->name || desc->name[0] == '\0')
return -EINVAL;
- ret = _request_firmware_prepare(&fw, name, device);
+ ret = _request_firmware_prepare(&fw, desc);
if (ret <= 0) /* error or already assigned */
goto out;
ret = 0;
timeout = firmware_loading_timeout();
- if (opt_flags & FW_OPT_NOWAIT) {
+ if (desc->opt_flags & FW_OPT_NOWAIT) {
timeout = usermodehelper_read_lock_wait(timeout);
if (!timeout) {
- dev_dbg(device, "firmware: %s loading timed out\n",
- name);
+ dev_dbg(desc->device, "firmware: %s loading timed out\n",
+ desc->name);
ret = -EBUSY;
goto out;
}
} else {
ret = usermodehelper_read_trylock();
if (WARN_ON(ret)) {
- dev_err(device, "firmware: %s will not be loaded\n",
- name);
+ dev_err(desc->device, "firmware: %s will not be loaded\n",
+ desc->name);
goto out;
}
}
- ret = fw_get_filesystem_firmware(device, fw->priv);
+ ret = fw_get_filesystem_firmware(desc->device, fw->priv,
+ desc->dest_addr, desc->dest_size);
if (ret) {
- if (!(opt_flags & FW_OPT_NO_WARN))
- dev_warn(device,
+ if (!(desc->opt_flags & FW_OPT_NO_WARN))
+ dev_dbg(desc->device,
"Direct firmware load for %s failed with error %d\n",
- name, ret);
- if (opt_flags & FW_OPT_USERHELPER) {
- dev_warn(device, "Falling back to user helper\n");
- ret = fw_load_from_user_helper(fw, name, device,
- opt_flags, timeout);
+ desc->name, ret);
+ if (desc->opt_flags & FW_OPT_USERHELPER) {
+ dev_dbg(desc->device, "Falling back to user helper\n");
+ ret = fw_load_from_user_helper(fw, desc, timeout);
}
}
if (!ret)
- ret = assign_firmware_buf(fw, device, opt_flags);
+ ret = assign_firmware_buf(fw, desc->device, desc->opt_flags);
usermodehelper_read_unlock();
@@ -1176,7 +1360,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
fw = NULL;
}
- *firmware_p = fw;
+ *desc->firmware_p = fw;
return ret;
}
@@ -1204,13 +1388,21 @@ int
request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
+ struct fw_desc desc;
int ret;
+ desc.firmware_p = firmware_p;
+ desc.name = name;
+ desc.device = device;
+ desc.dest_addr = 0;
+ desc.dest_size = 0;
+ desc.opt_flags = FW_OPT_UEVENT | FW_OPT_FALLBACK;
+
/* Need to pin this module until return */
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware_p, name, device,
- FW_OPT_UEVENT | FW_OPT_FALLBACK);
+ ret = _request_firmware(&desc);
module_put(THIS_MODULE);
+
return ret;
}
EXPORT_SYMBOL(request_firmware);
@@ -1229,17 +1421,70 @@ EXPORT_SYMBOL(request_firmware);
int request_firmware_direct(const struct firmware **firmware_p,
const char *name, struct device *device)
{
+ struct fw_desc desc;
int ret;
+ desc.firmware_p = firmware_p;
+ desc.name = name;
+ desc.device = device;
+ desc.opt_flags = FW_OPT_UEVENT | FW_OPT_NO_WARN;
+
+ /* Need to pin this module until return */
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware_p, name, device,
- FW_OPT_UEVENT | FW_OPT_NO_WARN);
+ ret = _request_firmware(&desc);
module_put(THIS_MODULE);
+
return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);
/**
+ * request_firmware_into_buf: - send firmware request and wait for it
+ * @dest_addr: Destination address for the firmware
+ * @dest_size: Size of destination buffer
+ *
+ * Similar to request_firmware, except takes in a buffer address and
+ * copies firmware data directly to that buffer. Returns the size of
+ * the firmware that was loaded at dest_addr. This API prevents the
+ * caching of images.
+ */
+int
+request_firmware_into_buf(const char *name, struct device *device,
+ phys_addr_t dest_addr, size_t dest_size,
+ void * (*map_fw_mem)(phys_addr_t phys, size_t size,
+ void *data),
+ void (*unmap_fw_mem)(void *virt, size_t sz, void *data),
+ void *map_data)
+{
+ struct fw_desc desc;
+ const struct firmware *fp = NULL;
+ int ret;
+
+ if (dest_addr && !map_fw_mem)
+ return -EINVAL;
+ if (dest_addr && dest_size <= 0)
+ return -EINVAL;
+
+ desc.firmware_p = &fp;
+ desc.name = name;
+ desc.device = device;
+ desc.opt_flags = FW_OPT_FALLBACK | FW_OPT_UEVENT | FW_OPT_NOCACHE;
+ desc.dest_addr = dest_addr;
+ desc.dest_size = dest_size;
+ desc.map_fw_mem = map_fw_mem;
+ desc.unmap_fw_mem = unmap_fw_mem;
+ desc.map_data = map_data;
+
+ ret = _request_firmware(&desc);
+ if (ret)
+ return ret;
+ ret = fp->size;
+ release_firmware(fp);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(request_firmware_into_buf);
+
+/**
* release_firmware: - release the resource associated with a firmware image
* @fw: firmware resource to release
**/
@@ -1254,31 +1499,70 @@ void release_firmware(const struct firmware *fw)
EXPORT_SYMBOL(release_firmware);
/* Async support */
-struct firmware_work {
- struct work_struct work;
- struct module *module;
- const char *name;
- struct device *device;
- void *context;
- void (*cont)(const struct firmware *fw, void *context);
- unsigned int opt_flags;
-};
-
static void request_firmware_work_func(struct work_struct *work)
{
- struct firmware_work *fw_work;
const struct firmware *fw;
+ struct fw_desc *desc;
+
+ desc = container_of(work, struct fw_desc, work);
+ desc->firmware_p = &fw;
+ _request_firmware(desc);
+ desc->cont(fw, desc->context);
+ put_device(desc->device); /* taken in request_firmware_nowait() */
+
+ module_put(desc->module);
+ kfree(desc);
+}
+
+int
+_request_firmware_nowait(
+ struct module *module, bool uevent,
+ const char *name, struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context),
+ bool nocache, phys_addr_t dest_addr, size_t dest_size,
+ void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data),
+ void (*unmap_fw_mem)(void *virt, size_t size, void *data),
+ void *map_data)
+{
+ struct fw_desc *desc;
+
+ if (dest_addr && !map_fw_mem)
+ return -EINVAL;
+ if (dest_addr && dest_size <= 0)
+ return -EINVAL;
- fw_work = container_of(work, struct firmware_work, work);
+ desc = kzalloc(sizeof(struct fw_desc), gfp);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->module = module;
+ desc->name = name;
+ desc->device = device;
+ desc->context = context;
+ desc->cont = cont;
+ desc->dest_addr = dest_addr;
+ desc->dest_size = dest_size;
+ desc->map_fw_mem = map_fw_mem;
+ desc->unmap_fw_mem = unmap_fw_mem;
+ desc->map_data = map_data;
+ desc->opt_flags = FW_OPT_FALLBACK | FW_OPT_NOWAIT;
+
+ if (uevent)
+ desc->opt_flags |= FW_OPT_UEVENT;
+ else
+ desc->opt_flags |= FW_OPT_USERHELPER;
+ if (nocache)
+ desc->opt_flags |= FW_OPT_NOCACHE;
- _request_firmware(&fw, fw_work->name, fw_work->device,
- fw_work->opt_flags);
- fw_work->cont(fw, fw_work->context);
- put_device(fw_work->device); /* taken in request_firmware_nowait() */
+ if (!try_module_get(module)) {
+ kfree(desc);
+ return -EFAULT;
+ }
- module_put(fw_work->module);
- kfree_const(fw_work->name);
- kfree(fw_work);
+ get_device(desc->device);
+ INIT_WORK(&desc->work, request_firmware_work_func);
+ schedule_work(&desc->work);
+ return 0;
}
/**
@@ -1310,37 +1594,38 @@ request_firmware_nowait(
const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context))
{
- struct firmware_work *fw_work;
-
- fw_work = kzalloc(sizeof(struct firmware_work), gfp);
- if (!fw_work)
- return -ENOMEM;
-
- fw_work->module = module;
- fw_work->name = kstrdup_const(name, gfp);
- if (!fw_work->name) {
- kfree(fw_work);
- return -ENOMEM;
- }
- fw_work->device = device;
- fw_work->context = context;
- fw_work->cont = cont;
- fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
- (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
-
- if (!try_module_get(module)) {
- kfree_const(fw_work->name);
- kfree(fw_work);
- return -EFAULT;
- }
-
- get_device(fw_work->device);
- INIT_WORK(&fw_work->work, request_firmware_work_func);
- schedule_work(&fw_work->work);
- return 0;
+ return _request_firmware_nowait(module, uevent, name, device, gfp,
+ context, cont, false, 0, 0, NULL, NULL, NULL);
}
EXPORT_SYMBOL(request_firmware_nowait);
+/**
+ * request_firmware_nowait_into_buf - asynchronous version of request_firmware_into_buf
+ * @dest_addr: Destination address for the firmware
+ * @dest_size: Size of destination buffer
+ *
+ * Similar to request_firmware_nowait, except loads the firmware
+ * directly to a destination address without using an intermediate
+ * buffer.
+ *
+ **/
+int
+request_firmware_nowait_into_buf(
+ struct module *module, bool uevent,
+ const char *name, struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context),
+ phys_addr_t dest_addr, size_t dest_size,
+ void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data),
+ void (*unmap_fw_mem)(void *virt, size_t size, void *data),
+ void *map_data)
+{
+ return _request_firmware_nowait(module, uevent, name, device, gfp,
+ context, cont, true, dest_addr,
+ dest_size, map_fw_mem, unmap_fw_mem,
+ map_data);
+}
+EXPORT_SYMBOL_GPL(request_firmware_nowait_into_buf);
+
#ifdef CONFIG_PM_SLEEP
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
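request_firmware_into_buf() relies on a caller-supplied map/unmap pair so the loader can copy straight into device-owned memory that is not in the kernel linear map, and FW_OPT_NOCACHE keeps the image out of the firmware cache. A hedged sketch using ioremap (fw_map, fw_unmap, load_into_region and the firmware name are illustrative):

	#include <linux/firmware.h>
	#include <linux/io.h>

	static void *fw_map(phys_addr_t phys, size_t size, void *data)
	{
		return ioremap(phys, size);
	}

	static void fw_unmap(void *virt, size_t size, void *data)
	{
		iounmap(virt);
	}

	static int load_into_region(struct device *dev, phys_addr_t base,
				    size_t size)
	{
		/* returns the loaded size on success, negative errno on
		 * failure; nothing is cached after the call returns */
		return request_firmware_into_buf("vendor/image.fw", dev,
						 base, size, fw_map,
						 fw_unmap, NULL);
	}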
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index cb4ad6e98b28..ae7f3ce90bd2 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -117,6 +117,26 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
EXPORT_SYMBOL_GPL(platform_get_irq);
/**
+ * platform_irq_count - Count the number of IRQs a platform device uses
+ * @dev: platform device
+ *
+ * Return: Number of IRQs a platform device uses or EPROBE_DEFER
+ */
+int platform_irq_count(struct platform_device *dev)
+{
+ int ret, nr = 0;
+
+ while ((ret = platform_get_irq(dev, nr)) >= 0)
+ nr++;
+
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(platform_irq_count);
+
+/**
* platform_get_resource_byname - get a resource for a device by name
* @dev: platform device
* @type: resource type
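platform_irq_count() simply walks platform_get_irq() until it fails, passing -EPROBE_DEFER through so a probe can be retried once the interrupt controller is available. A short illustrative sketch (foo_probe is hypothetical):

	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		int i, nr = platform_irq_count(pdev);

		if (nr < 0)
			return nr; /* may be -EPROBE_DEFER */

		for (i = 0; i < nr; i++) {
			int irq = platform_get_irq(pdev, i);

			if (irq < 0)
				return irq;
			/* request_irq(irq, ...) per interrupt */
		}
		return 0;
	}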
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 6c5bc3fadfcf..a88590bb0b10 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -162,6 +162,12 @@ void device_pm_move_before(struct device *deva, struct device *devb)
pr_debug("PM: Moving %s:%s before %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
+ if (!((devb->pm_domain) || (devb->type && devb->type->pm)
+ || (devb->class && (devb->class->pm || devb->class->resume))
+ || (devb->bus && (devb->bus->pm || devb->bus->resume)) ||
+ (devb->driver && devb->driver->pm))) {
+ device_pm_add(devb);
+ }
/* Delete deva from dpm_list and reinsert before devb. */
list_move_tail(&deva->power.entry, &devb->power.entry);
}
@@ -176,6 +182,12 @@ void device_pm_move_after(struct device *deva, struct device *devb)
pr_debug("PM: Moving %s:%s after %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
+ if (!((devb->pm_domain) || (devb->type && devb->type->pm)
+ || (devb->class && (devb->class->pm || devb->class->resume))
+ || (devb->bus && (devb->bus->pm || devb->bus->resume)) ||
+ (devb->driver && devb->driver->pm))) {
+ device_pm_add(devb);
+ }
/* Delete deva from dpm_list and reinsert after devb. */
list_move(&deva->power.entry, &devb->power.entry);
}
diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile
index 33c1e18c41a4..19837ef04d8e 100644
--- a/drivers/base/power/opp/Makefile
+++ b/drivers/base/power/opp/Makefile
@@ -1,2 +1,3 @@
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
obj-y += core.o cpu.o
+obj-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index f8580900c273..433b60092972 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -13,50 +13,52 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/export.h>
+#include <linux/regulator/consumer.h>
#include "opp.h"
/*
- * The root of the list of all devices. All device_opp structures branch off
- * from here, with each device_opp containing the list of opp it supports in
+ * The root of the list of all opp-tables. All opp_table structures branch off
+ * from here, with each opp_table containing the list of opps it supports in
* various states of availability.
*/
-static LIST_HEAD(dev_opp_list);
+static LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
-DEFINE_MUTEX(dev_opp_list_lock);
+DEFINE_MUTEX(opp_table_lock);
#define opp_rcu_lockdep_assert() \
do { \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&dev_opp_list_lock), \
- "Missing rcu_read_lock() or " \
- "dev_opp_list_lock protection"); \
+ !lockdep_is_held(&opp_table_lock), \
+ "Missing rcu_read_lock() or " \
+ "opp_table_lock protection"); \
} while (0)
-static struct device_list_opp *_find_list_dev(const struct device *dev,
- struct device_opp *dev_opp)
+static struct opp_device *_find_opp_dev(const struct device *dev,
+ struct opp_table *opp_table)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
- list_for_each_entry(list_dev, &dev_opp->dev_list, node)
- if (list_dev->dev == dev)
- return list_dev;
+ list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+ if (opp_dev->dev == dev)
+ return opp_dev;
return NULL;
}
-static struct device_opp *_managed_opp(const struct device_node *np)
+static struct opp_table *_managed_opp(const struct device_node *np)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
- list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
- if (dev_opp->np == np) {
+ list_for_each_entry_rcu(opp_table, &opp_tables, node) {
+ if (opp_table->np == np) {
/*
* Multiple devices can point to the same OPP table and
* so will have same node-pointer, np.
@@ -64,7 +66,7 @@ static struct device_opp *_managed_opp(const struct device_node *np)
* But the OPPs will be considered as shared only if the
* OPP table contains a "opp-shared" property.
*/
- return dev_opp->shared_opp ? dev_opp : NULL;
+ return opp_table->shared_opp ? opp_table : NULL;
}
}
@@ -72,24 +74,24 @@ static struct device_opp *_managed_opp(const struct device_node *np)
}
/**
- * _find_device_opp() - find device_opp struct using device pointer
- * @dev: device pointer used to lookup device OPPs
+ * _find_opp_table() - find opp_table struct using device pointer
+ * @dev: device pointer used to lookup OPP table
*
- * Search list of device OPPs for one containing matching device. Does a RCU
- * reader operation to grab the pointer needed.
+ * Search OPP table for one containing matching device. Does a RCU reader
+ * operation to grab the pointer needed.
*
- * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
+ * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
* -EINVAL based on type of error.
*
* Locking: For readers, this function must be called under rcu_read_lock().
- * device_opp is a RCU protected pointer, which means that device_opp is valid
+ * opp_table is a RCU protected pointer, which means that opp_table is valid
* as long as we are under RCU lock.
*
- * For Writers, this function must be called with dev_opp_list_lock held.
+ * For Writers, this function must be called with opp_table_lock held.
*/
-struct device_opp *_find_device_opp(struct device *dev)
+struct opp_table *_find_opp_table(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
opp_rcu_lockdep_assert();
@@ -98,9 +100,9 @@ struct device_opp *_find_device_opp(struct device *dev)
return ERR_PTR(-EINVAL);
}
- list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
- if (_find_list_dev(dev, dev_opp))
- return dev_opp;
+ list_for_each_entry_rcu(opp_table, &opp_tables, node)
+ if (_find_opp_dev(dev, opp_table))
+ return opp_table;
return ERR_PTR(-ENODEV);
}
@@ -213,16 +215,16 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
*/
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
unsigned long clock_latency_ns;
rcu_read_lock();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
clock_latency_ns = 0;
else
- clock_latency_ns = dev_opp->clock_latency_ns_max;
+ clock_latency_ns = opp_table->clock_latency_ns_max;
rcu_read_unlock();
return clock_latency_ns;
@@ -230,6 +232,82 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
/**
+ * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max voltage latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *opp;
+ struct regulator *reg;
+ unsigned long latency_ns = 0;
+ unsigned long min_uV = ~0, max_uV = 0;
+ int ret;
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ reg = opp_table->regulator;
+ if (IS_ERR(reg)) {
+ /* Regulator may not be required for device */
+ if (reg)
+ dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
+ PTR_ERR(reg));
+ rcu_read_unlock();
+ return 0;
+ }
+
+ list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+ if (!opp->available)
+ continue;
+
+ if (opp->u_volt_min < min_uV)
+ min_uV = opp->u_volt_min;
+ if (opp->u_volt_max > max_uV)
+ max_uV = opp->u_volt_max;
+ }
+
+ rcu_read_unlock();
+
+ /*
+ * The caller needs to ensure that opp_table (and hence the regulator)
+ * isn't freed, while we are executing this routine.
+ */
+ ret = regulator_set_voltage_time(reg, min_uV, max_uV);
+ if (ret > 0)
+ latency_ns = ret * 1000;
+
+ return latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
+
+/**
+ * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
+ * nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max transition latency, in nanoseconds, to
+ * switch from one OPP to other.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+ return dev_pm_opp_get_max_volt_latency(dev) +
+ dev_pm_opp_get_max_clock_latency(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
+
+/**
* dev_pm_opp_get_suspend_opp() - Get suspend opp
* @dev: device for which we do this operation
*
@@ -244,21 +322,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
*/
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
opp_rcu_lockdep_assert();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
- !dev_opp->suspend_opp->available)
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
+ !opp_table->suspend_opp->available)
return NULL;
- return dev_opp->suspend_opp;
+ return opp_table->suspend_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
/**
- * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
* @dev: device for which we do this operation
*
* Return: This function returns the number of available opps if there are any,
@@ -268,21 +346,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp;
int count = 0;
rcu_read_lock();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- count = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n",
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ count = PTR_ERR(opp_table);
+ dev_err(dev, "%s: OPP table not found (%d)\n",
__func__, count);
goto out_unlock;
}
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available)
count++;
}
@@ -299,7 +377,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
* @freq: frequency to search for
* @available: true/false - match for available opp
*
- * Return: Searches for exact match in the opp list and returns pointer to the
+ * Return: Searches for exact match in the opp table and returns pointer to the
* matching opp if found, else returns ERR_PTR in case of error and should
* be handled using IS_ERR. Error return values can be:
* EINVAL: for bad pointer
@@ -323,19 +401,20 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int r = PTR_ERR(opp_table);
+
+ dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
return ERR_PTR(r);
}
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available == available &&
temp_opp->rate == freq) {
opp = temp_opp;
@@ -371,7 +450,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
@@ -381,11 +460,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
return ERR_PTR(-EINVAL);
}
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available && temp_opp->rate >= *freq) {
opp = temp_opp;
*freq = opp->rate;
@@ -421,7 +500,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
@@ -431,11 +510,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
return ERR_PTR(-EINVAL);
}
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available) {
/* go to the next node, before choosing prev */
if (temp_opp->rate > *freq)
@@ -451,116 +530,343 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
-/* List-dev Helpers */
-static void _kfree_list_dev_rcu(struct rcu_head *head)
+/*
+ * The caller needs to ensure that opp_table (and hence the clk) isn't freed
+ * while the clk returned here is in use.
+ */
+static struct clk *_get_opp_clk(struct device *dev)
{
- struct device_list_opp *list_dev;
+ struct opp_table *opp_table;
+ struct clk *clk;
+
+ rcu_read_lock();
- list_dev = container_of(head, struct device_list_opp, rcu_head);
- kfree_rcu(list_dev, rcu_head);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ clk = ERR_CAST(opp_table);
+ goto unlock;
+ }
+
+ clk = opp_table->clk;
+ if (IS_ERR(clk))
+ dev_err(dev, "%s: No clock available for the device\n",
+ __func__);
+
+unlock:
+ rcu_read_unlock();
+ return clk;
}
-static void _remove_list_dev(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static int _set_opp_voltage(struct device *dev, struct regulator *reg,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
{
- list_del(&list_dev->node);
- call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
- _kfree_list_dev_rcu);
+ int ret;
+
+ /* Regulator not available for device */
+ if (IS_ERR(reg)) {
+ dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
+ PTR_ERR(reg));
+ return 0;
+ }
+
+ dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
+ u_volt, u_volt_max);
+
+ ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
+ u_volt_max);
+ if (ret)
+ dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
+ __func__, u_volt_min, u_volt, u_volt_max, ret);
+
+ return ret;
}
-struct device_list_opp *_add_list_dev(const struct device *dev,
- struct device_opp *dev_opp)
+/**
+ * dev_pm_opp_set_rate() - Configure new OPP based on frequency
+ * @dev: device for which we do this operation
+ * @target_freq: frequency to achieve
+ *
+ * This configures the power-supplies and clock source to the levels specified
+ * by the OPP corresponding to the target_freq.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
- struct device_list_opp *list_dev;
+ struct opp_table *opp_table;
+ struct dev_pm_opp *old_opp, *opp;
+ struct regulator *reg;
+ struct clk *clk;
+ unsigned long freq, old_freq;
+ unsigned long u_volt, u_volt_min, u_volt_max;
+ unsigned long ou_volt, ou_volt_min, ou_volt_max;
+ int ret;
+
+ if (unlikely(!target_freq)) {
+ dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
+ target_freq);
+ return -EINVAL;
+ }
+
+ clk = _get_opp_clk(dev);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ freq = clk_round_rate(clk, target_freq);
+ if ((long)freq <= 0)
+ freq = target_freq;
+
+ old_freq = clk_get_rate(clk);
+
+ /* Return early if nothing to do */
+ if (old_freq == freq) {
+ dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+ __func__, freq);
+ return 0;
+ }
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ rcu_read_unlock();
+ return PTR_ERR(opp_table);
+ }
+
+ old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+ if (!IS_ERR(old_opp)) {
+ ou_volt = old_opp->u_volt;
+ ou_volt_min = old_opp->u_volt_min;
+ ou_volt_max = old_opp->u_volt_max;
+ } else {
+ dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
+ __func__, old_freq, PTR_ERR(old_opp));
+ }
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
+ __func__, freq, ret);
+ rcu_read_unlock();
+ return ret;
+ }
- list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
- if (!list_dev)
+ u_volt = opp->u_volt;
+ u_volt_min = opp->u_volt_min;
+ u_volt_max = opp->u_volt_max;
+
+ reg = opp_table->regulator;
+
+ rcu_read_unlock();
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+ u_volt_max);
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+ __func__, old_freq, freq);
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ goto restore_voltage;
+ }
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+ u_volt_max);
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ if (clk_set_rate(clk, old_freq))
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (!IS_ERR(old_opp))
+ _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
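A minimal usage sketch of the new dev_pm_opp_set_rate() (editorial, not part of this patch; the my_* names are hypothetical): once the OPP table, clock and regulator are registered, a driver can delegate the whole DVFS transition, including the scale-up/scale-down voltage ordering shown above, to the OPP core.

#include <linux/device.h>
#include <linux/pm_opp.h>

static int my_set_performance(struct device *dev, unsigned long target_hz)
{
	/* Picks the ceil OPP for target_hz and programs clk + regulator */
	return dev_pm_opp_set_rate(dev, target_hz);
}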
+
+/* OPP-dev Helpers */
+static void _kfree_opp_dev_rcu(struct rcu_head *head)
+{
+ struct opp_device *opp_dev;
+
+ opp_dev = container_of(head, struct opp_device, rcu_head);
+ kfree_rcu(opp_dev, rcu_head);
+}
+
+static void _remove_opp_dev(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{
+ opp_debug_unregister(opp_dev, opp_table);
+ list_del(&opp_dev->node);
+ call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
+ _kfree_opp_dev_rcu);
+}
+
+struct opp_device *_add_opp_dev(const struct device *dev,
+ struct opp_table *opp_table)
+{
+ struct opp_device *opp_dev;
+ int ret;
+
+ opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
+ if (!opp_dev)
return NULL;
- /* Initialize list-dev */
- list_dev->dev = dev;
- list_add_rcu(&list_dev->node, &dev_opp->dev_list);
+ /* Initialize opp-dev */
+ opp_dev->dev = dev;
+ list_add_rcu(&opp_dev->node, &opp_table->dev_list);
+
+ /* Create debugfs entries for the opp_table */
+ ret = opp_debug_register(opp_dev, opp_table);
+ if (ret)
+ dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
+ __func__, ret);
- return list_dev;
+ return opp_dev;
}
/**
- * _add_device_opp() - Find device OPP table or allocate a new one
+ * _add_opp_table() - Find OPP table or allocate a new one
* @dev: device for which we do this operation
*
* It tries to find an existing table first, if it couldn't find one, it
* allocates a new OPP table and returns that.
*
- * Return: valid device_opp pointer if success, else NULL.
+ * Return: valid opp_table pointer on success, else NULL.
*/
-static struct device_opp *_add_device_opp(struct device *dev)
+static struct opp_table *_add_opp_table(struct device *dev)
{
- struct device_opp *dev_opp;
- struct device_list_opp *list_dev;
+ struct opp_table *opp_table;
+ struct opp_device *opp_dev;
+ struct device_node *np;
+ int ret;
- /* Check for existing list for 'dev' first */
- dev_opp = _find_device_opp(dev);
- if (!IS_ERR(dev_opp))
- return dev_opp;
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (!IS_ERR(opp_table))
+ return opp_table;
/*
- * Allocate a new device OPP table. In the infrequent case where a new
+ * Allocate a new OPP table. In the infrequent case where a new
 * device needs to be added, we pay this penalty.
*/
- dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
- if (!dev_opp)
+ opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
+ if (!opp_table)
return NULL;
- INIT_LIST_HEAD(&dev_opp->dev_list);
+ INIT_LIST_HEAD(&opp_table->dev_list);
- list_dev = _add_list_dev(dev, dev_opp);
- if (!list_dev) {
- kfree(dev_opp);
+ opp_dev = _add_opp_dev(dev, opp_table);
+ if (!opp_dev) {
+ kfree(opp_table);
return NULL;
}
- srcu_init_notifier_head(&dev_opp->srcu_head);
- INIT_LIST_HEAD(&dev_opp->opp_list);
+ /*
+ * Only required for backward compatibility with v1 bindings, but isn't
+ * harmful for other cases. And so we do it unconditionally.
+ */
+ np = of_node_get(dev->of_node);
+ if (np) {
+ u32 val;
+
+ if (!of_property_read_u32(np, "clock-latency", &val))
+ opp_table->clock_latency_ns_max = val;
+ of_property_read_u32(np, "voltage-tolerance",
+ &opp_table->voltage_tolerance_v1);
+ of_node_put(np);
+ }
+
+ /* Set regulator to a non-NULL error value */
+ opp_table->regulator = ERR_PTR(-ENXIO);
+
+ /* Find clk for the device */
+ opp_table->clk = clk_get(dev, NULL);
+ if (IS_ERR(opp_table->clk)) {
+ ret = PTR_ERR(opp_table->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
+ ret);
+ }
- /* Secure the device list modification */
- list_add_rcu(&dev_opp->node, &dev_opp_list);
- return dev_opp;
+ srcu_init_notifier_head(&opp_table->srcu_head);
+ INIT_LIST_HEAD(&opp_table->opp_list);
+
+ /* Secure the device table modification */
+ list_add_rcu(&opp_table->node, &opp_tables);
+ return opp_table;
}
/**
- * _kfree_device_rcu() - Free device_opp RCU handler
+ * _kfree_device_rcu() - Free opp_table RCU handler
* @head: RCU head
*/
static void _kfree_device_rcu(struct rcu_head *head)
{
- struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+ struct opp_table *opp_table = container_of(head, struct opp_table,
+ rcu_head);
- kfree_rcu(device_opp, rcu_head);
+ kfree_rcu(opp_table, rcu_head);
}
/**
- * _remove_device_opp() - Removes a device OPP table
- * @dev_opp: device OPP table to be removed.
+ * _remove_opp_table() - Removes an OPP table
+ * @opp_table: OPP table to be removed.
*
- * Removes/frees device OPP table it it doesn't contain any OPPs.
+ * Removes/frees OPP table if it doesn't contain any OPPs.
*/
-static void _remove_device_opp(struct device_opp *dev_opp)
+static void _remove_opp_table(struct opp_table *opp_table)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
+
+ if (!list_empty(&opp_table->opp_list))
+ return;
+
+ if (opp_table->supported_hw)
+ return;
- if (!list_empty(&dev_opp->opp_list))
+ if (opp_table->prop_name)
return;
- list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
- node);
+ if (!IS_ERR(opp_table->regulator))
+ return;
+
+ /* Release clk */
+ if (!IS_ERR(opp_table->clk))
+ clk_put(opp_table->clk);
+
+ opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
+ node);
- _remove_list_dev(list_dev, dev_opp);
+ _remove_opp_dev(opp_dev, opp_table);
/* dev_list must be empty now */
- WARN_ON(!list_empty(&dev_opp->dev_list));
+ WARN_ON(!list_empty(&opp_table->dev_list));
- list_del_rcu(&dev_opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+ list_del_rcu(&opp_table->node);
+ call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
_kfree_device_rcu);
}
@@ -577,17 +883,17 @@ static void _kfree_opp_rcu(struct rcu_head *head)
/**
* _opp_remove() - Remove an OPP from a table definition
- * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp_table: points back to the opp_table struct this opp belongs to
* @opp: pointer to the OPP to remove
 * @notify: whether an OPP_EVENT_REMOVE notification should be sent
*
- * This function removes an opp definition from the opp list.
+ * This function removes an opp definition from the opp table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* It is assumed that the caller holds required mutex for an RCU updater
* strategy.
*/
-static void _opp_remove(struct device_opp *dev_opp,
+static void _opp_remove(struct opp_table *opp_table,
struct dev_pm_opp *opp, bool notify)
{
/*
@@ -595,21 +901,23 @@ static void _opp_remove(struct device_opp *dev_opp,
* frequency/voltage list.
*/
if (notify)
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_REMOVE, opp);
+ opp_debug_remove_one(opp);
list_del_rcu(&opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
- _remove_device_opp(dev_opp);
+ _remove_opp_table(opp_table);
}
/**
- * dev_pm_opp_remove() - Remove an OPP from OPP list
+ * dev_pm_opp_remove() - Remove an OPP from OPP table
* @dev: device for which we do this operation
* @freq: OPP to remove with matching 'freq'
*
- * This function removes an opp from the opp list.
+ * This function removes an opp from the opp table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -618,17 +926,17 @@ static void _opp_remove(struct device_opp *dev_opp,
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
struct dev_pm_opp *opp;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
bool found = false;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
goto unlock;
- list_for_each_entry(opp, &dev_opp->opp_list, node) {
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
if (opp->rate == freq) {
found = true;
break;
@@ -641,14 +949,14 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
goto unlock;
}
- _opp_remove(dev_opp, opp, true);
+ _opp_remove(opp_table, opp, true);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
static struct dev_pm_opp *_allocate_opp(struct device *dev,
- struct device_opp **dev_opp)
+ struct opp_table **opp_table)
{
struct dev_pm_opp *opp;
@@ -659,8 +967,8 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
INIT_LIST_HEAD(&opp->node);
- *dev_opp = _add_device_opp(dev);
- if (!*dev_opp) {
+ *opp_table = _add_opp_table(dev);
+ if (!*opp_table) {
kfree(opp);
return NULL;
}
@@ -668,21 +976,38 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
return opp;
}
+static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
+ struct opp_table *opp_table)
+{
+ struct regulator *reg = opp_table->regulator;
+
+ if (!IS_ERR(reg) &&
+ !regulator_is_supported_voltage(reg, opp->u_volt_min,
+ opp->u_volt_max)) {
+ pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+ __func__, opp->u_volt_min, opp->u_volt_max);
+ return false;
+ }
+
+ return true;
+}
+
static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
- struct device_opp *dev_opp)
+ struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
- struct list_head *head = &dev_opp->opp_list;
+ struct list_head *head = &opp_table->opp_list;
+ int ret;
/*
* Insert new OPP in order of increasing frequency and discard if
* already present.
*
- * Need to use &dev_opp->opp_list in the condition part of the 'for'
+ * Need to use &opp_table->opp_list in the condition part of the 'for'
* loop, don't replace it with head otherwise it will become an infinite
* loop.
*/
- list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
if (new_opp->rate > opp->rate) {
head = &opp->node;
continue;
@@ -700,9 +1025,20 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
0 : -EEXIST;
}
- new_opp->dev_opp = dev_opp;
+ new_opp->opp_table = opp_table;
list_add_rcu(&new_opp->node, head);
+ ret = opp_debug_create_one(new_opp, opp_table);
+ if (ret)
+ dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
+ __func__, ret);
+
+ if (!_opp_supported_by_regulators(new_opp, opp_table)) {
+ new_opp->available = false;
+ dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
+ __func__, new_opp->rate);
+ }
+
return 0;
}
@@ -713,14 +1049,14 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* @u_volt: Voltage in uVolts for this OPP
* @dynamic: Dynamically added OPPs.
*
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
*
* NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
* and freed by dev_pm_opp_of_remove_table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -736,14 +1072,15 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
bool dynamic)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
+ unsigned long tol;
int ret;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- new_opp = _allocate_opp(dev, &dev_opp);
+ new_opp = _allocate_opp(dev, &opp_table);
if (!new_opp) {
ret = -ENOMEM;
goto unlock;
@@ -751,60 +1088,77 @@ static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
/* populate the opp table */
new_opp->rate = freq;
+ tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
new_opp->u_volt = u_volt;
+ new_opp->u_volt_min = u_volt - tol;
+ new_opp->u_volt_max = u_volt + tol;
new_opp->available = true;
new_opp->dynamic = dynamic;
- ret = _opp_add(dev, new_opp, dev_opp);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret)
goto free_opp;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
return 0;
free_opp:
- _opp_remove(dev_opp, new_opp, false);
+ _opp_remove(opp_table, new_opp, false);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
/* TODO: Support multiple regulators */
-static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+ struct opp_table *opp_table)
{
u32 microvolt[3] = {0};
u32 val;
int count, ret;
+ struct property *prop = NULL;
+ char name[NAME_MAX];
+
+ /* Search for "opp-microvolt-<name>" */
+ if (opp_table->prop_name) {
+ snprintf(name, sizeof(name), "opp-microvolt-%s",
+ opp_table->prop_name);
+ prop = of_find_property(opp->np, name, NULL);
+ }
- /* Missing property isn't a problem, but an invalid entry is */
- if (!of_find_property(opp->np, "opp-microvolt", NULL))
- return 0;
+ if (!prop) {
+ /* Search for "opp-microvolt" */
+ sprintf(name, "opp-microvolt");
+ prop = of_find_property(opp->np, name, NULL);
- count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+ /* Missing property isn't a problem, but an invalid entry is */
+ if (!prop)
+ return 0;
+ }
+
+ count = of_property_count_u32_elems(opp->np, name);
if (count < 0) {
- dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
- __func__, count);
+ dev_err(dev, "%s: Invalid %s property (%d)\n",
+ __func__, name, count);
return count;
}
/* There can be one or three elements here */
if (count != 1 && count != 3) {
- dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
- __func__, count);
+ dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
+ __func__, name, count);
return -EINVAL;
}
- ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
- count);
+ ret = of_property_read_u32_array(opp->np, name, microvolt, count);
if (ret) {
- dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
- ret);
+ dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
return -EINVAL;
}
@@ -818,22 +1172,391 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
opp->u_volt_max = microvolt[2];
}
- if (!of_property_read_u32(opp->np, "opp-microamp", &val))
+ /* Search for "opp-microamp-<name>" */
+ prop = NULL;
+ if (opp_table->prop_name) {
+ snprintf(name, sizeof(name), "opp-microamp-%s",
+ opp_table->prop_name);
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (!prop) {
+ /* Search for "opp-microamp" */
+ sprintf(name, "opp-microamp");
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (prop && !of_property_read_u32(opp->np, name, &val))
opp->u_amp = val;
return 0;
}
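For reference, a sketch of the DT data this parser consumes (node name and values purely illustrative): opp-microvolt takes either a single target value or a <target min max> triplet, and both supply properties may carry the prop-name suffix handled above.

/*
 * opp@1000000000 {
 *         opp-hz = /bits/ 64 <1000000000>;
 *         opp-microvolt = <975000 970000 985000>;  (target min max)
 *         opp-microamp = <70000>;
 * };
 */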
/**
+ * dev_pm_opp_set_supported_hw() - Set supported platforms
+ * @dev: Device for which supported-hw has to be set.
+ * @versions: Array of hierarchy of versions to match.
+ * @count: Number of elements in the array.
+ *
+ * This is required only for the V2 bindings, and it enables a platform to
+ * specify the hierarchy of versions it supports. The OPP layer will then
+ * enable only those OPPs that are available for these versions, based on
+ * each OPP's 'opp-supported-hw' property.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
+ unsigned int count)
+{
+ struct opp_table *opp_table;
+ int ret = 0;
+
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ /* Do we already have a version hierarchy associated with opp_table? */
+ if (opp_table->supported_hw) {
+ dev_err(dev, "%s: Already have supported hardware list\n",
+ __func__);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
+ GFP_KERNEL);
+ if (!opp_table->supported_hw) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ opp_table->supported_hw_count = count;
+ mutex_unlock(&opp_table_lock);
+ return 0;
+
+err:
+ _remove_opp_table(opp_table);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
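A hedged usage sketch (not part of this patch; the version values are hypothetical): a platform that has decoded, say, its speed bin and process corner from an efuse registers that hierarchy before parsing the OPP table.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

static int my_register_supported_hw(struct device *dev)
{
	/* One u32 per level of the hierarchy, matching opp-supported-hw */
	static const u32 versions[] = { 0x1, 0x4 };

	return dev_pm_opp_set_supported_hw(dev, versions,
					   ARRAY_SIZE(versions));
}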
+
+/**
+ * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
+ * @dev: Device for which supported-hw has to be put.
+ *
+ * This is required only for the V2 bindings, and is called for a matching
+ * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
+ * will not be freed.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_supported_hw(struct device *dev)
+{
+ struct opp_table *opp_table;
+
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
+
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ if (!opp_table->supported_hw) {
+ dev_err(dev, "%s: Doesn't have supported hardware list\n",
+ __func__);
+ goto unlock;
+ }
+
+ kfree(opp_table->supported_hw);
+ opp_table->supported_hw = NULL;
+ opp_table->supported_hw_count = 0;
+
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
+
+/**
+ * dev_pm_opp_set_prop_name() - Set prop-extn name
+ * @dev: Device for which the prop-name has to be set.
+ * @name: name to postfix to properties.
+ *
+ * This is required only for the V2 bindings, and it enables a platform to
+ * specify the extension to be used for certain property names. The properties
+ * to which the extension applies are opp-microvolt and opp-microamp. The OPP
+ * core will postfix these property names with -<name> while looking them up.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+ struct opp_table *opp_table;
+ int ret = 0;
+
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ /* Do we already have a prop-name associated with opp_table? */
+ if (opp_table->prop_name) {
+ dev_err(dev, "%s: Already have prop-name %s\n", __func__,
+ opp_table->prop_name);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ opp_table->prop_name = kstrdup(name, GFP_KERNEL);
+ if (!opp_table->prop_name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mutex_unlock(&opp_table_lock);
+ return 0;
+
+err:
+ _remove_opp_table(opp_table);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
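A brief sketch of the call above (editorial; the "speed0" name is hypothetical): after this registration the OPP core prefers "opp-microvolt-speed0" and "opp-microamp-speed0" over the plain property names when parsing each OPP node.

#include <linux/device.h>
#include <linux/pm_opp.h>

static int my_register_prop_name(struct device *dev)
{
	/* Pairs with dev_pm_opp_put_prop_name() on teardown */
	return dev_pm_opp_set_prop_name(dev, "speed0");
}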
+
+/**
+ * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
+ * @dev: Device for which the prop-name has to be put.
+ *
+ * This is required only for the V2 bindings, and is called for a matching
+ * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
+ * will not be freed.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_prop_name(struct device *dev)
+{
+ struct opp_table *opp_table;
+
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
+
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ if (!opp_table->prop_name) {
+ dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
+ goto unlock;
+ }
+
+ kfree(opp_table->prop_name);
+ opp_table->prop_name = NULL;
+
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
+
+/**
+ * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * @dev: Device for which regulator name is being set.
+ * @name: Name of the regulator.
+ *
+ * In order to support OPP switching, OPP layer needs to know the name of the
+ * device's regulator, as the core would be required to switch voltages as well.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+ struct opp_table *opp_table;
+ struct regulator *reg;
+ int ret;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Already have a regulator set */
+ if (WARN_ON(!IS_ERR(opp_table->regulator))) {
+ ret = -EBUSY;
+ goto err;
+ }
+ /* Allocate the regulator */
+ reg = regulator_get_optional(dev, name);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%s: no regulator (%s) found: %d\n",
+ __func__, name, ret);
+ goto err;
+ }
+
+ opp_table->regulator = reg;
+
+ mutex_unlock(&opp_table_lock);
+ return 0;
+
+err:
+ _remove_opp_table(opp_table);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
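A hedged probe-time sketch (not part of this patch; the "vdd" supply name and my_probe() are hypothetical): the regulator must be registered before any OPPs are added, so the call precedes table parsing and pairs with dev_pm_opp_put_regulator() on the error and teardown paths.

#include <linux/device.h>
#include <linux/pm_opp.h>

static int my_probe(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_set_regulator(dev, "vdd");
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_regulator(dev);

	return ret;
}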
+
+/**
+ * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
+ * @dev: Device for which regulator was set.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulator(struct device *dev)
+{
+ struct opp_table *opp_table;
+
+ mutex_lock(&opp_table_lock);
+
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
+ goto unlock;
+ }
+
+ if (IS_ERR(opp_table->regulator)) {
+ dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ regulator_put(opp_table->regulator);
+ opp_table->regulator = ERR_PTR(-ENXIO);
+
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
+
+static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
+ struct device_node *np)
+{
+ unsigned int count = opp_table->supported_hw_count;
+ u32 version;
+ int ret;
+
+ if (!opp_table->supported_hw)
+ return true;
+
+ while (count--) {
+ ret = of_property_read_u32_index(np, "opp-supported-hw", count,
+ &version);
+ if (ret) {
+ dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+ __func__, count, ret);
+ return false;
+ }
+
+ /* Both of these are bitwise masks of the versions */
+ if (!(version & opp_table->supported_hw[count]))
+ return false;
+ }
+
+ return true;
+}
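A short worked example of the mask test above (values hypothetical): if the platform registered supported_hw = { 0x3 } and an OPP node carries opp-supported-hw = <0x1>, the OPP passes because 0x1 & 0x3 is non-zero; a node carrying <0x4> is rejected. With multiple cells, every level of the hierarchy must intersect its registered mask.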
+
+/**
* _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
* @dev: device for which we do this operation
* @np: device node
*
- * This function adds an opp definition to the opp list and returns status. The
+ * This function adds an opp definition to the opp table and returns status. The
* opp can be controlled using dev_pm_opp_enable/disable functions and may be
* removed by dev_pm_opp_remove.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -849,16 +1572,16 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
*/
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
u64 rate;
u32 val;
int ret;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- new_opp = _allocate_opp(dev, &dev_opp);
+ new_opp = _allocate_opp(dev, &opp_table);
if (!new_opp) {
ret = -ENOMEM;
goto unlock;
@@ -870,6 +1593,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
goto free_opp;
}
+ /* Check if the OPP supports hardware's hierarchy of versions or not */
+ if (!_opp_is_supported(dev, opp_table, np)) {
+ dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+ goto free_opp;
+ }
+
/*
* Rate is defined as an unsigned long in clk API, and so casting
* explicitly to its type. Must be fixed once rate is 64 bit
@@ -885,28 +1614,30 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
- ret = opp_parse_supplies(new_opp, dev);
+ ret = opp_parse_supplies(new_opp, dev, opp_table);
if (ret)
goto free_opp;
- ret = _opp_add(dev, new_opp, dev_opp);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret)
goto free_opp;
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
- if (dev_opp->suspend_opp)
+ if (opp_table->suspend_opp) {
dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
- __func__, dev_opp->suspend_opp->rate,
+ __func__, opp_table->suspend_opp->rate,
new_opp->rate);
- else
- dev_opp->suspend_opp = new_opp;
+ } else {
+ new_opp->suspend = true;
+ opp_table->suspend_opp = new_opp;
+ }
}
- if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
- dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+ if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
+ opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
__func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
@@ -917,13 +1648,13 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
return 0;
free_opp:
- _opp_remove(dev_opp, new_opp, false);
+ _opp_remove(opp_table, new_opp, false);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
@@ -933,11 +1664,11 @@ unlock:
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
*
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -969,7 +1700,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
* copy operation, returns 0 if no modification was done OR modification was
* successful.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks to
* keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -978,7 +1709,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
static int _opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
@@ -987,18 +1718,18 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
if (!new_opp)
return -ENOMEM;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- /* Find the device_opp */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- r = PTR_ERR(dev_opp);
+ /* Find the opp_table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ r = PTR_ERR(opp_table);
dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
goto unlock;
}
/* Do we have the frequency? */
- list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
if (tmp_opp->rate == freq) {
opp = tmp_opp;
break;
@@ -1019,21 +1750,21 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
new_opp->available = availability_req;
list_replace_rcu(&opp->node, &new_opp->node);
- mutex_unlock(&dev_opp_list_lock);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ mutex_unlock(&opp_table_lock);
+ call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
/* Notify the change of the OPP availability */
if (availability_req)
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
- new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_ENABLE, new_opp);
else
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
- new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_DISABLE, new_opp);
return 0;
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
kfree(new_opp);
return r;
}
@@ -1047,7 +1778,7 @@ unlock:
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
@@ -1073,7 +1804,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
* control by users to make this OPP not available until the circumstances are
* right to make it available again (with a call to dev_pm_opp_enable).
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
@@ -1091,26 +1822,26 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
/**
* dev_pm_opp_get_notifier() - find notifier_head of the device with opp
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Return: pointer to notifier head if found, otherwise -ENODEV or
 * -EINVAL based on type of error cast as pointer. The value must be checked
 * with IS_ERR to determine whether it is a valid pointer or an error result.
*
- * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * Locking: This function must be called under rcu_read_lock(). opp_table is a
+ * RCU protected pointer. The reason for the same is that the opp pointer which
+ * is returned will remain valid for use with opp_get_{voltage, freq} only while
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
- struct device_opp *dev_opp = _find_device_opp(dev);
+ struct opp_table *opp_table = _find_opp_table(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp); /* matching type */
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table); /* matching type */
- return &dev_opp->srcu_head;
+ return &opp_table->srcu_head;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
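A hedged subscription sketch (editorial; the my_* names are hypothetical): a consumer fetches the head once and registers a notifier_block on it; the callback then receives the OPP_EVENT_ADD/REMOVE/ENABLE/DISABLE events raised throughout this file, with the affected OPP as data.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/pm_opp.h>

static int my_opp_event(struct notifier_block *nb, unsigned long event,
			void *data)
{
	/* data points to the struct dev_pm_opp the event refers to */
	return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_opp_event };

static int my_subscribe(struct device *dev)
{
	struct srcu_notifier_head *nh = dev_pm_opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);

	return srcu_notifier_chain_register(nh, &my_nb);
}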
@@ -1118,11 +1849,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
/**
* dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
* entries
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Free OPPs created using static entries present in DT.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -1130,38 +1861,38 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
*/
void dev_pm_opp_of_remove_table(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *opp, *tmp;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- /* Check for existing list for 'dev' */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int error = PTR_ERR(dev_opp);
+ /* Check for existing table for 'dev' */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int error = PTR_ERR(opp_table);
if (error != -ENODEV)
- WARN(1, "%s: dev_opp: %d\n",
+ WARN(1, "%s: opp_table: %d\n",
IS_ERR_OR_NULL(dev) ?
"Invalid device" : dev_name(dev),
error);
goto unlock;
}
- /* Find if dev_opp manages a single device */
- if (list_is_singular(&dev_opp->dev_list)) {
+ /* Find if opp_table manages a single device */
+ if (list_is_singular(&opp_table->dev_list)) {
/* Free static OPPs */
- list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
if (!opp->dynamic)
- _opp_remove(dev_opp, opp, true);
+ _opp_remove(opp_table, opp, true);
}
} else {
- _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+ _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
}
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
@@ -1182,22 +1913,22 @@ struct device_node *_of_get_opp_desc_node(struct device *dev)
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
struct device_node *np;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
int ret = 0, count = 0;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _managed_opp(opp_np);
- if (dev_opp) {
+ opp_table = _managed_opp(opp_np);
+ if (opp_table) {
/* OPPs are already managed */
- if (!_add_list_dev(dev, dev_opp))
+ if (!_add_opp_dev(dev, opp_table))
ret = -ENOMEM;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
- /* We have opp-list node now, iterate over it and add OPPs */
+ /* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_np, np) {
count++;
@@ -1213,19 +1944,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
if (WARN_ON(!count))
return -ENOENT;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(dev);
- if (WARN_ON(IS_ERR(dev_opp))) {
- ret = PTR_ERR(dev_opp);
- mutex_unlock(&dev_opp_list_lock);
+ opp_table = _find_opp_table(dev);
+ if (WARN_ON(IS_ERR(opp_table))) {
+ ret = PTR_ERR(opp_table);
+ mutex_unlock(&opp_table_lock);
goto free_table;
}
- dev_opp->np = opp_np;
- dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+ opp_table->np = opp_np;
+ opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return 0;
@@ -1254,7 +1985,7 @@ static int _of_add_opp_table_v1(struct device *dev)
*/
nr = prop->length / sizeof(u32);
if (nr % 2) {
- dev_err(dev, "%s: Invalid OPP list\n", __func__);
+ dev_err(dev, "%s: Invalid OPP table\n", __func__);
return -EINVAL;
}
@@ -1274,11 +2005,11 @@ static int _of_add_opp_table_v1(struct device *dev)
/**
* dev_pm_opp_of_add_table() - Initialize opp table from device tree
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Register the initial OPP table with the OPP library for given device.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 7b445e88a0d5..ba2bdbd932ef 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -31,7 +31,7 @@
* @table: Cpufreq table returned back to caller
*
 * Generate a cpufreq table for a provided device - this assumes that the
- * opp list is already initialized and ready for usage.
+ * opp table is already initialized and ready for usage.
*
* This function allocates required memory for the cpufreq table. It is
* expected that the caller does the required maintenance such as freeing
@@ -44,7 +44,7 @@
* WARNING: It is important for the callers to ensure refreshing their copy of
* the table if any of the mentioned functions have been invoked in the interim.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Since we just use the regular accessor functions to access the internal data
* structures, we use RCU read lock inside this function. As a result, users of
 * this function DO NOT need to use explicit locks for invoking.
@@ -122,15 +122,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
/* Required only for V1 bindings, as v2 can manage it from DT itself */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
{
- struct device_list_opp *list_dev;
- struct device_opp *dev_opp;
+ struct opp_device *opp_dev;
+ struct opp_table *opp_table;
struct device *dev;
int cpu, ret = 0;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(cpu_dev);
- if (IS_ERR(dev_opp)) {
+ opp_table = _find_opp_table(cpu_dev);
+ if (IS_ERR(opp_table)) {
ret = -EINVAL;
goto unlock;
}
@@ -146,15 +146,15 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
continue;
}
- list_dev = _add_list_dev(dev, dev_opp);
- if (!list_dev) {
- dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+ opp_dev = _add_opp_dev(dev, opp_table);
+ if (!opp_dev) {
+ dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
__func__, cpu);
continue;
}
}
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
@@ -214,7 +214,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
/*
* Works only for OPP v2 bindings.
*
- * cpumask should be already set to mask of cpu_dev->id.
* Returns -ENOENT if operating-points-v2 bindings aren't supported.
*/
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
@@ -230,6 +229,8 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask
return -ENOENT;
}
+ cpumask_set_cpu(cpu_dev->id, cpumask);
+
/* OPPs are shared ? */
if (!of_property_read_bool(np, "opp-shared"))
goto put_cpu_node;
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
new file mode 100644
index 000000000000..ef1ae6b52042
--- /dev/null
+++ b/drivers/base/power/opp/debugfs.c
@@ -0,0 +1,218 @@
+/*
+ * Generic OPP debugfs interface
+ *
+ * Copyright (C) 2015-2016 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/limits.h>
+
+#include "opp.h"
+
+static struct dentry *rootdir;
+
+static void opp_set_dev_name(const struct device *dev, char *name)
+{
+ if (dev->parent)
+ snprintf(name, NAME_MAX, "%s-%s", dev_name(dev->parent),
+ dev_name(dev));
+ else
+ snprintf(name, NAME_MAX, "%s", dev_name(dev));
+}
+
+void opp_debug_remove_one(struct dev_pm_opp *opp)
+{
+ debugfs_remove_recursive(opp->dentry);
+}
+
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
+{
+ struct dentry *pdentry = opp_table->dentry;
+ struct dentry *d;
+ char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */
+
+ /* Rate is unique to each OPP, use it to give opp-name */
+ snprintf(name, sizeof(name), "opp:%lu", opp->rate);
+
+ /* Create per-opp directory */
+ d = debugfs_create_dir(name, pdentry);
+ if (!d)
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available))
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic))
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo))
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
+ &opp->clock_latency_ns))
+ return -ENOMEM;
+
+ opp->dentry = d;
+ return 0;
+}
+
+static int opp_list_debug_create_dir(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{
+ const struct device *dev = opp_dev->dev;
+ struct dentry *d;
+
+ opp_set_dev_name(dev, opp_table->dentry_name);
+
+ /* Create device specific directory */
+ d = debugfs_create_dir(opp_table->dentry_name, rootdir);
+ if (!d) {
+ dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
+ return -ENOMEM;
+ }
+
+ opp_dev->dentry = d;
+ opp_table->dentry = d;
+
+ return 0;
+}
+
+static int opp_list_debug_create_link(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{
+ const struct device *dev = opp_dev->dev;
+ char name[NAME_MAX];
+ struct dentry *d;
+
+ opp_set_dev_name(opp_dev->dev, name);
+
+ /* Create device specific directory link */
+ d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
+ if (!d) {
+ dev_err(dev, "%s: Failed to create link\n", __func__);
+ return -ENOMEM;
+ }
+
+ opp_dev->dentry = d;
+
+ return 0;
+}
+
+/**
+ * opp_debug_register - add a device opp node to the debugfs 'opp' directory
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the opp_table the device is being added to
+ *
+ * Dynamically adds a device-specific directory to the debugfs 'opp' directory.
+ * If the opp_table is shared with other devices, then links will be created
+ * for all devices except the first.
+ *
+ * Return: 0 on success, otherwise negative error.
+ */
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
+{
+ if (!rootdir) {
+ pr_debug("%s: Uninitialized rootdir\n", __func__);
+ return -EINVAL;
+ }
+
+ if (opp_table->dentry)
+ return opp_list_debug_create_link(opp_dev, opp_table);
+
+ return opp_list_debug_create_dir(opp_dev, opp_table);
+}
+
+static void opp_migrate_dentry(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{
+ struct opp_device *new_dev;
+ const struct device *dev;
+ struct dentry *dentry;
+
+ /* Look for next opp-dev */
+ list_for_each_entry(new_dev, &opp_table->dev_list, node)
+ if (new_dev != opp_dev)
+ break;
+
+ /* new_dev is guaranteed to be valid here */
+ dev = new_dev->dev;
+ debugfs_remove_recursive(new_dev->dentry);
+
+ opp_set_dev_name(dev, opp_table->dentry_name);
+
+ dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
+ opp_table->dentry_name);
+ if (!dentry) {
+ dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
+ __func__, dev_name(opp_dev->dev), dev_name(dev));
+ return;
+ }
+
+ new_dev->dentry = dentry;
+ opp_table->dentry = dentry;
+}
+
+/**
+ * opp_debug_unregister - remove a device opp node from debugfs opp directory
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the opp_table the device is being removed from
+ *
+ * Dynamically removes the device-specific directory from the debugfs 'opp'
+ * directory.
+ */
+void opp_debug_unregister(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{
+ if (opp_dev->dentry == opp_table->dentry) {
+ /* Move the real dentry object under another device */
+ if (!list_is_singular(&opp_table->dev_list)) {
+ opp_migrate_dentry(opp_dev, opp_table);
+ goto out;
+ }
+ opp_table->dentry = NULL;
+ }
+
+ debugfs_remove_recursive(opp_dev->dentry);
+
+out:
+ opp_dev->dentry = NULL;
+}
+
+static int __init opp_debug_init(void)
+{
+ /* Create /sys/kernel/debug/opp directory */
+ rootdir = debugfs_create_dir("opp", NULL);
+ if (!rootdir) {
+ pr_err("%s: Failed to create root directory\n", __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+core_initcall(opp_debug_init);
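Putting the debugfs pieces together, the resulting layout looks roughly like this (device and rate names illustrative, reconstructed from the code above):

/sys/kernel/debug/opp/
|- soc-cpu0/                    (first device: real directory)
|  `- opp:1000000000/
|     |- available, dynamic, turbo, suspend
|     |- rate_hz, u_volt_target, u_volt_min, u_volt_max, u_amp
|     `- clock_latency_ns
`- soc-cpu1 -> soc-cpu0         (sharing devices: symlinks)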
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index 7366b2aa8997..f67f806fcf3a 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -17,17 +17,21 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/limits.h>
#include <linux/pm_opp.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+struct clk;
+struct regulator;
+
/* Lock to allow exclusive modification to the device and opp lists */
-extern struct mutex dev_opp_list_lock;
+extern struct mutex opp_table_lock;
/*
* Internal data structure organization with the OPP layer library is as
* follows:
- * dev_opp_list (root)
+ * opp_tables (root)
* |- device 1 (represents voltage domain 1)
* | |- opp 1 (availability, freq, voltage)
* | |- opp 2 ..
@@ -36,23 +40,24 @@ extern struct mutex dev_opp_list_lock;
* |- device 2 (represents the next voltage domain)
* ...
* `- device m (represents mth voltage domain)
- * device 1, 2.. are represented by dev_opp structure while each opp
+ * device 1, 2.. are represented by opp_table structure while each opp
* is represented by the opp structure.
*/
/**
* struct dev_pm_opp - Generic OPP description structure
- * @node: opp list node. The nodes are maintained throughout the lifetime
+ * @node: opp table node. The nodes are maintained throughout the lifetime
* of boot. It is expected only an optimal set of OPPs are
* added to the library by the SoC framework.
- * RCU usage: opp list is traversed with RCU locks. node
+ * RCU usage: opp table is traversed with RCU locks. node
* modification is possible realtime, hence the modifications
- * are protected by the dev_opp_list_lock for integrity.
+ * are protected by the opp_table_lock for integrity.
* IMPORTANT: the opp nodes should be maintained in increasing
* order.
- * @dynamic: not-created from static DT entries.
* @available: true/false - marks if this OPP as available or not
+ * @dynamic: not-created from static DT entries.
* @turbo: true if turbo (boost) OPP
+ * @suspend: true if suspend OPP
* @rate: Frequency in hertz
* @u_volt: Target voltage in microvolts corresponding to this OPP
* @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
@@ -60,9 +65,10 @@ extern struct mutex dev_opp_list_lock;
* @u_amp: Maximum current drawn by the device in microamperes
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
- * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp_table: points back to the opp_table struct this opp belongs to
* @rcu_head: RCU callback head used for deferred freeing
* @np: OPP's device node.
+ * @dentry: debugfs dentry pointer (per opp)
*
* This structure stores the OPP information for a given device.
*/
@@ -72,6 +78,7 @@ struct dev_pm_opp {
bool available;
bool dynamic;
bool turbo;
+ bool suspend;
unsigned long rate;
unsigned long u_volt;
@@ -80,40 +87,60 @@ struct dev_pm_opp {
unsigned long u_amp;
unsigned long clock_latency_ns;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct rcu_head rcu_head;
struct device_node *np;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+#endif
};
/**
- * struct device_list_opp - devices managed by 'struct device_opp'
+ * struct opp_device - devices managed by 'struct opp_table'
* @node: list node
* @dev: device to which the struct object belongs
* @rcu_head: RCU callback head used for deferred freeing
+ * @dentry: debugfs dentry pointer (per device)
*
- * This is an internal data structure maintaining the list of devices that are
- * managed by 'struct device_opp'.
+ * This is an internal data structure maintaining the devices that are managed
+ * by 'struct opp_table'.
*/
-struct device_list_opp {
+struct opp_device {
struct list_head node;
const struct device *dev;
struct rcu_head rcu_head;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+#endif
};
/**
- * struct device_opp - Device opp structure
- * @node: list node - contains the devices with OPPs that
+ * struct opp_table - Device opp structure
+ * @node: table node - contains the devices with OPPs that
* have been registered. Nodes once added are not modified in this
- * list.
- * RCU usage: nodes are not modified in the list of device_opp,
- * however addition is possible and is secured by dev_opp_list_lock
+ * table.
+ * RCU usage: nodes are not modified in the table of opp_table,
+ * however addition is possible and is secured by opp_table_lock
* @srcu_head: notifier head to notify the OPP availability changes.
* @rcu_head: RCU callback head used for deferred freeing
* @dev_list: list of devices that share these OPPs
- * @opp_list: list of opps
+ * @opp_list: table of opps
* @np: struct device_node pointer for opp's DT node.
+ * @clock_latency_ns_max: Max clock latency in nanoseconds.
* @shared_opp: OPP is shared between multiple devices.
+ * @suspend_opp: Pointer to OPP to be used during device suspend.
+ * @supported_hw: Array of version number to support.
+ * @supported_hw_count: Number of elements in supported_hw array.
+ * @prop_name: A name to postfix to many DT properties, while parsing them.
+ * @clk: Device's clock handle
+ * @regulator: Supply regulator
+ * @dentry: debugfs dentry pointer of the real device directory (not links).
+ * @dentry_name: Name of the real dentry.
+ *
+ * @voltage_tolerance_v1: In percentage, for v1 bindings only.
*
* This is an internal data structure maintaining the link to opps attached to
* a device. This structure is not meant to be shared to users as it is
@@ -123,7 +150,7 @@ struct device_list_opp {
* need to wait for the grace period of both of them before freeing any
* resources. And so we have used kfree_rcu() from within call_srcu() handlers.
*/
-struct device_opp {
+struct opp_table {
struct list_head node;
struct srcu_notifier_head srcu_head;
@@ -133,14 +160,48 @@ struct device_opp {
struct device_node *np;
unsigned long clock_latency_ns_max;
+
+ /* For backward compatibility with v1 bindings */
+ unsigned int voltage_tolerance_v1;
+
bool shared_opp;
struct dev_pm_opp *suspend_opp;
+
+ unsigned int *supported_hw;
+ unsigned int supported_hw_count;
+ const char *prop_name;
+ struct clk *clk;
+ struct regulator *regulator;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+ char dentry_name[NAME_MAX];
+#endif
};
/* Routines internal to opp core */
-struct device_opp *_find_device_opp(struct device *dev);
-struct device_list_opp *_add_list_dev(const struct device *dev,
- struct device_opp *dev_opp);
+struct opp_table *_find_opp_table(struct device *dev);
+struct opp_device *_add_opp_dev(const struct device *dev,
+ struct opp_table *opp_table);
struct device_node *_of_get_opp_desc_node(struct device *dev);
+#ifdef CONFIG_DEBUG_FS
+void opp_debug_remove_one(struct dev_pm_opp *opp);
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
+void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
+#else
+static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
+
+static inline int opp_debug_create_one(struct dev_pm_opp *opp,
+ struct opp_table *opp_table)
+{ return 0; }
+static inline int opp_debug_register(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{ return 0; }
+
+static inline void opp_debug_unregister(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{ }
+#endif /* CONFIG_DEBUG_FS */
+
#endif /* __DRIVER_OPP_H__ */
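
The #else stubs above let the OPP core call the debugfs hooks without any
#ifdef at the call sites. A minimal caller sketch under that assumption
(the function below is illustrative, not part of the patch):

static int example_attach_debugfs(struct opp_device *opp_dev,
				  struct opp_table *opp_table)
{
	/* Compiles identically with and without CONFIG_DEBUG_FS:
	 * the stub variant simply returns 0. */
	int ret = opp_debug_register(opp_dev, opp_table);

	if (ret)
		pr_debug("opp debugfs registration failed: %d\n", ret);

	return 0;	/* debugfs trouble should not fail device setup */
}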
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 7f3646e459cb..b84b68e258c1 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -147,7 +147,7 @@ static int apply_constraint(struct dev_pm_qos_request *req,
switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
ret = pm_qos_update_target(&qos->resume_latency,
- &req->data.pnode, action, value);
+ &req->data.lat, action, value);
if (ret) {
value = pm_qos_read_value(&qos->resume_latency);
blocking_notifier_call_chain(&dev_pm_notifiers,
@@ -157,7 +157,7 @@ static int apply_constraint(struct dev_pm_qos_request *req,
break;
case DEV_PM_QOS_LATENCY_TOLERANCE:
ret = pm_qos_update_target(&qos->latency_tolerance,
- &req->data.pnode, action, value);
+ &req->data.lat, action, value);
if (ret) {
value = pm_qos_read_value(&qos->latency_tolerance);
req->dev->power.set_latency_tolerance(req->dev, value);
@@ -258,7 +258,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
/* Flush the constraints lists for the device. */
c = &qos->resume_latency;
- plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ plist_for_each_entry_safe(req, tmp, &c->list, data.lat.node) {
/*
* Update constraints list and call the notification
* callbacks if needed
@@ -266,8 +266,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+
+ kfree(c->notifiers);
+
c = &qos->latency_tolerance;
- plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ plist_for_each_entry_safe(req, tmp, &c->list, data.lat.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
@@ -382,7 +385,7 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
case DEV_PM_QOS_LATENCY_TOLERANCE:
- curr_value = req->data.pnode.prio;
+ curr_value = req->data.lat.node.prio;
break;
case DEV_PM_QOS_FLAGS:
curr_value = req->data.flr.flags;
@@ -835,7 +838,7 @@ s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
ret = IS_ERR_OR_NULL(dev->power.qos)
|| !dev->power.qos->latency_tolerance_req ?
PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
- dev->power.qos->latency_tolerance_req->data.pnode.prio;
+ dev->power.qos->latency_tolerance_req->data.lat.node.prio;
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
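
The pnode -> lat renames above only work if the request's data union gained
a member that embeds a whole request object rather than a bare plist_node.
A hedged reconstruction of the assumed layout, inferred from the three
access patterns in this hunk (&req->data.lat handed to
pm_qos_update_target(), data.lat.node used for plist iteration, and
data.lat.node.prio read back):

struct dev_pm_qos_request {
	enum dev_pm_qos_req_type type;
	union {
		struct pm_qos_request lat;	/* .node is the plist_node */
		struct dev_pm_qos_flags_request flr;
	} data;
	struct device *dev;
};

This also implies pm_qos_update_target() now takes a struct pm_qos_request *
rather than a plist_node *, which is consistent with passing &req->data.lat
directly.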
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index db9d00c36a3e..45fc564bf949 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,7 +3,7 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SWR)
select LZO_COMPRESS
select LZO_DECOMPRESS
select IRQ_DOMAIN if REGMAP_IRQ
@@ -29,3 +29,15 @@ config REGMAP_MMIO
config REGMAP_IRQ
bool
+
+config REGMAP_SWR
+ tristate
+
+config REGMAP_ALLOW_WRITE_DEBUGFS
+ depends on REGMAP && DEBUG_FS
+ bool "Allow REGMAP debugfs write"
+ default n
+ help
+ Say 'y' here to allow writes through the regmap debugfs interface.
+ Debugfs register writes can be risky when they touch essential
+ hardware, so enabling this option on a production device is not
+ recommended.
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 609e4c84f485..2822b2974814 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
+obj-$(CONFIG_REGMAP_SWR) += regmap-swr.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 3df977054781..61cdbaa3c09a 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -85,6 +85,9 @@ struct regmap {
struct list_head debugfs_off_cache;
struct mutex cache_lock;
+
+ unsigned int dump_address;
+ unsigned int dump_count;
#endif
unsigned int max_register;
@@ -245,6 +248,10 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg);
int _regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
+int _regmap_raw_multi_reg_write(struct regmap *map,
+ const struct reg_sequence *regs,
+ size_t num_regs);
+
void regmap_async_complete_cb(struct regmap_async *async, int ret);
enum regmap_endian regmap_get_val_endian(struct device *dev,
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 4c07802986b2..673938f972a8 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -682,6 +682,53 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
return ret;
}
+static int regcache_sync_block_raw_multi_reg(struct regmap *map, void *block,
+ unsigned long *cache_present,
+ unsigned int block_base,
+ unsigned int start,
+ unsigned int end)
+{
+ unsigned int i, val;
+ unsigned int regtmp = 0;
+ int ret = 0;
+ struct reg_sequence *regs;
+ size_t num_regs = ((end - start) + 1);
+
+ regs = kcalloc(num_regs, sizeof(*regs), GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
+
+ num_regs = 0;
+ for (i = start; i < end; i++) {
+ regtmp = block_base + (i * map->reg_stride);
+
+ /* skip registers that are not defined/available */
+ if (!regcache_reg_present(cache_present, i))
+ continue;
+
+ val = regcache_get_val(map, block, i);
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, regtmp);
+ if (ret >= 0 && val == map->reg_defaults[ret].def)
+ continue;
+
+ regs[num_regs].reg = regtmp;
+ regs[num_regs].def = val;
+ regs[num_regs].delay_us = 0;
+ num_regs++;
+ }
+ ret = 0;
+ if (num_regs) {
+ dev_dbg(map->dev, "%s: start: 0x%x - end: 0x%x\n",
+ __func__, regs[0].reg, regs[num_regs-1].reg);
+ ret = _regmap_raw_multi_reg_write(map, regs, num_regs);
+ }
+ kfree(regs);
+ return ret;
+}
+
static int regcache_sync_block_raw(struct regmap *map, void *block,
unsigned long *cache_present,
unsigned int block_base, unsigned int start,
@@ -729,7 +776,12 @@ int regcache_sync_block(struct regmap *map, void *block,
unsigned int block_base, unsigned int start,
unsigned int end)
{
- if (regmap_can_raw_write(map) && !map->use_single_write)
+ if (regmap_can_raw_write(map) && map->can_multi_write)
+ return regcache_sync_block_raw_multi_reg(map, block,
+ cache_present,
+ block_base, start,
+ end);
+ else if (regmap_can_raw_write(map) && !map->use_single_write)
return regcache_sync_block_raw(map, block, cache_present,
block_base, start, end);
else
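
regcache_sync_block() now prefers the multi-register path whenever the map
advertises it. A minimal sketch of a regmap_config that opts in (values
illustrative):

static const struct regmap_config example_cache_config = {
	.reg_bits	 = 16,
	.val_bits	 = 8,
	.cache_type	 = REGCACHE_RBTREE,
	.max_register	 = 0x3ff,
	/* Batches dirty registers through _regmap_raw_multi_reg_write()
	 * during regcache_sync() instead of one write per register. */
	.can_multi_write = true,
};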
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 3f0a7e262d69..3120bdc84fce 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -259,8 +259,7 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
count, ppos);
}
-#undef REGMAP_ALLOW_WRITE_DEBUGFS
-#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
+#ifdef CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS
/*
* This can be dangerous especially when we have clients such as
* PMICs, therefore don't provide any real compile time configuration option
@@ -310,6 +309,67 @@ static const struct file_operations regmap_map_fops = {
.llseek = default_llseek,
};
+static ssize_t regmap_data_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = file->private_data;
+ size_t new_count;
+
+ regmap_calc_tot_len(map, NULL, 0);
+ new_count = map->dump_count * map->debugfs_tot_len;
+ if (new_count > count)
+ new_count = count;
+
+ if (*ppos == 0)
+ *ppos = map->dump_address * map->debugfs_tot_len;
+ else if (*ppos >= map->dump_address * map->debugfs_tot_len
+ + map->dump_count * map->debugfs_tot_len)
+ return 0;
+ return regmap_read_debugfs(map, 0, map->max_register, user_buf,
+ new_count, ppos);
+}
+
+#ifdef CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS
+static ssize_t regmap_data_write_file(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ size_t buf_size;
+ char *start = buf;
+ unsigned long value;
+ struct regmap *map = file->private_data;
+ int ret;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ while (*start == ' ')
+ start++;
+ if (kstrtoul(start, 16, &value))
+ return -EINVAL;
+
+ /* Userspace has been fiddling around behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ ret = regmap_write(map, map->dump_address, value);
+ if (ret < 0)
+ return ret;
+ return buf_size;
+}
+#else
+#define regmap_data_write_file NULL
+#endif
+
+static const struct file_operations regmap_data_fops = {
+ .open = simple_open,
+ .read = regmap_data_read_file,
+ .write = regmap_data_write_file,
+ .llseek = default_llseek,
+};
+
static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -595,7 +655,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
if (map->max_register || regmap_readable(map, 0)) {
umode_t registers_mode;
-#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
+#ifdef CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS
registers_mode = 0600;
#else
registers_mode = 0400;
@@ -603,6 +663,15 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
debugfs_create_file("registers", registers_mode, map->debugfs,
map, &regmap_map_fops);
+
+ debugfs_create_x32("address", 0600, map->debugfs,
+ &map->dump_address);
+ map->dump_count = 1;
+ debugfs_create_u32("count", 0600, map->debugfs,
+ &map->dump_count);
+ debugfs_create_file("data", registers_mode, map->debugfs,
+ map, &regmap_data_fops);
+
debugfs_create_file("access", 0400, map->debugfs,
map, &regmap_access_fops);
}
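
The new address/count/data triple gives a windowed view into the register
map: write a register offset to "address", a register count to "count",
then read (or, with CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y, write) "data".
A hedged userspace sketch of that workflow; the map directory name
"dummy-codec" and the debugfs mount point are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void poke(const char *path, const char *text)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, text, strlen(text)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	const char *base = "/sys/kernel/debug/regmap/dummy-codec";
	char path[256];

	snprintf(path, sizeof(path), "%s/address", base);
	poke(path, "0x40");	/* select register 0x40 */

	snprintf(path, sizeof(path), "%s/data", base);
	poke(path, "1f");	/* write 0x1f; parsed base-16 by the kernel */

	return 0;
}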
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index 4a36e415e938..156751544c02 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -1,7 +1,7 @@
/*
* Register map access API - SPMI support
*
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013,2016 The Linux Foundation. All rights reserved.
*
* Based on regmap-i2c.c:
* Copyright 2011 Wolfson Microelectronics plc
diff --git a/drivers/base/regmap/regmap-swr.c b/drivers/base/regmap/regmap-swr.c
new file mode 100644
index 000000000000..1641c374b189
--- /dev/null
+++ b/drivers/base/regmap/regmap-swr.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/soundwire/soundwire.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "internal.h"
+
+static int regmap_swr_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_len)
+{
+ struct device *dev = context;
+ struct swr_device *swr = to_swr_device(dev);
+ struct regmap *map = dev_get_regmap(dev, NULL);
+ size_t addr_bytes;
+ size_t val_bytes;
+ int i, ret = 0;
+ u16 reg_addr = 0;
+
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+ addr_bytes = map->format.reg_bytes;
+ if (swr == NULL) {
+ dev_err(dev, "%s: swr device is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (reg_size != addr_bytes) {
+ dev_err(dev, "%s: reg size %zd bytes not supported\n",
+ __func__, reg_size);
+ return -EINVAL;
+ }
+ reg_addr = *(u16 *)reg;
+ val_bytes = map->format.val_bytes;
+ /* val_len = val_bytes * val_count: write val_count consecutive regs */
+ for (i = 0; i < (val_len / val_bytes); i++) {
+ ret = swr_write(swr, swr->dev_num, reg_addr + i,
+ (u8 *)val + (val_bytes * i));
+ if (ret < 0) {
+ dev_err(dev, "%s: write reg 0x%x failed, err %d\n",
+ __func__, reg_addr + i, ret);
+ break;
+ }
+ }
+ return ret;
+}
+
+static int regmap_swr_raw_multi_reg_write(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct swr_device *swr = to_swr_device(dev);
+ struct regmap *map = dev_get_regmap(dev, NULL);
+ size_t addr_bytes;
+ size_t val_bytes;
+ size_t pad_bytes;
+ size_t num_regs;
+ int i = 0;
+ int ret = 0;
+ u16 *reg;
+ u8 *val;
+ u8 *buf;
+
+ if (swr == NULL) {
+ dev_err(dev, "%s: swr device is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ addr_bytes = map->format.reg_bytes;
+ val_bytes = map->format.val_bytes;
+ pad_bytes = map->format.pad_bytes;
+
+ if (addr_bytes + val_bytes + pad_bytes == 0) {
+ dev_err(dev, "%s: sum of addr, value and pad is 0\n", __func__);
+ return -EINVAL;
+ }
+ num_regs = count / (addr_bytes + val_bytes + pad_bytes);
+
+ reg = kcalloc(num_regs, sizeof(u16), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ val = kcalloc(num_regs, sizeof(u8), GFP_KERNEL);
+ if (!val) {
+ ret = -ENOMEM;
+ goto mem_fail;
+ }
+
+ buf = (u8 *)data;
+ for (i = 0; i < num_regs; i++) {
+ reg[i] = *(u16 *)buf;
+ buf += (addr_bytes + pad_bytes);
+ val[i] = *buf;
+ buf += val_bytes;
+ }
+ ret = swr_bulk_write(swr, swr->dev_num, reg, val, num_regs);
+ if (ret)
+ dev_err(dev, "%s: multi reg write failed\n", __func__);
+
+ kfree(val);
+mem_fail:
+ kfree(reg);
+ return ret;
+}
+
+static int regmap_swr_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct regmap *map = dev_get_regmap(dev, NULL);
+ size_t addr_bytes;
+ size_t val_bytes;
+ size_t pad_bytes;
+
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+ addr_bytes = map->format.reg_bytes;
+ val_bytes = map->format.val_bytes;
+ pad_bytes = map->format.pad_bytes;
+
+ WARN_ON(count < addr_bytes);
+
+ if (count > (addr_bytes + val_bytes + pad_bytes))
+ return regmap_swr_raw_multi_reg_write(context, data, count);
+ else
+ return regmap_swr_gather_write(context, data, addr_bytes,
+ (data + addr_bytes),
+ (count - addr_bytes));
+}
+
+static int regmap_swr_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct swr_device *swr = to_swr_device(dev);
+ struct regmap *map = dev_get_regmap(dev, NULL);
+ size_t addr_bytes;
+ int ret = 0;
+ u16 reg_addr = 0;
+
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+ addr_bytes = map->format.reg_bytes;
+ if (swr == NULL) {
+ dev_err(dev, "%s: swr is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (reg_size != addr_bytes) {
+ dev_err(dev, "%s: register size %zd bytes not supported\n",
+ __func__, reg_size);
+ return -EINVAL;
+ }
+ reg_addr = *(u16 *)reg;
+ ret = swr_read(swr, swr->dev_num, reg_addr, val, val_size);
+ if (ret < 0)
+ dev_err(dev, "%s: codec reg 0x%x read failed %d\n",
+ __func__, reg_addr, ret);
+ return ret;
+}
+
+static struct regmap_bus regmap_swr = {
+ .write = regmap_swr_write,
+ .gather_write = regmap_swr_gather_write,
+ .read = regmap_swr_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+struct regmap *__regmap_init_swr(struct swr_device *swr,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ return __regmap_init(&swr->dev, &regmap_swr, &swr->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL(__regmap_init_swr);
+
+struct regmap *__devm_regmap_init_swr(struct swr_device *swr,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ return __devm_regmap_init(&swr->dev, &regmap_swr, &swr->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL(__devm_regmap_init_swr);
+
+MODULE_LICENSE("GPL v2");
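
A hedged sketch of how a SoundWire slave driver would attach a regmap
through this bus glue. It assumes a devm_regmap_init_swr() wrapper macro
is added to <linux/regmap.h> next to the other bus wrappers (this file
only exports the underscored forms), and the register geometry is
illustrative:

static const struct regmap_config swr_codec_regmap_config = {
	.reg_bits	 = 16,
	.val_bits	 = 8,
	.max_register	 = 0x3ff,
	.can_multi_write = true,	/* use regmap_swr_raw_multi_reg_write() */
};

static int swr_codec_probe(struct swr_device *swr)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_swr(swr, &swr_codec_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* A single-register write goes through regmap_swr_gather_write(). */
	return regmap_write(regmap, 0x100, 0x01);
}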
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 4ac63c0e50c7..576b5facdf43 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1822,7 +1822,7 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write);
* they are all in the same page and have been changed to being page
* relative. The page register has been written if that was necessary.
*/
-static int _regmap_raw_multi_reg_write(struct regmap *map,
+int _regmap_raw_multi_reg_write(struct regmap *map,
const struct reg_sequence *regs,
size_t num_regs)
{