diff options
Diffstat (limited to 'drivers/soc/qcom')
| -rw-r--r-- | drivers/soc/qcom/Kconfig | 42 | ||||
| -rw-r--r-- | drivers/soc/qcom/Makefile | 2 | ||||
| -rw-r--r-- | drivers/soc/qcom/boot_marker.c | 183 | ||||
| -rw-r--r-- | drivers/soc/qcom/boot_stats.c | 60 | ||||
| -rw-r--r-- | drivers/soc/qcom/cache_m4m_erp64.c | 635 | ||||
| -rw-r--r-- | drivers/soc/qcom/icnss.c | 24 | ||||
| -rw-r--r-- | drivers/soc/qcom/ipc_router_mhi_xprt.c | 34 | ||||
| -rw-r--r-- | drivers/soc/qcom/memory_dump_v2.c | 4 | ||||
| -rw-r--r-- | drivers/soc/qcom/memshare/msm_memshare.c | 11 | ||||
| -rw-r--r-- | drivers/soc/qcom/memshare/msm_memshare.h | 3 | ||||
| -rw-r--r-- | drivers/soc/qcom/msm_minidump.c | 153 | ||||
| -rw-r--r-- | drivers/soc/qcom/perf_event_kryo.c | 13 | ||||
| -rw-r--r-- | drivers/soc/qcom/peripheral-loader.c | 4 | ||||
| -rw-r--r-- | drivers/soc/qcom/qbt1000.c | 21 | ||||
| -rw-r--r-- | drivers/soc/qcom/qdsp6v2/apr.c | 29 | ||||
| -rw-r--r-- | drivers/soc/qcom/scm-boot.c | 19 | ||||
| -rw-r--r-- | drivers/soc/qcom/service-notifier.c | 13 | ||||
| -rw-r--r-- | drivers/soc/qcom/socinfo.c | 36 | ||||
| -rw-r--r-- | drivers/soc/qcom/subsystem_restart.c | 74 |
19 files changed, 1185 insertions, 175 deletions
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 34b0adb108eb..ea008ffbc856 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -367,6 +367,14 @@ config MSM_SPM driver allows configuring SPM to allow different low power modes for both core and L2. +config MSM_L2_SPM + bool "SPM support for L2 cache" + help + Enable SPM driver support for L2 cache. Some MSM chipsets allow + control of L2 cache low power mode with a Subsystem Power manager. + Enabling this driver allows configuring L2 SPM for low power modes + on supported chipsets + config QCOM_SCM bool "Secure Channel Manager (SCM) support" default n @@ -573,6 +581,16 @@ config MSM_BOOT_STATS This figures are reported in mpm sleep clock cycles and have a resolution of 31 bits as 1 bit is used as an overflow check. +config MSM_BOOT_TIME_MARKER + bool "Use MSM boot time marker reporting" + depends on MSM_BOOT_STATS + help + Use this to mark msm boot kpi for measurement. + An instrumentation for boot time measurement. + To create an entry, call "place_marker" function. + At userspace, write marker name to "/sys/kernel/debug/bootkpi/kpi_values" + If unsure, say N + config QCOM_CPUSS_DUMP bool "CPU Subsystem Dumping support" help @@ -900,4 +918,28 @@ config QCOM_CX_IPEAK clients are going to cross their thresholds then Cx ipeak hw module will raise an interrupt to cDSP block to throttle cDSP fmax. +config MSM_CACHE_M4M_ERP64 + bool "Cache and M4M error report" + depends on ARCH_MSM8996 + help + Say 'Y' here to enable reporting of cache and M4M errors to the kernel + log. The kernel log contains collected error syndrome and address + registers. These register dumps can be used as useful information + to find out possible hardware problems. + +config MSM_CACHE_M4M_ERP64_PANIC_ON_CE + bool "Panic on correctable cache/M4M errors" + help + Say 'Y' here to cause kernel panic when correctable cache/M4M errors + are detected. 
Enabling this is useful when you want to dump memory + and system state close to the time when the error occurred. + + If unsure, say N. + +config MSM_CACHE_M4M_ERP64_PANIC_ON_UE + bool "Panic on uncorrectable cache/M4M errors" + help + Say 'Y' here to cause kernel panic when uncorrectable cache/M4M errors + are detected. + source "drivers/soc/qcom/memshare/Kconfig" diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 87698b75d3b8..5eeede23333d 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -86,6 +86,7 @@ obj-$(CONFIG_MSM_CORE_HANG_DETECT) += core_hang_detect.o obj-$(CONFIG_MSM_GLADIATOR_HANG_DETECT) += gladiator_hang_detect.o obj-$(CONFIG_MSM_RUN_QUEUE_STATS) += msm_rq_stats.o obj-$(CONFIG_MSM_BOOT_STATS) += boot_stats.o +obj-$(CONFIG_MSM_BOOT_TIME_MARKER) += boot_marker.o obj-$(CONFIG_MSM_AVTIMER) += avtimer.o ifdef CONFIG_ARCH_MSM8996 obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_kryo.o @@ -104,3 +105,4 @@ obj-$(CONFIG_WCD_DSP_GLINK) += wcd-dsp-glink.o obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o obj-$(CONFIG_QCOM_EARLY_RANDOM) += early_random.o obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o +obj-$(CONFIG_MSM_CACHE_M4M_ERP64) += cache_m4m_erp64.o diff --git a/drivers/soc/qcom/boot_marker.c b/drivers/soc/qcom/boot_marker.c new file mode 100644 index 000000000000..b3a6c9f8d054 --- /dev/null +++ b/drivers/soc/qcom/boot_marker.c @@ -0,0 +1,183 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/fs.h> +#include <linux/time.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/export.h> +#include <linux/types.h> +#include <linux/mutex.h> +#include <soc/qcom/boot_stats.h> + +#define MAX_STRING_LEN 256 +#define BOOT_MARKER_MAX_LEN 40 +static struct dentry *dent_bkpi, *dent_bkpi_status; +static struct boot_marker boot_marker_list; + +struct boot_marker { + char marker_name[BOOT_MARKER_MAX_LEN]; + unsigned long long int timer_value; + struct list_head list; + struct mutex lock; +}; + +static void _create_boot_marker(const char *name, + unsigned long long int timer_value) +{ + struct boot_marker *new_boot_marker; + + pr_debug("%-41s:%llu.%03llu seconds\n", name, + timer_value/TIMER_KHZ, + ((timer_value % TIMER_KHZ) + * 1000) / TIMER_KHZ); + + new_boot_marker = kmalloc(sizeof(*new_boot_marker), GFP_KERNEL); + if (!new_boot_marker) + return; + + strlcpy(new_boot_marker->marker_name, name, + sizeof(new_boot_marker->marker_name)); + new_boot_marker->timer_value = timer_value; + + mutex_lock(&boot_marker_list.lock); + list_add_tail(&(new_boot_marker->list), &(boot_marker_list.list)); + mutex_unlock(&boot_marker_list.lock); +} + +static void set_bootloader_stats(void) +{ + _create_boot_marker("M - APPSBL Start - ", + readl_relaxed(&boot_stats->bootloader_start)); + _create_boot_marker("M - APPSBL Display Init - ", + readl_relaxed(&boot_stats->bootloader_display)); + _create_boot_marker("M - APPSBL Early-Domain Start - ", + readl_relaxed(&boot_stats->bootloader_early_domain_start)); + _create_boot_marker("D - APPSBL Kernel Load Time - ", + readl_relaxed(&boot_stats->bootloader_load_kernel)); + _create_boot_marker("D - APPSBL Kernel Auth Time - ", + readl_relaxed(&boot_stats->bootloader_checksum)); + _create_boot_marker("M - 
APPSBL End - ", + readl_relaxed(&boot_stats->bootloader_end)); +} + +void place_marker(const char *name) +{ + _create_boot_marker((char *) name, msm_timer_get_sclk_ticks()); +} +EXPORT_SYMBOL(place_marker); + +static ssize_t bootkpi_reader(struct file *fp, char __user *user_buffer, + size_t count, loff_t *position) +{ + int rc = 0; + char *buf; + int temp = 0; + struct boot_marker *marker; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&boot_marker_list.lock); + list_for_each_entry(marker, &boot_marker_list.list, list) { + temp += scnprintf(buf + temp, PAGE_SIZE - temp, + "%-41s:%llu.%03llu seconds\n", + marker->marker_name, + marker->timer_value/TIMER_KHZ, + (((marker->timer_value % TIMER_KHZ) + * 1000) / TIMER_KHZ)); + } + mutex_unlock(&boot_marker_list.lock); + rc = simple_read_from_buffer(user_buffer, count, position, buf, temp); + kfree(buf); + return rc; +} + +static ssize_t bootkpi_writer(struct file *fp, const char __user *user_buffer, + size_t count, loff_t *position) +{ + int rc = 0; + char buf[MAX_STRING_LEN]; + + if (count > MAX_STRING_LEN) + return -EINVAL; + rc = simple_write_to_buffer(buf, + sizeof(buf) - 1, position, user_buffer, count); + if (rc < 0) + return rc; + buf[rc] = '\0'; + place_marker(buf); + return rc; +} + +static int bootkpi_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations fops_bkpi = { + .owner = THIS_MODULE, + .open = bootkpi_open, + .read = bootkpi_reader, + .write = bootkpi_writer, +}; + +static int __init init_bootkpi(void) +{ + dent_bkpi = debugfs_create_dir("bootkpi", NULL); + if (IS_ERR_OR_NULL(dent_bkpi)) + return -ENODEV; + + dent_bkpi_status = debugfs_create_file("kpi_values", + (S_IRUGO|S_IWUGO), dent_bkpi, 0, &fops_bkpi); + if (IS_ERR_OR_NULL(dent_bkpi_status)) { + debugfs_remove(dent_bkpi); + dent_bkpi = NULL; + pr_err("boot_marker: Could not create 'kpi_values' debugfs file\n"); + return -ENODEV; + } + + 
INIT_LIST_HEAD(&boot_marker_list.list); + mutex_init(&boot_marker_list.lock); + set_bootloader_stats(); + return 0; +} +subsys_initcall(init_bootkpi); + +static void __exit exit_bootkpi(void) +{ + struct boot_marker *marker; + struct boot_marker *temp_addr; + + debugfs_remove_recursive(dent_bkpi); + mutex_lock(&boot_marker_list.lock); + list_for_each_entry_safe(marker, temp_addr, &boot_marker_list.list, + list) { + list_del(&marker->list); + kfree(marker); + } + mutex_unlock(&boot_marker_list.lock); + boot_stats_exit(); +} +module_exit(exit_bootkpi); + +MODULE_DESCRIPTION("MSM boot key performance indicators"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/boot_stats.c b/drivers/soc/qcom/boot_stats.c index 2fc9cbf55d4b..eb5357e892eb 100644 --- a/drivers/soc/qcom/boot_stats.c +++ b/drivers/soc/qcom/boot_stats.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2014,2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,6 +15,7 @@ #include <linux/io.h> #include <linux/init.h> #include <linux/delay.h> +#include <linux/slab.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/clk.h> @@ -22,17 +23,13 @@ #include <linux/sched.h> #include <linux/of.h> #include <linux/of_address.h> - -struct boot_stats { - uint32_t bootloader_start; - uint32_t bootloader_end; - uint32_t bootloader_display; - uint32_t bootloader_load_kernel; -}; +#include <linux/export.h> +#include <linux/types.h> +#include <soc/qcom/boot_stats.h> static void __iomem *mpm_counter_base; static uint32_t mpm_counter_freq; -static struct boot_stats __iomem *boot_stats; +struct boot_stats __iomem *boot_stats; static int mpm_parse_dt(void) { @@ -88,6 +85,42 @@ static void print_boot_stats(void) mpm_counter_freq); } +unsigned long long int msm_timer_get_sclk_ticks(void) +{ + 
unsigned long long int t1, t2; + int loop_count = 10; + int loop_zero_count = 3; + int tmp = USEC_PER_SEC; + void __iomem *sclk_tick; + + do_div(tmp, TIMER_KHZ); + tmp /= (loop_zero_count-1); + sclk_tick = mpm_counter_base; + if (!sclk_tick) + return -EINVAL; + while (loop_zero_count--) { + t1 = __raw_readl_no_log(sclk_tick); + do { + udelay(1); + t2 = t1; + t1 = __raw_readl_no_log(sclk_tick); + } while ((t2 != t1) && --loop_count); + if (!loop_count) { + pr_err("boot_stats: SCLK did not stabilize\n"); + return 0; + } + if (t1) + break; + + udelay(tmp); + } + if (!loop_zero_count) { + pr_err("boot_stats: SCLK reads zero\n"); + return 0; + } + return t1; +} + int boot_stats_init(void) { int ret; @@ -98,9 +131,14 @@ int boot_stats_init(void) print_boot_stats(); + if (!(boot_marker_enabled())) + boot_stats_exit(); + return 0; +} + +int boot_stats_exit(void) +{ iounmap(boot_stats); iounmap(mpm_counter_base); - return 0; } - diff --git a/drivers/soc/qcom/cache_m4m_erp64.c b/drivers/soc/qcom/cache_m4m_erp64.c new file mode 100644 index 000000000000..758e9d03e07b --- /dev/null +++ b/drivers/soc/qcom/cache_m4m_erp64.c @@ -0,0 +1,635 @@ +/* + * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "msm_cache_erp64: " fmt + +#include <linux/printk.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/platform_device.h> +#include <linux/uaccess.h> +#include <linux/cpu.h> +#include <linux/workqueue.h> +#include <linux/of.h> +#include <linux/cpu_pm.h> +#include <linux/smp.h> + +#include <soc/qcom/kryo-l2-accessors.h> + +/* Instruction cache */ +#define ICECR_EL1 S3_1_c11_c1_0 +#define ICECR_IRQ_EN (BIT(1) | BIT(3) | BIT(5) | BIT(7)) +#define ICESR_EL1 S3_1_c11_c1_1 +#define ICESR_BIT_L1DPE BIT(3) +#define ICESR_BIT_L1TPE BIT(2) +#define ICESR_BIT_L0DPE BIT(1) +#define ICESR_BIT_L0TPE BIT(0) +#define ICESYNR0_EL1 S3_1_c11_c1_3 +#define ICESYNR1_EL1 S3_1_c11_c1_4 +#define ICEAR0_EL1 S3_1_c11_c1_5 +#define ICEAR1_EL1 S3_1_c11_c1_6 +#define ICESRS_EL1 S3_1_c11_c1_2 + +/* Data cache */ +#define DCECR_EL1 S3_1_c11_c5_0 +#define DCECR_IRQ_EN (BIT(1) | BIT(3) | BIT(5) | BIT(7) | \ + BIT(9)) +#define DCESR_EL1 S3_1_c11_c5_1 +#define DCESR_BIT_S1FTLBDPE BIT(4) +#define DCESR_BIT_S1FTLBTPE BIT(3) +#define DCESR_BIT_L1DPE BIT(2) +#define DCESR_BIT_L1PTPE BIT(1) +#define DCESR_BIT_L1VTPE BIT(0) +#define DCESYNR0_EL1 S3_1_c11_c5_3 +#define DCESYNR1_EL1 S3_1_c11_c5_4 +#define DCESRS_EL1 S3_1_c11_c5_2 +#define DCEAR0_EL1 S3_1_c11_c5_5 +#define DCEAR1_EL1 S3_1_c11_c5_6 + +/* L2 cache */ +#define L2CPUSRSELR_EL1I S3_3_c15_c0_6 +#define L2CPUSRDR_EL1 S3_3_c15_c0_7 +#define L2ECR0_IA 0x200 +#define L2ECR0_IRQ_EN (BIT(1) | BIT(3) | BIT(6) | BIT(9) | \ + BIT(11) | BIT(13) | BIT(16) | \ + BIT(19) | BIT(21) | BIT(23) | \ + BIT(26) | BIT(29)) + +#define L2ECR1_IA 0x201 +#define L2ECR1_IRQ_EN (BIT(1) | BIT(3) | BIT(6) | BIT(9) | \ + BIT(11) | BIT(13) | BIT(16) | \ + BIT(19) | BIT(21) | BIT(23) | BIT(29)) +#define L2ECR2_IA 0x202 +#define L2ECR2_IRQ_EN_MASK 0x3FFFFFF +#define L2ECR2_IRQ_EN (BIT(1) | BIT(3) | BIT(6) | BIT(9) | \ + BIT(12) | BIT(15) | BIT(17) | \ + BIT(19) | BIT(22) | BIT(25)) +#define L2ESR0_IA 
0x204 +#define L2ESR0_MASK 0x00FFFFFF +#define L2ESR0_CE ((BIT(0) | BIT(1) | BIT(2) | BIT(3) | \ + BIT(4) | BIT(5) | BIT(12) | BIT(13) | \ + BIT(14) | BIT(15) | BIT(16) | BIT(17)) \ + & L2ESR0_MASK) +#define L2ESR0_UE (~L2ESR0_CE & L2ESR0_MASK) +#define L2ESRS0_IA 0x205 +#define L2ESR1_IA 0x206 +#define L2ESR1_MASK 0x80FFFBFF +#define L2ESRS1_IA 0x207 +#define L2ESYNR0_IA 0x208 +#define L2ESYNR1_IA 0x209 +#define L2ESYNR2_IA 0x20A +#define L2ESYNR3_IA 0x20B +#define L2ESYNR4_IA 0x20C +#define L2EAR0_IA 0x20E +#define L2EAR1_IA 0x20F + +#define L3_QLL_HML3_FIRA 0x3000 +#define L3_QLL_HML3_FIRA_CE (BIT(1) | BIT(3) | BIT(5)) +#define L3_QLL_HML3_FIRA_UE (BIT(2) | BIT(4) | BIT(6)) +#define L3_QLL_HML3_FIRAC 0x3008 +#define L3_QLL_HML3_FIRAS 0x3010 +#define L3_QLL_HML3_FIRAT0C 0x3020 +#define L3_QLL_HML3_FIRAT0C_IRQ_EN 0xFFFFFFFF +#define L3_QLL_HML3_FIRAT1C 0x3024 +#define L3_QLL_HML3_FIRAT1S 0x302C +#define L3_QLL_HML3_FIRAT1S_IRQ_EN 0x01EFC8FE +#define L3_QLL_HML3_FIRSYNA 0x3100 +#define L3_QLL_HML3_FIRSYNB 0x3104 +#define L3_QLL_HML3_FIRSYNC 0x3108 +#define L3_QLL_HML3_FIRSYND 0x310C + +#define M4M_ERR_STATUS 0x10000 +#define M4M_ERR_STATUS_MASK 0x1FF +#define M4M_ERR_Q22SIB_RET_DEC_ERR (BIT(7)) +#define M4M_ERR_Q22SIB_RET_SLV_ERR (BIT(6)) +#define M4M_ERR_CLR 0x10008 +#define M4M_INT_CTRL 0x10010 +#define M4M_INT_CTRL_IRQ_EN 0x1FF +#define M4M_ERR_CTRL 0x10018 +#define M4M_ERR_INJ 0x10020 +#define M4M_ERR_CAP_0 0x10030 +#define M4M_ERR_CAP_1 0x10038 +#define M4M_ERR_CAP_2 0x10040 +#define M4M_ERR_CAP_3 0x10048 + +#define AFFINITY_LEVEL_L3 3 + +#ifdef CONFIG_MSM_CACHE_M4M_ERP64_PANIC_ON_CE +static bool __read_mostly panic_on_ce = true; +#else +static bool __read_mostly panic_on_ce; +#endif + +#ifdef CONFIG_MSM_CACHE_M4M_ERP64_PANIC_ON_UE +static bool __read_mostly panic_on_ue = true; +#else +static bool __read_mostly panic_on_ue; +#endif + +module_param(panic_on_ce, bool, false); +module_param(panic_on_ue, bool, false); + +static void __iomem *hml3_base; +static 
void __iomem *m4m_base; +enum erp_irq_index { IRQ_L1, IRQ_L2_INFO0, IRQ_L2_INFO1, IRQ_L2_ERR0, + IRQ_L2_ERR1, IRQ_L3, IRQ_M4M, IRQ_MAX }; +static const char * const erp_irq_names[] = { + "l1_irq", "l2_irq_info_0", "l2_irq_info_1", "l2_irq_err_0", + "l2_irq_err_1", "l3_irq", "m4m_irq" +}; +static int erp_irqs[IRQ_MAX]; + +struct msm_l1_err_stats { + /* nothing */ +}; + +static DEFINE_PER_CPU(struct msm_l1_err_stats, msm_l1_erp_stats); +static DEFINE_PER_CPU(struct call_single_data, handler_csd); + +#define erp_mrs(reg) ({ \ + u64 __val; \ + asm volatile("mrs %0, " __stringify(reg) : "=r" (__val)); \ + __val; \ +}) + +#define erp_msr(reg, val) { \ + asm volatile("msr " __stringify(reg) ", %0" : : "r" (val)); \ +} + +static void msm_erp_show_icache_error(void) +{ + u64 icesr; + int cpu = raw_smp_processor_id(); + + icesr = erp_mrs(ICESR_EL1); + if (!(icesr & (ICESR_BIT_L0TPE | ICESR_BIT_L0DPE | ICESR_BIT_L1TPE | + ICESR_BIT_L1DPE))) { + pr_debug("CPU%d: No I-cache error detected ICESR 0x%llx\n", + cpu, icesr); + goto clear_out; + } + + pr_alert("CPU%d: I-cache error\n", cpu); + pr_alert("CPU%d: ICESR_EL1 0x%llx ICESYNR0 0x%llx ICESYNR1 0x%llx ICEAR0 0x%llx IECAR1 0x%llx\n", + cpu, icesr, erp_mrs(ICESYNR0_EL1), erp_mrs(ICESYNR1_EL1), + erp_mrs(ICEAR0_EL1), erp_mrs(ICEAR1_EL1)); + + /* + * all detectable I-cache errors are recoverable as + * corrupted lines are refetched + */ + if (panic_on_ce) + BUG_ON(1); + else + WARN_ON(1); + +clear_out: + erp_msr(ICESR_EL1, icesr); +} + +static void msm_erp_show_dcache_error(void) +{ + u64 dcesr; + int cpu = raw_smp_processor_id(); + + dcesr = erp_mrs(DCESR_EL1); + if (!(dcesr & (DCESR_BIT_L1VTPE | DCESR_BIT_L1PTPE | DCESR_BIT_L1DPE | + DCESR_BIT_S1FTLBTPE | DCESR_BIT_S1FTLBDPE))) { + pr_debug("CPU%d: No D-cache error detected DCESR 0x%llx\n", + cpu, dcesr); + goto clear_out; + } + + pr_alert("CPU%d: D-cache error detected\n", cpu); + pr_alert("CPU%d: L1 DCESR 0x%llx, DCESYNR0 0x%llx, DCESYNR1 0x%llx, DCEAR0 0x%llx, DCEAR1 
0x%llx\n", + cpu, dcesr, erp_mrs(DCESYNR0_EL1), erp_mrs(DCESYNR1_EL1), + erp_mrs(DCEAR0_EL1), erp_mrs(DCEAR1_EL1)); + + /* all D-cache errors are correctable */ + if (panic_on_ce) + BUG_ON(1); + else + WARN_ON(1); + +clear_out: + erp_msr(DCESR_EL1, dcesr); +} + +static irqreturn_t msm_l1_erp_irq(int irq, void *dev_id) +{ + msm_erp_show_icache_error(); + msm_erp_show_dcache_error(); + return IRQ_HANDLED; +} + +static DEFINE_SPINLOCK(local_handler_lock); +static void msm_l2_erp_local_handler(void *force) +{ + unsigned long flags; + u64 esr0, esr1; + bool parity_ue, parity_ce, misc_ue; + int cpu; + + spin_lock_irqsave(&local_handler_lock, flags); + + esr0 = get_l2_indirect_reg(L2ESR0_IA); + esr1 = get_l2_indirect_reg(L2ESR1_IA); + parity_ue = esr0 & L2ESR0_UE; + parity_ce = esr0 & L2ESR0_CE; + misc_ue = esr1; + cpu = raw_smp_processor_id(); + + if (force || parity_ue || parity_ce || misc_ue) { + if (parity_ue) + pr_alert("CPU%d: L2 uncorrectable parity error\n", cpu); + if (parity_ce) + pr_alert("CPU%d: L2 correctable parity error\n", cpu); + if (misc_ue) + pr_alert("CPU%d: L2 (non-parity) error\n", cpu); + pr_alert("CPU%d: L2ESR0 0x%llx, L2ESR1 0x%llx\n", + cpu, esr0, esr1); + pr_alert("CPU%d: L2ESYNR0 0x%llx, L2ESYNR1 0x%llx, L2ESYNR2 0x%llx\n", + cpu, get_l2_indirect_reg(L2ESYNR0_IA), + get_l2_indirect_reg(L2ESYNR1_IA), + get_l2_indirect_reg(L2ESYNR2_IA)); + pr_alert("CPU%d: L2EAR0 0x%llx, L2EAR1 0x%llx\n", cpu, + get_l2_indirect_reg(L2EAR0_IA), + get_l2_indirect_reg(L2EAR1_IA)); + } else { + pr_info("CPU%d: No L2 error detected in L2ESR0 0x%llx, L2ESR1 0x%llx)\n", + cpu, esr0, esr1); + } + + /* clear */ + set_l2_indirect_reg(L2ESR0_IA, esr0); + set_l2_indirect_reg(L2ESR1_IA, esr1); + + if (panic_on_ue) + BUG_ON(parity_ue || misc_ue); + else + WARN_ON(parity_ue || misc_ue); + + if (panic_on_ce) + BUG_ON(parity_ce); + else + WARN_ON(parity_ce); + + spin_unlock_irqrestore(&local_handler_lock, flags); +} + +static irqreturn_t msm_l2_erp_irq(int irq, void *dev_id) +{ + 
int cpu; + struct call_single_data *csd; + + for_each_online_cpu(cpu) { + csd = &per_cpu(handler_csd, cpu); + csd->func = msm_l2_erp_local_handler; + smp_call_function_single_async(cpu, csd); + } + + return IRQ_HANDLED; +} + +static irqreturn_t msm_l3_erp_irq(int irq, void *dev_id) +{ + u32 hml3_fira; + bool parity_ue, parity_ce, misc_ue; + + hml3_fira = readl_relaxed(hml3_base + L3_QLL_HML3_FIRA); + parity_ue = (hml3_fira & L3_QLL_HML3_FIRAT1S_IRQ_EN) & + L3_QLL_HML3_FIRA_UE; + parity_ce = (hml3_fira & L3_QLL_HML3_FIRAT1S_IRQ_EN) & + L3_QLL_HML3_FIRA_CE; + misc_ue = (hml3_fira & L3_QLL_HML3_FIRAT1S_IRQ_EN) & + ~(L3_QLL_HML3_FIRA_UE | L3_QLL_HML3_FIRA_CE); + if (parity_ue) + pr_alert("L3 uncorrectable parity error\n"); + if (parity_ce) + pr_alert("L3 correctable parity error\n"); + if (misc_ue) + pr_alert("L3 (non-parity) error\n"); + + pr_alert("HML3_FIRA 0x%0x\n", hml3_fira); + pr_alert("HML3_FIRSYNA 0x%0x, HML3_FIRSYNB 0x%0x\n", + readl_relaxed(hml3_base + L3_QLL_HML3_FIRSYNA), + readl_relaxed(hml3_base + L3_QLL_HML3_FIRSYNB)); + pr_alert("HML3_FIRSYNC 0x%0x, HML3_FIRSYND 0x%0x\n", + readl_relaxed(hml3_base + L3_QLL_HML3_FIRSYNC), + readl_relaxed(hml3_base + L3_QLL_HML3_FIRSYND)); + + if (panic_on_ue) + BUG_ON(parity_ue || misc_ue); + else + WARN_ON(parity_ue || misc_ue); + + if (panic_on_ce) + BUG_ON(parity_ce); + else + WARN_ON(parity_ce); + + writel_relaxed(hml3_fira, hml3_base + L3_QLL_HML3_FIRAC); + /* ensure of irq clear */ + wmb(); + return IRQ_HANDLED; +} + +static irqreturn_t msm_m4m_erp_irq(int irq, void *dev_id) +{ + u32 m4m_status; + + pr_alert("CPU%d: M4M error detected\n", raw_smp_processor_id()); + m4m_status = readl_relaxed(m4m_base + M4M_ERR_STATUS); + pr_alert("M4M_ERR_STATUS 0x%0x\n", m4m_status); + if ((m4m_status & M4M_ERR_STATUS_MASK) & + ~(M4M_ERR_Q22SIB_RET_DEC_ERR | M4M_ERR_Q22SIB_RET_SLV_ERR)) { + pr_alert("M4M_ERR_CAP_0 0x%0x, M4M_ERR_CAP_1 0x%x\n", + readl_relaxed(m4m_base + M4M_ERR_CAP_0), + readl_relaxed(m4m_base + M4M_ERR_CAP_1)); 
+ pr_alert("M4M_ERR_CAP_2 0x%0x, M4M_ERR_CAP_3 0x%x\n", + readl_relaxed(m4m_base + M4M_ERR_CAP_2), + readl_relaxed(m4m_base + M4M_ERR_CAP_3)); + } else { + /* + * M4M error-capture registers not valid when error detected + * due to DEC_ERR or SLV_ERR. L2E registers are still valid. + */ + pr_alert("Omit dumping M4M_ERR_CAP\n"); + } + + /* + * On QSB errors, the L2 captures the bad address and syndrome in + * L2E error registers. Therefore dump L2E always whenever M4M error + * detected. + */ + on_each_cpu(msm_l2_erp_local_handler, (void *)1, 1); + writel_relaxed(1, m4m_base + M4M_ERR_CLR); + /* ensure the IRQ is cleared */ + wmb(); + + if (panic_on_ue) + BUG_ON(1); + else + WARN_ON(1); + + return IRQ_HANDLED; +} + +static void enable_erp_irq_callback(void *info) +{ + enable_percpu_irq(erp_irqs[IRQ_L1], IRQ_TYPE_NONE); +} + +static void disable_erp_irq_callback(void *info) +{ + disable_percpu_irq(erp_irqs[IRQ_L1]); +} + +static void msm_cache_erp_irq_init(void *param) +{ + u64 v; + /* Enable L0/L1 I/D cache error reporting. */ + erp_msr(ICECR_EL1, ICECR_IRQ_EN); + erp_msr(DCECR_EL1, DCECR_IRQ_EN); + /* + * Enable L2 data, tag, QSB and poison error reporting. 
+ */ + set_l2_indirect_reg(L2ECR0_IA, L2ECR0_IRQ_EN); + set_l2_indirect_reg(L2ECR1_IA, L2ECR1_IRQ_EN); + v = (get_l2_indirect_reg(L2ECR2_IA) & ~L2ECR2_IRQ_EN_MASK) + | L2ECR2_IRQ_EN; + set_l2_indirect_reg(L2ECR2_IA, v); +} + +static void msm_cache_erp_l3_init(void) +{ + writel_relaxed(L3_QLL_HML3_FIRAT0C_IRQ_EN, + hml3_base + L3_QLL_HML3_FIRAT0C); + writel_relaxed(L3_QLL_HML3_FIRAT1S_IRQ_EN, + hml3_base + L3_QLL_HML3_FIRAT1S); +} + +static int cache_erp_cpu_pm_callback(struct notifier_block *self, + unsigned long cmd, void *v) +{ + unsigned long aff_level = (unsigned long) v; + + switch (cmd) { + case CPU_CLUSTER_PM_EXIT: + msm_cache_erp_irq_init(NULL); + + if (aff_level >= AFFINITY_LEVEL_L3) + msm_cache_erp_l3_init(); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block cache_erp_cpu_pm_notifier = { + .notifier_call = cache_erp_cpu_pm_callback, +}; + +static int cache_erp_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + msm_cache_erp_irq_init(NULL); + enable_erp_irq_callback(NULL); + break; + case CPU_DYING: + disable_erp_irq_callback(NULL); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block cache_erp_cpu_notifier = { + .notifier_call = cache_erp_cpu_callback, +}; + +static int msm_cache_erp_probe(struct platform_device *pdev) +{ + int i, ret = 0; + struct resource *r; + + dev_dbg(&pdev->dev, "enter\n"); + + /* L3 */ + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hml3_base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(hml3_base)) { + dev_err(&pdev->dev, "failed to ioremap (0x%pK)\n", hml3_base); + return PTR_ERR(hml3_base); + } + + for (i = 0; i <= IRQ_L3; i++) { + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + erp_irq_names[i]); + if (!r) { + dev_err(&pdev->dev, "failed to get %s\n", + erp_irq_names[i]); + return -ENODEV; + } + erp_irqs[i] = r->start; + } + + msm_cache_erp_l3_init(); + + /* L0/L1 erp irq per cpu 
*/ + dev_info(&pdev->dev, "Registering for L1 error interrupts\n"); + ret = request_percpu_irq(erp_irqs[IRQ_L1], msm_l1_erp_irq, + erp_irq_names[IRQ_L1], &msm_l1_erp_stats); + if (ret) { + dev_err(&pdev->dev, "failed to request L0/L1 ERP irq %s (%d)\n", + erp_irq_names[IRQ_L1], ret); + return ret; + } else { + dev_dbg(&pdev->dev, "requested L0/L1 ERP irq %s\n", + erp_irq_names[IRQ_L1]); + } + + get_online_cpus(); + register_hotcpu_notifier(&cache_erp_cpu_notifier); + cpu_pm_register_notifier(&cache_erp_cpu_pm_notifier); + + /* Perform L1/L2 cache error detection init on online cpus */ + on_each_cpu(msm_cache_erp_irq_init, NULL, 1); + /* Enable irqs */ + on_each_cpu(enable_erp_irq_callback, NULL, 1); + put_online_cpus(); + + /* L2 erp irq per cluster */ + dev_info(&pdev->dev, "Registering for L2 error interrupts\n"); + for (i = IRQ_L2_INFO0; i <= IRQ_L2_ERR1; i++) { + ret = devm_request_irq(&pdev->dev, erp_irqs[i], + msm_l2_erp_irq, + IRQF_ONESHOT | + IRQF_TRIGGER_HIGH, + erp_irq_names[i], NULL); + if (ret) { + dev_err(&pdev->dev, "failed to request irq %s (%d)\n", + erp_irq_names[i], ret); + goto cleanup; + } + } + + /* L3 erp irq */ + dev_info(&pdev->dev, "Registering for L3 error interrupts\n"); + ret = devm_request_irq(&pdev->dev, erp_irqs[IRQ_L3], msm_l3_erp_irq, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, + erp_irq_names[IRQ_L3], NULL); + if (ret) { + dev_err(&pdev->dev, "failed to request L3 irq %s (%d)\n", + erp_irq_names[IRQ_L3], ret); + goto cleanup; + } + + return 0; + +cleanup: + free_percpu_irq(erp_irqs[IRQ_L1], NULL); + return ret; +} + +static void msm_m4m_erp_irq_init(void) +{ + writel_relaxed(M4M_INT_CTRL_IRQ_EN, m4m_base + M4M_INT_CTRL); + writel_relaxed(0, m4m_base + M4M_ERR_CTRL); +} + +static int msm_m4m_erp_m4m_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *r; + + dev_dbg(&pdev->dev, "enter\n"); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + m4m_base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(m4m_base)) { 
+ dev_err(&pdev->dev, "failed to ioremap (0x%pK)\n", m4m_base); + return PTR_ERR(m4m_base); + } + + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + erp_irq_names[IRQ_M4M]); + if (!r) { + dev_err(&pdev->dev, "failed to get %s\n", + erp_irq_names[IRQ_M4M]); + ret = -ENODEV; + goto exit; + } + erp_irqs[IRQ_M4M] = r->start; + + dev_info(&pdev->dev, "Registering for M4M error interrupts\n"); + ret = devm_request_irq(&pdev->dev, erp_irqs[IRQ_M4M], + msm_m4m_erp_irq, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, + erp_irq_names[IRQ_M4M], NULL); + if (ret) { + dev_err(&pdev->dev, "failed to request irq %s (%d)\n", + erp_irq_names[IRQ_M4M], ret); + goto exit; + } + + msm_m4m_erp_irq_init(); + +exit: + return ret; +} + +static struct of_device_id cache_erp_dt_ids[] = { + { .compatible = "qcom,kryo_cache_erp64", }, + {} +}; +MODULE_DEVICE_TABLE(of, cache_erp_dt_ids); + +static struct platform_driver msm_cache_erp_driver = { + .probe = msm_cache_erp_probe, + .driver = { + .name = "msm_cache_erp64", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(cache_erp_dt_ids), + }, +}; + +static struct of_device_id m4m_erp_dt_ids[] = { + { .compatible = "qcom,m4m_erp", }, + {} +}; +MODULE_DEVICE_TABLE(of, m4m_erp_dt_ids); +static struct platform_driver msm_m4m_erp_driver = { + .probe = msm_m4m_erp_m4m_probe, + .driver = { + .name = "msm_m4m_erp", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(m4m_erp_dt_ids), + }, +}; + +static int __init msm_cache_erp_init(void) +{ + int r; + + r = platform_driver_register(&msm_cache_erp_driver); + if (!r) + r = platform_driver_register(&msm_m4m_erp_driver); + if (r) + pr_err("failed to register driver %d\n", r); + return r; +} + +arch_initcall(msm_cache_erp_init); diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index ab46eb70651c..8c242bc7a702 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -48,11 +48,6 @@ #include <soc/qcom/socinfo.h> #include <soc/qcom/ramdump.h> -#ifdef 
CONFIG_WCNSS_MEM_PRE_ALLOC -#include <net/cnss_prealloc.h> -#endif - - #include "wlan_firmware_service_v01.h" #ifdef CONFIG_ICNSS_DEBUG @@ -202,6 +197,7 @@ enum icnss_driver_state { ICNSS_MSA0_ASSIGNED, ICNSS_WLFW_EXISTS, ICNSS_WDOG_BITE, + ICNSS_SHUTDOWN_DONE, }; struct ce_irq_list { @@ -695,6 +691,8 @@ static int icnss_qmi_pin_connect_result_ind(void *msg, unsigned int msg_len) goto out; } + memset(&ind_msg, 0, sizeof(ind_msg)); + ind_desc.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01; ind_desc.max_msg_len = WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN; ind_desc.ei_array = wlfw_pin_connect_result_ind_msg_v01_ei; @@ -1968,8 +1966,6 @@ static int icnss_call_driver_probe(struct icnss_priv *priv) if (ret < 0) { icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n", ret, priv->state); - wcnss_prealloc_check_memory_leak(); - wcnss_pre_alloc_reset(); goto out; } @@ -1990,9 +1986,13 @@ static int icnss_call_driver_shutdown(struct icnss_priv *priv) if (!priv->ops || !priv->ops->shutdown) goto out; + if (test_bit(ICNSS_SHUTDOWN_DONE, &penv->state)) + goto out; + icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state); priv->ops->shutdown(&priv->pdev->dev); + set_bit(ICNSS_SHUTDOWN_DONE, &penv->state); out: return 0; @@ -2030,6 +2030,7 @@ static int icnss_pd_restart_complete(struct icnss_priv *priv) } out: + clear_bit(ICNSS_SHUTDOWN_DONE, &penv->state); return 0; call_probe: @@ -2099,8 +2100,6 @@ static int icnss_driver_event_register_driver(void *data) if (ret) { icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n", ret, penv->state); - wcnss_prealloc_check_memory_leak(); - wcnss_pre_alloc_reset(); goto power_off; } @@ -2125,8 +2124,6 @@ static int icnss_driver_event_unregister_driver(void *data) penv->ops->remove(&penv->pdev->dev); clear_bit(ICNSS_DRIVER_PROBED, &penv->state); - wcnss_prealloc_check_memory_leak(); - wcnss_pre_alloc_reset(); penv->ops = NULL; @@ -2151,8 +2148,6 @@ static int icnss_call_driver_remove(struct icnss_priv *priv) 
penv->ops->remove(&priv->pdev->dev); clear_bit(ICNSS_DRIVER_PROBED, &priv->state); - wcnss_prealloc_check_memory_leak(); - wcnss_pre_alloc_reset(); icnss_hw_power_off(penv); @@ -3667,6 +3662,9 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv) case ICNSS_WDOG_BITE: seq_puts(s, "MODEM WDOG BITE"); continue; + case ICNSS_SHUTDOWN_DONE: + seq_puts(s, "SHUTDOWN DONE"); + continue; } seq_printf(s, "UNKNOWN-%d", i); diff --git a/drivers/soc/qcom/ipc_router_mhi_xprt.c b/drivers/soc/qcom/ipc_router_mhi_xprt.c index f9d967fd0af6..e5f6104bd7de 100644 --- a/drivers/soc/qcom/ipc_router_mhi_xprt.c +++ b/drivers/soc/qcom/ipc_router_mhi_xprt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -22,7 +22,7 @@ #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/types.h> - +#include <linux/spinlock.h> static int ipc_router_mhi_xprt_debug_mask; module_param_named(debug_mask, ipc_router_mhi_xprt_debug_mask, @@ -123,9 +123,9 @@ struct ipc_router_mhi_xprt { struct completion sft_close_complete; unsigned xprt_version; unsigned xprt_option; - struct mutex tx_addr_map_list_lock; + spinlock_t tx_addr_map_list_lock; struct list_head tx_addr_map_list; - struct mutex rx_addr_map_list_lock; + spinlock_t rx_addr_map_list_lock; struct list_head rx_addr_map_list; }; @@ -179,16 +179,16 @@ void ipc_router_mhi_release_pkt(struct kref *ref) * Return: The mapped virtual Address if found, NULL otherwise. 
*/ void *ipc_router_mhi_xprt_find_addr_map(struct list_head *addr_map_list, - struct mutex *addr_map_list_lock, - void *addr) + spinlock_t *addr_map_list_lock, void *addr) { struct ipc_router_mhi_addr_map *addr_mapping; struct ipc_router_mhi_addr_map *tmp_addr_mapping; + unsigned long flags; void *virt_addr; if (!addr_map_list || !addr_map_list_lock) return NULL; - mutex_lock(addr_map_list_lock); + spin_lock_irqsave(addr_map_list_lock, flags); list_for_each_entry_safe(addr_mapping, tmp_addr_mapping, addr_map_list, list_node) { if (addr_mapping->virt_addr == addr) { @@ -198,11 +198,11 @@ void *ipc_router_mhi_xprt_find_addr_map(struct list_head *addr_map_list, kref_put(&addr_mapping->pkt->ref, ipc_router_mhi_release_pkt); kfree(addr_mapping); - mutex_unlock(addr_map_list_lock); + spin_unlock_irqrestore(addr_map_list_lock, flags); return virt_addr; } } - mutex_unlock(addr_map_list_lock); + spin_unlock_irqrestore(addr_map_list_lock, flags); IPC_RTR_ERR( "%s: Virtual address mapping [%p] not found\n", __func__, (void *)addr); @@ -219,10 +219,11 @@ void *ipc_router_mhi_xprt_find_addr_map(struct list_head *addr_map_list, * Return: 0 on success, standard Linux error code otherwise. 
*/ int ipc_router_mhi_xprt_add_addr_map(struct list_head *addr_map_list, - struct mutex *addr_map_list_lock, + spinlock_t *addr_map_list_lock, struct rr_packet *pkt, void *virt_addr) { struct ipc_router_mhi_addr_map *addr_mapping; + unsigned long flags; if (!addr_map_list || !addr_map_list_lock) return -EINVAL; @@ -231,11 +232,11 @@ int ipc_router_mhi_xprt_add_addr_map(struct list_head *addr_map_list, return -ENOMEM; addr_mapping->virt_addr = virt_addr; addr_mapping->pkt = pkt; - mutex_lock(addr_map_list_lock); + spin_lock_irqsave(addr_map_list_lock, flags); if (addr_mapping->pkt) kref_get(&addr_mapping->pkt->ref); list_add_tail(&addr_mapping->list_node, addr_map_list); - mutex_unlock(addr_map_list_lock); + spin_unlock_irqrestore(addr_map_list_lock, flags); return 0; } @@ -719,12 +720,11 @@ static void mhi_xprt_xfer_event(struct mhi_cb_info *cb_info) mhi_xprtp = (struct ipc_router_mhi_xprt *)(cb_info->result->user_data); if (cb_info->chan == mhi_xprtp->ch_hndl.out_chan_id) { out_addr = cb_info->result->buf_addr; - mutex_lock(&mhi_xprtp->ch_hndl.state_lock); - ipc_router_mhi_xprt_find_addr_map(&mhi_xprtp->tx_addr_map_list, + ipc_router_mhi_xprt_find_addr_map( + &mhi_xprtp->tx_addr_map_list, &mhi_xprtp->tx_addr_map_list_lock, out_addr); wake_up(&mhi_xprtp->write_wait_q); - mutex_unlock(&mhi_xprtp->ch_hndl.state_lock); } else if (cb_info->chan == mhi_xprtp->ch_hndl.in_chan_id) { queue_work(mhi_xprtp->wq, &mhi_xprtp->read_work); } else { @@ -875,9 +875,9 @@ static int ipc_router_mhi_config_init( mhi_xprtp->ch_hndl.num_trbs = IPC_ROUTER_MHI_XPRT_NUM_TRBS; mhi_xprtp->ch_hndl.mhi_xprtp = mhi_xprtp; INIT_LIST_HEAD(&mhi_xprtp->tx_addr_map_list); - mutex_init(&mhi_xprtp->tx_addr_map_list_lock); + spin_lock_init(&mhi_xprtp->tx_addr_map_list_lock); INIT_LIST_HEAD(&mhi_xprtp->rx_addr_map_list); - mutex_init(&mhi_xprtp->rx_addr_map_list_lock); + spin_lock_init(&mhi_xprtp->rx_addr_map_list_lock); rc = ipc_router_mhi_driver_register(mhi_xprtp); return rc; diff --git 
a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c index 092b1c1af44b..924c826208dd 100644 --- a/drivers/soc/qcom/memory_dump_v2.c +++ b/drivers/soc/qcom/memory_dump_v2.c @@ -95,7 +95,7 @@ int msm_dump_data_add_minidump(struct msm_dump_entry *entry) data = (struct msm_dump_data *)(phys_to_virt(entry->addr)); if (!strcmp(data->name, "")) { - pr_info("Entry name is NULL, Use ID %d for minidump\n", + pr_debug("Entry name is NULL, Use ID %d for minidump\n", entry->id); snprintf(md_entry.name, sizeof(md_entry.name), "KMDT0x%X", entry->id); @@ -133,7 +133,7 @@ int msm_dump_data_register(enum msm_dump_table_ids id, dmac_flush_range(table, (void *)table + sizeof(struct msm_dump_table)); if (msm_dump_data_add_minidump(entry)) - pr_info("Failed to add entry in Minidump table\n"); + pr_err("Failed to add entry in Minidump table\n"); return 0; } diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c index b8417513ca55..c11114528d2a 100644 --- a/drivers/soc/qcom/memshare/msm_memshare.c +++ b/drivers/soc/qcom/memshare/msm_memshare.c @@ -498,6 +498,7 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) struct mem_alloc_generic_resp_msg_v01 *alloc_resp; int rc, resp = 0; int client_id; + uint32_t size = 0; alloc_req = (struct mem_alloc_generic_req_msg_v01 *)req; pr_debug("memshare: alloc request client id: %d proc _id: %d\n", @@ -528,7 +529,11 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) __func__, memblock[client_id].client_id, memblock[client_id].free_memory); if (!memblock[client_id].alloted) { - rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes, + if (alloc_req->client_id == 1 && alloc_req->num_bytes > 0) + size = alloc_req->num_bytes + MEMSHARE_GUARD_BYTES; + else + size = alloc_req->num_bytes; + rc = memshare_alloc(memsh_drv->dev, size, &memblock[client_id]); if (rc) { pr_err("In %s,Unable to allocate memory for requested client\n", @@ -963,8 +968,10 
@@ static int memshare_child_probe(struct platform_device *pdev) * Memshare allocation for guaranteed clients */ if (memblock[num_clients].guarantee) { + if (client_id == 1 && size > 0) + size += MEMSHARE_GUARD_BYTES; rc = memshare_alloc(memsh_child->dev, - memblock[num_clients].size, + size, &memblock[num_clients]); if (rc) { pr_err("In %s, Unable to allocate memory for guaranteed clients, rc: %d\n", diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h index 398907532977..c7123fb1314b 100644 --- a/drivers/soc/qcom/memshare/msm_memshare.h +++ b/drivers/soc/qcom/memshare/msm_memshare.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -24,6 +24,7 @@ #define GPS 0 #define CHECK 0 #define FREE 1 +#define MEMSHARE_GUARD_BYTES (4*1024) struct mem_blocks { /* Client Id information */ diff --git a/drivers/soc/qcom/msm_minidump.c b/drivers/soc/qcom/msm_minidump.c index 1cb36bf98555..300233085161 100644 --- a/drivers/soc/qcom/msm_minidump.c +++ b/drivers/soc/qcom/msm_minidump.c @@ -62,24 +62,31 @@ struct md_table { struct md_region entry[MAX_NUM_ENTRIES]; }; +/* + * md_elfhdr: Minidump table elf header + * @md_ehdr: elf main header + * @shdr: Section header + * @phdr: Program header + * @elf_offset: section offset in elf + * @strtable_idx: string table current index position + */ +struct md_elfhdr { + struct elfhdr *md_ehdr; + struct elf_shdr *shdr; + struct elf_phdr *phdr; + u64 elf_offset; + u64 strtable_idx; +}; + /* Protect elfheader and smem table from deferred calls contention */ static DEFINE_SPINLOCK(mdt_lock); -static bool minidump_enabled; -static struct md_table minidump_table; +static struct md_table minidump_table; +static struct md_elfhdr minidump_elfheader; + +bool 
minidump_enabled; static unsigned int pendings; static unsigned int region_idx = 1; /* First entry is ELF header*/ -/* ELF Header */ -static struct elfhdr *md_ehdr; -/* ELF Program header */ -static struct elf_phdr *phdr; -/* ELF Section header */ -static struct elf_shdr *shdr; -/* Section offset in elf image */ -static u64 elf_offset; -/* String table index, first byte must be '\0' */ -static unsigned int stringtable_idx = 1; - static inline struct elf_shdr *elf_sheader(struct elfhdr *hdr) { return (struct elf_shdr *)((size_t)hdr + (size_t)hdr->e_shoff); @@ -90,6 +97,16 @@ static inline struct elf_shdr *elf_section(struct elfhdr *hdr, int idx) return &elf_sheader(hdr)[idx]; } +static inline struct elf_phdr *elf_pheader(struct elfhdr *hdr) +{ + return (struct elf_phdr *)((size_t)hdr + (size_t)hdr->e_phoff); +} + +static inline struct elf_phdr *elf_program(struct elfhdr *hdr, int idx) +{ + return &elf_pheader(hdr)[idx]; +} + static inline char *elf_str_table(struct elfhdr *hdr) { if (hdr->e_shstrndx == SHN_UNDEF) @@ -101,23 +118,24 @@ static inline char *elf_lookup_string(struct elfhdr *hdr, int offset) { char *strtab = elf_str_table(hdr); - if ((strtab == NULL) | (stringtable_idx < offset)) + if ((strtab == NULL) || (minidump_elfheader.strtable_idx < offset)) return NULL; return strtab + offset; } static inline unsigned int set_section_name(const char *name) { - char *strtab = elf_str_table(md_ehdr); + char *strtab = elf_str_table(minidump_elfheader.md_ehdr); + int idx = minidump_elfheader.strtable_idx; int ret = 0; - if ((strtab == NULL) | (name == NULL)) + if ((strtab == NULL) || (name == NULL)) return 0; - ret = stringtable_idx; - stringtable_idx += strlcpy((strtab + stringtable_idx), - name, MAX_NAME_LENGTH); - stringtable_idx += 1; + ret = idx; + idx += strlcpy((strtab + idx), name, MAX_NAME_LENGTH); + minidump_elfheader.strtable_idx = idx + 1; + return ret; } @@ -137,11 +155,9 @@ static inline bool md_check_name(const char *name) static int 
md_update_smem_table(const struct md_region *entry) { struct md_smem_region *mdr; - - if (!minidump_enabled) { - pr_info("Table in smem is not setup\n"); - return -ENODEV; - } + struct elfhdr *hdr = minidump_elfheader.md_ehdr; + struct elf_shdr *shdr = elf_section(hdr, hdr->e_shnum++); + struct elf_phdr *phdr = elf_program(hdr, hdr->e_phnum++); mdr = &minidump_table.region[region_idx++]; @@ -155,36 +171,21 @@ static int md_update_smem_table(const struct md_region *entry) shdr->sh_addr = (elf_addr_t)entry->virt_addr; shdr->sh_size = mdr->size; shdr->sh_flags = SHF_WRITE; - shdr->sh_offset = elf_offset; + shdr->sh_offset = minidump_elfheader.elf_offset; shdr->sh_entsize = 0; phdr->p_type = PT_LOAD; - phdr->p_offset = elf_offset; + phdr->p_offset = minidump_elfheader.elf_offset; phdr->p_vaddr = entry->virt_addr; phdr->p_paddr = entry->phys_addr; phdr->p_filesz = phdr->p_memsz = mdr->size; phdr->p_flags = PF_R | PF_W; - md_ehdr->e_shnum += 1; - md_ehdr->e_phnum += 1; - elf_offset += shdr->sh_size; - shdr++; - phdr++; + minidump_elfheader.elf_offset += shdr->sh_size; return 0; } -bool msm_minidump_enabled(void) -{ - bool ret; - - spin_lock(&mdt_lock); - ret = minidump_enabled; - spin_unlock(&mdt_lock); - return ret; -} -EXPORT_SYMBOL(msm_minidump_enabled); - int msm_minidump_add_region(const struct md_region *entry) { u32 entries; @@ -196,19 +197,19 @@ int msm_minidump_add_region(const struct md_region *entry) if (((strlen(entry->name) > MAX_NAME_LENGTH) || md_check_name(entry->name)) && !entry->virt_addr) { - pr_info("Invalid entry details\n"); + pr_err("Invalid entry details\n"); return -EINVAL; } if (!IS_ALIGNED(entry->size, 4)) { - pr_info("size should be 4 byte aligned\n"); + pr_err("size should be 4 byte aligned\n"); return -EINVAL; } spin_lock(&mdt_lock); entries = minidump_table.num_regions; if (entries >= MAX_NUM_ENTRIES) { - pr_info("Maximum entries reached.\n"); + pr_err("Maximum entries reached.\n"); spin_unlock(&mdt_lock); return -ENOMEM; } @@ -238,23 
+239,32 @@ EXPORT_SYMBOL(msm_minidump_add_region); static int msm_minidump_add_header(void) { struct md_smem_region *mdreg = &minidump_table.region[0]; - char *banner; + struct elfhdr *md_ehdr; + struct elf_shdr *shdr; + struct elf_phdr *phdr; unsigned int strtbl_off, elfh_size, phdr_off; + char *banner; + /* Header buffer contains: + * elf header, MAX_NUM_ENTRIES+1 of section and program elf headers, + * string table section and linux banner. + */ elfh_size = sizeof(*md_ehdr) + MAX_STRTBL_SIZE + MAX_MEM_LENGTH + ((sizeof(*shdr) + sizeof(*phdr)) * (MAX_NUM_ENTRIES + 1)); - md_ehdr = kzalloc(elfh_size, GFP_KERNEL); - if (!md_ehdr) + minidump_elfheader.md_ehdr = kzalloc(elfh_size, GFP_KERNEL); + if (!minidump_elfheader.md_ehdr) return -ENOMEM; strlcpy(mdreg->name, "KELF_HEADER", sizeof(mdreg->name)); - mdreg->address = virt_to_phys(md_ehdr); + mdreg->address = virt_to_phys(minidump_elfheader.md_ehdr); mdreg->size = elfh_size; - /* Section headers*/ - shdr = (struct elf_shdr *)(md_ehdr + 1); - phdr = (struct elf_phdr *)(shdr + MAX_NUM_ENTRIES); + md_ehdr = minidump_elfheader.md_ehdr; + /* Assign section/program headers offset */ + minidump_elfheader.shdr = shdr = (struct elf_shdr *)(md_ehdr + 1); + minidump_elfheader.phdr = phdr = + (struct elf_phdr *)(shdr + MAX_NUM_ENTRIES); phdr_off = sizeof(*md_ehdr) + (sizeof(*shdr) * MAX_NUM_ENTRIES); memcpy(md_ehdr->e_ident, ELFMAG, SELFMAG); @@ -268,18 +278,19 @@ static int msm_minidump_add_header(void) md_ehdr->e_ehsize = sizeof(*md_ehdr); md_ehdr->e_phoff = phdr_off; md_ehdr->e_phentsize = sizeof(*phdr); - md_ehdr->e_phnum = 1; md_ehdr->e_shoff = sizeof(*md_ehdr); md_ehdr->e_shentsize = sizeof(*shdr); - md_ehdr->e_shnum = 3; /* NULL, STR TABLE, Linux banner */ md_ehdr->e_shstrndx = 1; - elf_offset = elfh_size; + minidump_elfheader.elf_offset = elfh_size; + + /* + * First section header should be NULL, + * 2nd section is string table. 
+ */ + minidump_elfheader.strtable_idx = 1; strtbl_off = sizeof(*md_ehdr) + ((sizeof(*phdr) + sizeof(*shdr)) * MAX_NUM_ENTRIES); - /* First section header should be NULL - * 2nd entry for string table - */ shdr++; shdr->sh_type = SHT_STRTAB; shdr->sh_offset = (elf_addr_t)strtbl_off; @@ -289,7 +300,15 @@ static int msm_minidump_add_header(void) shdr->sh_name = set_section_name("STR_TBL"); shdr++; - /* 3rd entry for linux banner */ + /* 3rd section is for minidump_table VA, used by parsers */ + shdr->sh_type = SHT_PROGBITS; + shdr->sh_entsize = 0; + shdr->sh_flags = 0; + shdr->sh_addr = (elf_addr_t)&minidump_table; + shdr->sh_name = set_section_name("minidump_table"); + shdr++; + + /* 4th section is linux banner */ banner = (char *)md_ehdr + strtbl_off + MAX_STRTBL_SIZE; strlcpy(banner, linux_banner, MAX_MEM_LENGTH); @@ -300,7 +319,6 @@ static int msm_minidump_add_header(void) shdr->sh_entsize = 0; shdr->sh_flags = SHF_WRITE; shdr->sh_name = set_section_name("linux_banner"); - shdr++; phdr->p_type = PT_LOAD; phdr->p_offset = (elf_addr_t)(strtbl_off + MAX_STRTBL_SIZE); @@ -309,8 +327,9 @@ static int msm_minidump_add_header(void) phdr->p_filesz = phdr->p_memsz = strlen(linux_banner) + 1; phdr->p_flags = PF_R | PF_W; - md_ehdr->e_phnum += 1; - phdr++; + /* Update headers count*/ + md_ehdr->e_phnum = 1; + md_ehdr->e_shnum = 4; return 0; } @@ -325,13 +344,13 @@ static int __init msm_minidump_init(void) smem_table = smem_get_entry(SMEM_MINIDUMP_TABLE_ID, &size, 0, SMEM_ANY_HOST_FLAG); if (IS_ERR_OR_NULL(smem_table)) { - pr_info("SMEM is not initialized.\n"); + pr_err("SMEM is not initialized.\n"); return -ENODEV; } if ((smem_table->next_avail_offset + MAX_MEM_LENGTH) > smem_table->smem_length) { - pr_info("SMEM memory not available.\n"); + pr_err("SMEM memory not available.\n"); return -ENOMEM; } @@ -353,10 +372,10 @@ static int __init msm_minidump_init(void) for (i = 0; i < pendings; i++) { mdr = &minidump_table.entry[i]; if (md_update_smem_table(mdr)) { - pr_info("Unable 
to add entry %s to smem table\n", + pr_err("Unable to add entry %s to smem table\n", mdr->name); spin_unlock(&mdt_lock); - return -ENODEV; + return -ENOENT; } } diff --git a/drivers/soc/qcom/perf_event_kryo.c b/drivers/soc/qcom/perf_event_kryo.c index c61a86850777..519961440742 100644 --- a/drivers/soc/qcom/perf_event_kryo.c +++ b/drivers/soc/qcom/perf_event_kryo.c @@ -118,12 +118,7 @@ static void kryo_write_pmresr(int reg, int l_h, u32 val) static u32 kryo_read_pmresr(int reg, int l_h) { - u32 val; - - if (reg > KRYO_MAX_L1_REG) { - pr_err("Invalid read of RESR reg %d\n", reg); - return 0; - } + u32 val = 0; if (l_h == RESR_L) { switch (reg) { @@ -136,6 +131,9 @@ static u32 kryo_read_pmresr(int reg, int l_h) case 2: asm volatile("mrs %0, " pmresr2l_el0 : "=r" (val)); break; + default: + WARN_ONCE(1, "Invalid read of RESR reg %d\n", reg); + break; } } else { switch (reg) { @@ -148,6 +146,9 @@ static u32 kryo_read_pmresr(int reg, int l_h) case 2: asm volatile("mrs %0," pmresr2h_el0 : "=r" (val)); break; + default: + WARN_ONCE(1, "Invalid read of RESR reg %d\n", reg); + break; } } diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c index 6e5ddc4a3a7d..3415338a1294 100644 --- a/drivers/soc/qcom/peripheral-loader.c +++ b/drivers/soc/qcom/peripheral-loader.c @@ -917,13 +917,13 @@ out: priv->region_start), VMID_HLOS); } + if (desc->clear_fw_region && priv->region_start) + pil_clear_segment(desc); dma_free_attrs(desc->dev, priv->region_size, priv->region, priv->region_start, &desc->attrs); priv->region = NULL; } - if (desc->clear_fw_region && priv->region_start) - pil_clear_segment(desc); pil_release_mmap(desc); } return ret; diff --git a/drivers/soc/qcom/qbt1000.c b/drivers/soc/qcom/qbt1000.c index 4ba92436bd06..6e7d34ac9163 100644 --- a/drivers/soc/qcom/qbt1000.c +++ b/drivers/soc/qcom/qbt1000.c @@ -377,6 +377,12 @@ static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg) drvdata = file->private_data; + if 
(IS_ERR(priv_arg)) { + dev_err(drvdata->dev, "%s: invalid user space pointer %lu\n", + __func__, arg); + return -EINVAL; + } + mutex_lock(&drvdata->mutex); pr_debug("qbt1000_ioctl %d\n", cmd); @@ -401,6 +407,13 @@ static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg) goto end; } + if (strcmp(app.name, FP_APP_NAME)) { + dev_err(drvdata->dev, "%s: Invalid app name\n", + __func__); + rc = -EINVAL; + goto end; + } + if (drvdata->app_handle) { dev_err(drvdata->dev, "%s: LOAD app already loaded, unloading first\n", __func__); @@ -414,6 +427,7 @@ static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg) } pr_debug("app %s load before\n", app.name); + app.name[MAX_NAME_SIZE - 1] = '\0'; /* start the TZ app */ rc = qseecom_start_app( @@ -427,7 +441,8 @@ static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg) pr_err("App %s failed to set bw\n", app.name); } } else { - pr_err("app %s failed to load\n", app.name); + dev_err(drvdata->dev, "%s: Fingerprint Trusted App failed to load\n", + __func__); goto end; } @@ -447,9 +462,7 @@ static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg) pr_debug("app %s load after\n", app.name); - if (!strcmp(app.name, FP_APP_NAME)) - drvdata->fp_app_handle = drvdata->app_handle; - + drvdata->fp_app_handle = drvdata->app_handle; break; } case QBT1000_UNLOAD_APP: diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c index 128ea434dcc8..a275537d4e08 100644 --- a/drivers/soc/qcom/qdsp6v2/apr.c +++ b/drivers/soc/qcom/qdsp6v2/apr.c @@ -514,19 +514,19 @@ struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn, mutex_unlock(&svc->m_lock); return NULL; } - if (!svc->port_cnt && !svc->svc_cnt) + if (!svc->svc_cnt) clnt->svc_cnt++; svc->port_cnt++; svc->port_fn[temp_port] = svc_fn; svc->port_priv[temp_port] = priv; + svc->svc_cnt++; } else { if (!svc->fn) { - if (!svc->port_cnt && !svc->svc_cnt) + if (!svc->svc_cnt) clnt->svc_cnt++; 
svc->fn = svc_fn; - if (svc->port_cnt) - svc->svc_cnt++; svc->priv = priv; + svc->svc_cnt++; } } @@ -745,29 +745,28 @@ int apr_deregister(void *handle) if (!handle) return -EINVAL; + if (!svc->svc_cnt) { + pr_err("%s: svc already deregistered. svc = %pK\n", + __func__, svc); + return -EINVAL; + } + mutex_lock(&svc->m_lock); dest_id = svc->dest_id; client_id = svc->client_id; clnt = &client[dest_id][client_id]; - if (svc->port_cnt > 0 || svc->svc_cnt > 0) { + if (svc->svc_cnt > 0) { if (svc->port_cnt) svc->port_cnt--; - else if (svc->svc_cnt) - svc->svc_cnt--; - if (!svc->port_cnt && !svc->svc_cnt) { + svc->svc_cnt--; + if (!svc->svc_cnt) { client[dest_id][client_id].svc_cnt--; - svc->need_reset = 0x0; - } - } else if (client[dest_id][client_id].svc_cnt > 0) { - client[dest_id][client_id].svc_cnt--; - if (!client[dest_id][client_id].svc_cnt) { - svc->need_reset = 0x0; pr_debug("%s: service is reset %pK\n", __func__, svc); } } - if (!svc->port_cnt && !svc->svc_cnt) { + if (!svc->svc_cnt) { svc->priv = NULL; svc->id = 0; svc->fn = NULL; diff --git a/drivers/soc/qcom/scm-boot.c b/drivers/soc/qcom/scm-boot.c index 369fb27ff447..f3e96f9afa12 100644 --- a/drivers/soc/qcom/scm-boot.c +++ b/drivers/soc/qcom/scm-boot.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010, 2014, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010, 2014, 2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -24,11 +24,20 @@ int scm_set_boot_addr(phys_addr_t addr, unsigned int flags) u32 flags; u32 addr; } cmd; + struct scm_desc desc = {0}; + + if (!is_scm_armv8()) { + cmd.addr = addr; + cmd.flags = flags; + return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR, + &cmd, sizeof(cmd), NULL, 0); + } + + desc.args[0] = addr; + desc.args[1] = flags; + desc.arginfo = SCM_ARGS(2); - cmd.addr = addr; - cmd.flags = flags; - return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR, - &cmd, sizeof(cmd), NULL, 0); + return scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, SCM_BOOT_ADDR), &desc); } EXPORT_SYMBOL(scm_set_boot_addr); diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c index 68592feccb33..b5681a5c6817 100644 --- a/drivers/soc/qcom/service-notifier.c +++ b/drivers/soc/qcom/service-notifier.c @@ -376,13 +376,6 @@ static void root_service_service_arrive(struct work_struct *work) mutex_unlock(&qmi_client_release_lock); pr_info("Connection established between QMI handle and %d service\n", data->instance_id); - /* Register for indication messages about service */ - rc = qmi_register_ind_cb(data->clnt_handle, root_service_service_ind_cb, - (void *)data); - if (rc < 0) - pr_err("Indication callback register failed(instance-id: %d) rc:%d\n", - data->instance_id, rc); - mutex_lock(¬if_add_lock); mutex_lock(&service_list_lock); list_for_each_entry(service_notif, &service_list, list) { @@ -405,6 +398,12 @@ static void root_service_service_arrive(struct work_struct *work) } mutex_unlock(&service_list_lock); mutex_unlock(¬if_add_lock); + /* Register for indication messages about service */ + rc = qmi_register_ind_cb(data->clnt_handle, + root_service_service_ind_cb, (void *)data); + if (rc < 0) + pr_err("Indication callback register failed(instance-id: %d) rc:%d\n", + data->instance_id, rc); } static void root_service_service_exit(struct 
qmi_client_info *data, diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index c1d8748a5d08..b9903fe86f60 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -65,6 +65,7 @@ enum { HW_PLATFORM_RCM = 21, HW_PLATFORM_STP = 23, HW_PLATFORM_SBC = 24, + HW_PLATFORM_ADP = 25, HW_PLATFORM_INVALID }; @@ -85,6 +86,7 @@ const char *hw_platform[] = { [HW_PLATFORM_DTV] = "DTV", [HW_PLATFORM_STP] = "STP", [HW_PLATFORM_SBC] = "SBC", + [HW_PLATFORM_ADP] = "ADP", }; enum { @@ -111,6 +113,22 @@ const char *qrd_hw_platform_subtype[] = { }; enum { + PLATFORM_SUBTYPE_MOJAVE_V1 = 0x0, + PLATFORM_SUBTYPE_MMX = 0x1, + PLATFORM_SUBTYPE_MOJAVE_FULL_V2 = 0x2, + PLATFORM_SUBTYPE_MOJAVE_BARE_V2 = 0x3, + PLATFORM_SUBTYPE_ADP_INVALID, +}; + +const char *adp_hw_platform_subtype[] = { + [PLATFORM_SUBTYPE_MOJAVE_V1] = "MOJAVE_V1", + [PLATFORM_SUBTYPE_MMX] = "MMX", + [PLATFORM_SUBTYPE_MOJAVE_FULL_V2] = "_MOJAVE_V2_FULL", + [PLATFORM_SUBTYPE_MOJAVE_BARE_V2] = "_MOJAVE_V2_BARE", + [PLATFORM_SUBTYPE_ADP_INVALID] = "INVALID", +}; + +enum { PLATFORM_SUBTYPE_UNKNOWN = 0x0, PLATFORM_SUBTYPE_CHARM = 0x1, PLATFORM_SUBTYPE_STRANGE = 0x2, @@ -514,11 +532,13 @@ static struct msm_soc_info cpu_of_id[] = { /* 8996 IDs */ [246] = {MSM_CPU_8996, "MSM8996"}, - [310] = {MSM_CPU_8996, "MSM8996"}, - [311] = {MSM_CPU_8996, "APQ8096"}, [291] = {MSM_CPU_8996, "APQ8096"}, [305] = {MSM_CPU_8996, "MSM8996pro"}, + [310] = {MSM_CPU_8996, "MSM8996"}, + [311] = {MSM_CPU_8996, "APQ8096"}, [312] = {MSM_CPU_8996, "APQ8096pro"}, + [315] = {MSM_CPU_8996, "MSM8996pro"}, + [316] = {MSM_CPU_8996, "APQ8096pro"}, /* 8976 ID */ [266] = {MSM_CPU_8976, "MSM8976"}, @@ -804,6 +824,14 @@ msm_get_platform_subtype(struct device *dev, } return snprintf(buf, PAGE_SIZE, "%-.32s\n", qrd_hw_platform_subtype[hw_subtype]); + } + if (socinfo_get_platform_type() == HW_PLATFORM_ADP) { + if (hw_subtype >= PLATFORM_SUBTYPE_ADP_INVALID) { + pr_err("Invalid hardware platform sub type for adp found\n"); + hw_subtype = 
PLATFORM_SUBTYPE_ADP_INVALID; + } + return snprintf(buf, PAGE_SIZE, "%-.32s\n", + adp_hw_platform_subtype[hw_subtype]); } else { if (hw_subtype >= PLATFORM_SUBTYPE_INVALID) { pr_err("Invalid hardware platform subtype\n"); @@ -1225,10 +1253,6 @@ static void * __init setup_dummy_socinfo(void) dummy_socinfo.id = 246; strlcpy(dummy_socinfo.build_id, "msm8996 - ", sizeof(dummy_socinfo.build_id)); - } else if (early_machine_is_msm8996_auto()) { - dummy_socinfo.id = 310; - strlcpy(dummy_socinfo.build_id, "msm8996-auto - ", - sizeof(dummy_socinfo.build_id)); } else if (early_machine_is_msm8929()) { dummy_socinfo.id = 268; strlcpy(dummy_socinfo.build_id, "msm8929 - ", diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c index d3d0b8594c9f..51f4ec79db10 100644 --- a/drivers/soc/qcom/subsystem_restart.c +++ b/drivers/soc/qcom/subsystem_restart.c @@ -480,15 +480,19 @@ static void send_sysmon_notif(struct subsys_device *dev) mutex_unlock(&subsys_list_lock); } -static void for_each_subsys_device(struct subsys_device **list, unsigned count, - void *data, void (*fn)(struct subsys_device *, void *)) +static int for_each_subsys_device(struct subsys_device **list, unsigned count, + void *data, int (*fn)(struct subsys_device *, void *)) { + int ret; while (count--) { struct subsys_device *dev = *list++; if (!dev) continue; - fn(dev, data); + ret = fn(dev, data); + if (ret) + return ret; } + return 0; } static void notify_each_subsys_device(struct subsys_device **list, @@ -590,21 +594,31 @@ static int wait_for_err_ready(struct subsys_device *subsys) return 0; } -static void subsystem_shutdown(struct subsys_device *dev, void *data) +static int subsystem_shutdown(struct subsys_device *dev, void *data) { const char *name = dev->desc->name; + int ret; pr_info("[%s:%d]: Shutting down %s\n", current->comm, current->pid, name); - if (dev->desc->shutdown(dev->desc, true) < 0) - panic("subsys-restart: [%s:%d]: Failed to shutdown %s!", - current->comm, 
current->pid, name); + ret = dev->desc->shutdown(dev->desc, true); + if (ret < 0) { + if (!dev->desc->ignore_ssr_failure) { + panic("subsys-restart: [%s:%d]: Failed to shutdown %s!", + current->comm, current->pid, name); + } else { + pr_err("Shutdown failure on %s\n", name); + return ret; + } + } dev->crash_count++; subsys_set_state(dev, SUBSYS_OFFLINE); disable_all_irqs(dev); + + return 0; } -static void subsystem_ramdump(struct subsys_device *dev, void *data) +static int subsystem_ramdump(struct subsys_device *dev, void *data) { const char *name = dev->desc->name; @@ -613,15 +627,17 @@ static void subsystem_ramdump(struct subsys_device *dev, void *data) pr_warn("%s[%s:%d]: Ramdump failed.\n", name, current->comm, current->pid); dev->do_ramdump_on_put = false; + return 0; } -static void subsystem_free_memory(struct subsys_device *dev, void *data) +static int subsystem_free_memory(struct subsys_device *dev, void *data) { if (dev->desc->free_memory) dev->desc->free_memory(dev->desc); + return 0; } -static void subsystem_powerup(struct subsys_device *dev, void *data) +static int subsystem_powerup(struct subsys_device *dev, void *data) { const char *name = dev->desc->name; int ret; @@ -629,11 +645,17 @@ static void subsystem_powerup(struct subsys_device *dev, void *data) pr_info("[%s:%d]: Powering up %s\n", current->comm, current->pid, name); init_completion(&dev->err_ready); - if (dev->desc->powerup(dev->desc) < 0) { + ret = dev->desc->powerup(dev->desc); + if (ret < 0) { notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE, NULL); - panic("[%s:%d]: Powerup error: %s!", - current->comm, current->pid, name); + if (!dev->desc->ignore_ssr_failure) { + panic("[%s:%d]: Powerup error: %s!", + current->comm, current->pid, name); + } else { + pr_err("Powerup failure on %s\n", name); + return ret; + } } enable_all_irqs(dev); @@ -641,11 +663,16 @@ static void subsystem_powerup(struct subsys_device *dev, void *data) if (ret) { notify_each_subsys_device(&dev, 1, 
SUBSYS_POWERUP_FAILURE, NULL); - panic("[%s:%d]: Timed out waiting for error ready: %s!", - current->comm, current->pid, name); + if (!dev->desc->ignore_ssr_failure) + panic("[%s:%d]: Timed out waiting for error ready: %s!", + current->comm, current->pid, name); + else + return ret; } subsys_set_state(dev, SUBSYS_ONLINE); subsys_set_crash_status(dev, CRASH_STATUS_NO_CRASH); + + return 0; } static int __find_subsys(struct device *dev, void *data) @@ -907,6 +934,7 @@ static void subsystem_restart_wq_func(struct work_struct *work) struct subsys_tracking *track; unsigned count; unsigned long flags; + int ret; /* * It's OK to not take the registration lock at this point. @@ -954,7 +982,9 @@ static void subsystem_restart_wq_func(struct work_struct *work) pr_debug("[%s:%d]: Starting restart sequence for %s\n", current->comm, current->pid, desc->name); notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL); - for_each_subsys_device(list, count, NULL, subsystem_shutdown); + ret = for_each_subsys_device(list, count, NULL, subsystem_shutdown); + if (ret) + goto err; notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL); notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION, @@ -970,12 +1000,19 @@ static void subsystem_restart_wq_func(struct work_struct *work) for_each_subsys_device(list, count, NULL, subsystem_free_memory); notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL); - for_each_subsys_device(list, count, NULL, subsystem_powerup); + ret = for_each_subsys_device(list, count, NULL, subsystem_powerup); + if (ret) + goto err; notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL); pr_info("[%s:%d]: Restart sequence for %s completed.\n", current->comm, current->pid, desc->name); +err: + /* Reset subsys count */ + if (ret) + dev->count = 0; + mutex_unlock(&soc_order_reg_lock); mutex_unlock(&track->lock); @@ -1466,6 +1503,9 @@ static int subsys_parse_devicetree(struct subsys_desc *desc) desc->generic_irq = 
ret; } + desc->ignore_ssr_failure = of_property_read_bool(pdev->dev.of_node, + "qcom,ignore-ssr-failure"); + order = ssr_parse_restart_orders(desc); if (IS_ERR(order)) { pr_err("Could not initialize SSR restart order, err = %ld\n", |
