summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt123
-rw-r--r--Documentation/devicetree/bindings/pil/subsys-pil-tz.txt129
-rw-r--r--drivers/soc/qcom/Kconfig40
-rw-r--r--drivers/soc/qcom/Makefile12
-rw-r--r--drivers/soc/qcom/peripheral-loader.c1098
-rw-r--r--drivers/soc/qcom/peripheral-loader.h149
-rw-r--r--drivers/soc/qcom/pil-msa.c788
-rw-r--r--drivers/soc/qcom/pil-msa.h47
-rw-r--r--drivers/soc/qcom/pil-q6v5-mss.c412
-rw-r--r--drivers/soc/qcom/pil-q6v5.c722
-rw-r--r--drivers/soc/qcom/pil-q6v5.h80
-rw-r--r--drivers/soc/qcom/ramdump.c394
-rw-r--r--drivers/soc/qcom/subsys-pil-tz.c1037
-rw-r--r--drivers/soc/qcom/subsystem_notif.c222
-rw-r--r--drivers/soc/qcom/subsystem_restart.c1772
-rw-r--r--drivers/soc/qcom/sysmon-qmi.c732
-rw-r--r--drivers/soc/qcom/sysmon.c395
-rw-r--r--include/soc/qcom/sysmon.h124
18 files changed, 8276 insertions, 0 deletions
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
new file mode 100644
index 000000000000..57b0b81dce25
--- /dev/null
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -0,0 +1,123 @@
+Qualcomm MSS QDSP6v5 Peripheral Image Loader
+
+pil-qdsp6v5-mss is a peripheral image loader (PIL) driver. It is used for
+loading QDSP6v5 (Hexagon) firmware images for modem subsystems into memory and
+preparing the subsystem's processor to execute code. It's also responsible for
+shutting down the processor when it's not needed.
+
+Required properties:
+- compatible:	      Must be "qcom,pil-q6v5-mss" or "qcom,pil-q6v55-mss" or
+		      "qcom,pil-q6v56-mss".
+- reg: Pairs of physical base addresses and region sizes of
+ memory mapped registers.
+- reg-names: Names of the bases for the above registers. "qdsp6_base",
+ "rmb_base", "restart_reg" or "restart_reg_sec"(optional
+ for secure mode) are expected.
+			If "halt_base" is in the same 4K page as these registers
+			then it will be defined; otherwise "halt_q6", "halt_modem"
+			and "halt_nc" are required.
+- interrupts: The modem watchdog interrupt
+- vdd_cx-supply: Reference to the regulator that supplies the vdd_cx domain.
+- vdd_cx-voltage: Voltage corner/level(max) for cx rail.
+- vdd_mx-supply: Reference to the regulator that supplies the memory rail.
+- vdd_mx-uV: Voltage setting for the mx rail.
+- qcom,firmware-name: Base name of the firmware image. Ex. "mdsp"
+
+Optional properties:
+- vdd_mss-supply: Reference to the regulator that supplies the processor.
+ This may be a shared regulator that is already voted
+ on in the PIL proxy voting code (and also managed by the
+			modem on its own), hence we mark it as optional.
+- vdd_pll-supply: Reference to the regulator that supplies the PLL's rail.
+- qcom,vdd_pll: Voltage to be set for the PLL's rail.
+- reg-names: "cxrail_bhs_reg" - control register for modem power
+ domain.
+- clocks: Array of <clock_controller_phandle clock_reference> listing
+			all the clocks that are accessed by this subsystem.
+- qcom,proxy-clock-names: Names of the clocks that need to be turned on/off during
+ proxy voting/unvoting.
+- qcom,active-clock-names: Names of the clocks that need to be turned on for the
+ subsystem to run. Turned off when the subsystem is shutdown.
+- clock-names: Names of all the clocks that are accessed by the subsystem.
+- qcom,is-not-loadable: Boolean- Present if the image does not need to
+ be loaded.
+- qcom,pil-self-auth: Boolean- True if authentication is required.
+- qcom,mem-protect-id: Virtual ID used by PIL to call into TZ/HYP to protect/unprotect
+ subsystem related memory.
+- qcom,gpio-err-fatal: GPIO used by the modem to indicate error fatal to the apps.
+- qcom,gpio-err-ready: GPIO used by the modem to indicate error ready to the apps.
+- qcom,gpio-proxy-unvote: GPIO used by the modem to trigger proxy unvoting in
+ the apps.
+- qcom,gpio-force-stop: GPIO used by the apps to force the modem to shutdown.
+- qcom,gpio-stop-ack: GPIO used by the modem to ack force stop or a graceful stop
+ to the apps.
+- qcom,gpio-ramdump-disable: GPIO used by the modem to inform the apps that ramdump
+ collection should be disabled.
+- qcom,gpio-shutdown-ack: GPIO used by the modem to indicate that it has done the
+ necessary cleanup and that the apps can move forward with
+ the shutdown sequence.
+- qcom,restart-group: List of subsystems that will need to restart together.
+- qcom,mba-image-is-not-elf:	Boolean- Present if MBA image doesn't use the ELF
+			format.
+ format.
+- qcom,ssctl-instance-id: Instance id used by the subsystem to connect with the SSCTL
+ service.
+- qcom,sysmon-id: platform device id that sysmon is probed with for the subsystem.
+- qcom,override-acc: Boolean- Present if we need to override the default ACC settings
+- qcom,ahb-clk-vote: Boolean- Present if we need to remove the vote for the mss_cfg_ahb
+ clock after the modem boots up
+- qcom,pnoc-clk-vote: Boolean- Present if the modem needs the PNOC bus to be
+ clocked before it boots up
+- qcom,qdsp6v56-1-3: Boolean- Present if the qdsp version is v56 1.3
+- qcom,qdsp6v56-1-5: Boolean- Present if the qdsp version is v56 1.5
+- qcom,edge: GLINK logical name of the remote subsystem
+- qcom,pil-force-shutdown: Boolean. If set, the SSR framework will not trigger graceful shutdown
+ on behalf of the subsystem driver.
+- qcom,qdsp6v56-1-8: Boolean- Present if the qdsp version is v56 1.8
+- qcom,qdsp6v56-1-8-inrush-current: Boolean- Present if the qdsp version is V56 1.8 and has in-rush
+ current issue.
+- qcom,qdsp6v61-1-1: Boolean- Present if the qdsp version is v61 1.1
+- qcom,qdsp6v62-1-2: Boolean- Present if the qdsp version is v62 1.2
+- qcom,mx-spike-wa: Boolean- Present if we need to assert QDSP6 I/O clamp, memory
+ wordline clamp, and compiler memory clamp during MSS restart.
+- qcom,qdsp6v56-1-10: Boolean- Present if the qdsp version is v56 1.10
+
+Example:
+ qcom,mss@fc880000 {
+ compatible = "qcom,pil-q6v5-mss";
+ reg = <0xfc880000 0x100>,
+ <0xfd485000 0x400>,
+ <0xfc820000 0x020>,
+ <0xfc401680 0x004>;
+ reg-names = "qdsp6_base", "halt_base", "rmb_base",
+ "restart_reg";
+ interrupts = <0 24 1>;
+ vdd_mss-supply = <&pm8841_s3>;
+ vdd_cx-supply = <&pm8841_s2>;
+ vdd_cx-voltage = <7>;
+ vdd_mx-supply = <&pm8841_s1>;
+ vdd_mx-uV = <105000>;
+
+ clocks = <&clock_rpm clk_xo_pil_mss_clk>,
+ <&clock_gcc clk_gcc_mss_cfg_ahb_clk>,
+ <&clock_gcc clk_gcc_mss_q6_bimc_axi_clk>,
+ <&clock_gcc clk_gcc_boot_rom_ahb_clk>;
+ clock-names = "xo", "iface_clk", "bus_clk", "mem_clk";
+ qcom,proxy-clock-names = "xo";
+ qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk";
+
+ qcom,is-not-loadable;
+ qcom,firmware-name = "mba";
+ qcom,pil-self-auth;
+ qcom,mba-image-is-not-elf;
+ qcom,override-acc;
+
+ /* GPIO inputs from mss */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+
+ /* GPIO output to mss */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+ qcom,ssctl-instance-id = <12>;
+ qcom,sysmon-id = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
new file mode 100644
index 000000000000..8c60906741f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
@@ -0,0 +1,129 @@
+* Generic Subsystem Peripheral Image Loader
+
+subsys-pil-tz is a generic peripheral image loader (PIL) driver. It is
+used for loading the firmware images of the subsystems into memory and
+preparing the subsystem's processor to execute code. It's also responsible
+for shutting down the processor when it's not needed.
+
+Required properties:
+- compatible: Must be "qcom,pil-tz-generic"
+- qcom,firmware-name: Base name of the firmware image.
+
+Optional properties:
+- reg: Pairs of physical base addresses and region sizes of
+ memory mapped registers.
+- reg-names: Names of the bases for the above registers. Not required for
+ PIL usage. Ex. "wrapper_base", "vbif_base".
+- interrupts: Subsystem to Apps watchdog bite interrupt.
+- vdd_'reg'-supply: Reference to the regulator that supplies the corresponding
+ 'reg' domain.
+- qcom,proxy-reg-names: Names of the regulators that need to be turned on/off
+ during proxy voting/unvoting.
+- qcom,active-reg-names: Names of the regulators that need to be turned on for the
+ subsystem to run. Turned off when the subsystem is shutdown.
+- qcom,vdd_'reg'-uV-uA: Voltage and current values for the 'reg' regulator.
+- qcom,proxy-clock-names: Names of the clocks that need to be turned on/off during
+ proxy voting/unvoting.
+- qcom,active-clock-names: Names of the clocks that need to be turned on for the
+ subsystem to run. Turned off when the subsystem is shutdown.
+- clock-names: Names of all the clocks that are accessed by the subsystem.
+- qcom,<clock-name>-freq: Frequency to be set for that clock in Hz. If the property
+ isn't added for a clock, then the default clock frequency
+ would be set to 19200000 Hz.
+- qcom,msm-bus,name: Name of the bus client for the subsystem.
+- qcom,msm-bus,num-cases: Number of use-cases.
+- qcom,msm-bus,num-paths: Number of paths.
+- qcom,msm-bus,active-only: If not set, uses the dual context by default.
+- qcom,msm-bus,vectors-KBps: Vector array of master id, slave id, arbitrated
+ bandwidth and instantaneous bandwidth.
+- qcom,pas-id: pas_id of the subsystem.
+- qcom,proxy-timeout-ms: Proxy vote timeout value for the subsystem.
+- qcom,smem-id: ID of the SMEM item for the subsystem.
+- qcom,is-not-loadable: Boolean. Present if the subsystem's firmware image does not
+ need to be loaded.
+- qcom,pil-no-auth: Boolean. Present if the subsystem is not authenticated and brought
+ out of reset by using the PIL ops.
+- qcom,mem-protect-id: Virtual ID used by PIL to call into TZ/HYP to protect/unprotect
+ subsystem related memory.
+- qcom,gpio-err-fatal: GPIO used by the subsystem to indicate error fatal to the apps.
+- qcom,gpio-err-ready: GPIO used by the subsystem to indicate error ready to the apps.
+- qcom,gpio-proxy-unvote: GPIO used by the subsystem to trigger proxy unvoting in
+ the apps.
+- qcom,gpio-force-stop: GPIO used by the apps to force the subsystem to shutdown.
+- qcom,gpio-stop-ack: GPIO used by the subsystem to ack force stop or a graceful stop
+ to the apps.
+- qcom,restart-group: List of subsystems that will need to restart together.
+- qcom,keep-proxy-regs-on: Boolean. Present if during proxy unvoting, PIL needs to leave
+ the regulators enabled after removing the voltage/current votes.
+- qcom,edge: GLINK logical name of the remote subsystem
+- qcom,ssctl-instance-id: Instance id used by the subsystem to connect with the SSCTL
+ service.
+- qcom,sysmon-id: platform device id that sysmon is probed with for the subsystem.
+- qcom,pil-force-shutdown: Boolean. If set, the SSR framework will not trigger graceful shutdown
+ on behalf of the subsystem driver.
+
+Example:
+ qcom,venus@fdce0000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0xfdce0000 0x4000>,
+ <0xfdc80000 0x400>;
+
+ vdd-supply = <&gdsc_venus>;
+ qcom,proxy-reg-names = "vdd";
+ clock-names = "core_clk", "iface_clk", "bus_clk", "mem_clk",
+ "scm_core_clk", "scm_iface_clk", "scm_bus_clk",
+ "scm_core_clk_src";
+ qcom,proxy-clock-names = "core_clk", "iface_clk", "bus_clk",
+ "mem_clk", "scm_core_clk",
+ "scm_iface_clk", "scm_bus_clk",
+ "scm_core_clk_src";
+ qcom,scm_core_clk_src-freq = <50000000>;
+
+ qcom,msm-bus,name = "pil-venus";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,vectors-KBps =
+ <63 512 0 0>,
+ <63 512 0 304000>;
+
+ qcom,pas-id = <9>;
+ qcom,proxy-timeout-ms = <2000>;
+ qcom,firmware-name = "venus";
+ };
+
+ qcom,lpass@fe200000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0xfe200000 0x00100>,
+ <0xfd485100 0x00010>,
+ <0xfc4016c0 0x00004>;
+
+ interrupts = <0 162 1>;
+
+ vdd_cx-supply = <&pm8841_s2_corner>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <7 100000>;
+ clock-names = "bus_clk", "xo", "scm_core_clk", "scm_iface_clk",
+ "scm_bus_clk", "scm_core_clk_src";
+ qcom,active-clock-names = "bus_clk";
+ qcom,proxy-clock-names = "xo", "scm_core_clk", "scm_iface_clk",
+ "scm_bus_clk", "scm_core_clk_src";
+ qcom,scm_core_clk_src-freq = <50000000>;
+
+ qcom,smem-id = <423>;
+ qcom,pas-id = <1>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,firmware-name = "adsp";
+ qcom,edge = "lpass";
+
+ /* GPIO inputs from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+ qcom,ssctl-instance-id = <14>;
+ qcom,sysmon-id = <1>;
+ };
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 517e1a87a488..7b2ec797a345 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -257,6 +257,46 @@ config MSM_SECURE_BUFFER
endif # ARCH_QCOM
+config MSM_SUBSYSTEM_RESTART
+ bool "MSM Subsystem Restart"
+ help
+ This option enables the MSM subsystem restart framework.
+
+ The MSM subsystem restart framework provides support to boot,
+ shutdown, and restart subsystems with a reference counted API.
+ It also notifies userspace of transitions between these states via
+ sysfs.
+
+config MSM_PIL
+ bool "Peripheral image loading"
+ select FW_LOADER
+ default n
+ help
+ Some peripherals need to be loaded into memory before they can be
+ brought out of reset.
+
+ Say yes to support these devices.
+
+config MSM_PIL_SSR_GENERIC
+ tristate "MSM Subsystem Boot Support"
+ depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+ help
+ Support for booting and shutting down MSM Subsystem processors.
+ This driver also monitors the SMSM status bits and the watchdog
+ interrupt for the subsystem and restarts it on a watchdog bite
+ or a fatal error. Subsystems include LPASS, Venus, VPU, WCNSS and
+ BCSS.
+
+config MSM_PIL_MSS_QDSP6V5
+ tristate "MSS QDSP6v5 (Hexagon) Boot Support"
+ depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+ help
+ Support for booting and shutting down QDSP6v5 (Hexagon) processors
+ in modem subsystems. If you would like to make or receive phone
+ calls then say Y here.
+
+ If unsure, say N.
+
config TRACER_PKT
bool "Tracer Packet"
help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index fbb3815aeb40..c806598098a1 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_MSM_GLINK) += glink.o glink_debugfs.o glink_ssr.o
obj-$(CONFIG_MSM_GLINK_LOOPBACK_SERVER) += glink_loopback_server.o
obj-$(CONFIG_MSM_GLINK_SMD_XPRT) += glink_smd_xprt.o
obj-$(CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT)+= glink_smem_native_xprt.o
+obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
obj-$(CONFIG_ARCH_QCOM) += kryo-l2-accessors.o
obj-$(CONFIG_MSM_SMP2P) += smp2p.o smp2p_debug.o smp2p_sleepstate.o
obj-$(CONFIG_MSM_SMP2P_TEST) += smp2p_loopback.o smp2p_test.o smp2p_spinlock_test.o
@@ -11,6 +12,17 @@ obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o
ifdef CONFIG_DEBUG_FS
obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd-debug.o
endif
+
+obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
+obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
+obj-$(CONFIG_MSM_PIL) += peripheral-loader.o
+
+ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+ obj-y += subsystem_notif.o
+ obj-y += subsystem_restart.o
+ obj-y += ramdump.o
+endif
+
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_SMD) += smd.o
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
new file mode 100644
index 000000000000..8ca649f67142
--- /dev/null
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -0,0 +1,1098 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/elf.h>
+#include <linux/mutex.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/rwsem.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include <asm/uaccess.h>
+#include <asm/setup.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+#include "peripheral-loader.h"
+
+#define pil_err(desc, fmt, ...) \
+ dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
+#define pil_info(desc, fmt, ...) \
+ dev_info(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
+
+#if defined(CONFIG_ARM)
+#define pil_memset_io(d, c, count) memset(d, c, count)
+#else
+#define pil_memset_io(d, c, count) memset_io(d, c, count)
+#endif
+
+#define PIL_NUM_DESC 10
+static void __iomem *pil_info_base;
+
+/**
+ * proxy_timeout_ms - Override for proxy vote timeouts
+ * -1: Use driver-specified timeout
+ *  0: Hold proxy votes until shutdown
+ * >0: Specify a custom timeout in ms
+ */
+static int proxy_timeout_ms = -1;
+module_param(proxy_timeout_ms, int, S_IRUGO | S_IWUSR);
+
+/**
+ * struct pil_mdt - Representation of <name>.mdt file in memory
+ * @hdr: ELF32 header
+ * @phdr: ELF32 program headers
+ */
+struct pil_mdt {
+ struct elf32_hdr hdr;
+ struct elf32_phdr phdr[];
+};
+
+/**
+ * struct pil_seg - memory map representing one segment
+ * @next: points to next segment, or NULL if last segment
+ * @paddr: physical start address of segment
+ * @sz: size of segment
+ * @filesz: size of segment on disk
+ * @num: segment number
+ * @relocated: true if segment is relocated, false otherwise
+ *
+ * Loosely based on an elf program header. Contains all necessary information
+ * to load and initialize a segment of the image in memory.
+ */
+struct pil_seg {
+ phys_addr_t paddr;
+ unsigned long sz;
+ unsigned long filesz;
+ int num;
+ struct list_head list;
+ bool relocated;
+};
+
+/**
+ * struct pil_priv - Private state for a pil_desc
+ * @proxy: work item used to run the proxy unvoting routine
+ * @ws: wakeup source to prevent suspend during pil_boot
+ * @wname: name of @ws
+ * @desc: pointer to pil_desc this is private data for
+ * @seg: list of segments sorted by physical address
+ * @entry_addr: physical address where processor starts booting at
+ * @base_addr: smallest start address among all segments that are relocatable
+ * @region_start: address where relocatable region starts or lowest address
+ * for non-relocatable images
+ * @region_end: address where relocatable region ends or highest address for
+ * non-relocatable images
+ * @region: region allocated for relocatable images
+ * @unvoted_flag: flag to keep track if we have unvoted or not.
+ *
+ * This struct contains data for a pil_desc that should not be exposed outside
+ * of this file. This structure points to the descriptor and the descriptor
+ * points to this structure so that PIL drivers can't access the private
+ * data of a descriptor but this file can access both.
+ */
+struct pil_priv {
+ struct delayed_work proxy;
+ struct wakeup_source ws;
+ char wname[32];
+ struct pil_desc *desc;
+ struct list_head segs;
+ phys_addr_t entry_addr;
+ phys_addr_t base_addr;
+ phys_addr_t region_start;
+ phys_addr_t region_end;
+ void *region;
+ struct pil_image_info __iomem *info;
+ int id;
+ int unvoted_flag;
+ size_t region_size;
+};
+
+/**
+ * pil_do_ramdump() - Ramdump an image
+ * @desc: descriptor from pil_desc_init()
+ * @ramdump_dev: ramdump device returned from create_ramdump_device()
+ *
+ * Calls the ramdump API with a list of segments generated from the addresses
+ * that the descriptor corresponds to.
+ */
+int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+ struct pil_priv *priv = desc->priv;
+ struct pil_seg *seg;
+ int count = 0, ret;
+ struct ramdump_segment *ramdump_segs, *s;
+
+ list_for_each_entry(seg, &priv->segs, list)
+ count++;
+
+ ramdump_segs = kcalloc(count, sizeof(*ramdump_segs), GFP_KERNEL);
+ if (!ramdump_segs)
+ return -ENOMEM;
+
+ if (desc->subsys_vmid > 0)
+ ret = pil_assign_mem_to_linux(desc, priv->region_start,
+ (priv->region_end - priv->region_start));
+
+ s = ramdump_segs;
+ list_for_each_entry(seg, &priv->segs, list) {
+ s->address = seg->paddr;
+ s->size = seg->sz;
+ s++;
+ }
+
+ ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
+ kfree(ramdump_segs);
+
+ if (!ret && desc->subsys_vmid > 0)
+ ret = pil_assign_mem_to_subsys(desc, priv->region_start,
+ (priv->region_end - priv->region_start));
+
+ return ret;
+}
+EXPORT_SYMBOL(pil_do_ramdump);
+
+int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
+ size_t size)
+{
+ int ret;
+ int srcVM[1] = {VMID_HLOS};
+ int destVM[1] = {desc->subsys_vmid};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE};
+
+ ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
+ if (ret)
+ pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d\n",
+ __func__, &addr, size, desc->subsys_vmid);
+ return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_subsys);
+
+int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
+ size_t size)
+{
+ int ret;
+ int srcVM[1] = {desc->subsys_vmid};
+ int destVM[1] = {VMID_HLOS};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+ ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
+ if (ret)
+ panic("%s: failed for %pa address of size %zx - subsys VMid %d. Fatal error.\n",
+ __func__, &addr, size, desc->subsys_vmid);
+
+ return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_linux);
+
+int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+ phys_addr_t addr, size_t size)
+{
+ int ret;
+ int srcVM[1] = {VMID_HLOS};
+ int destVM[2] = {VMID_HLOS, desc->subsys_vmid};
+ int destVMperm[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
+
+ ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 2);
+ if (ret)
+ pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d\n",
+ __func__, &addr, size, desc->subsys_vmid);
+
+ return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_subsys_and_linux);
+
+int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr, size_t size,
+ int VMid)
+{
+ int ret;
+ int srcVM[2] = {VMID_HLOS, desc->subsys_vmid};
+ int destVM[1] = {VMid};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE};
+
+ if (VMid == VMID_HLOS)
+ destVMperm[0] = PERM_READ | PERM_WRITE | PERM_EXEC;
+
+ ret = hyp_assign_phys(addr, size, srcVM, 2, destVM, destVMperm, 1);
+ if (ret)
+ panic("%s: failed for %pa address of size %zx - subsys VMid %d. Fatal error.\n",
+ __func__, &addr, size, desc->subsys_vmid);
+
+ return ret;
+}
+EXPORT_SYMBOL(pil_reclaim_mem);
+
+/**
+ * pil_get_entry_addr() - Retrieve the entry address of a peripheral image
+ * @desc: descriptor from pil_desc_init()
+ *
+ * Returns the physical address where the image boots at or 0 if unknown.
+ */
+phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
+{
+ return desc->priv ? desc->priv->entry_addr : 0;
+}
+EXPORT_SYMBOL(pil_get_entry_addr);
+
+static void __pil_proxy_unvote(struct pil_priv *priv)
+{
+ struct pil_desc *desc = priv->desc;
+
+ desc->ops->proxy_unvote(desc);
+ notify_proxy_unvote(desc->dev);
+ __pm_relax(&priv->ws);
+ module_put(desc->owner);
+
+}
+
+static void pil_proxy_unvote_work(struct work_struct *work)
+{
+ struct delayed_work *delayed = to_delayed_work(work);
+ struct pil_priv *priv = container_of(delayed, struct pil_priv, proxy);
+ __pil_proxy_unvote(priv);
+}
+
+static int pil_proxy_vote(struct pil_desc *desc)
+{
+ int ret = 0;
+ struct pil_priv *priv = desc->priv;
+
+ if (desc->ops->proxy_vote) {
+ __pm_stay_awake(&priv->ws);
+ ret = desc->ops->proxy_vote(desc);
+ if (ret)
+ __pm_relax(&priv->ws);
+ }
+
+ if (desc->proxy_unvote_irq)
+ enable_irq(desc->proxy_unvote_irq);
+ notify_proxy_vote(desc->dev);
+
+ return ret;
+}
+
+static void pil_proxy_unvote(struct pil_desc *desc, int immediate)
+{
+ struct pil_priv *priv = desc->priv;
+ unsigned long timeout;
+
+ if (proxy_timeout_ms == 0 && !immediate)
+ return;
+ else if (proxy_timeout_ms > 0)
+ timeout = proxy_timeout_ms;
+ else
+ timeout = desc->proxy_timeout;
+
+ if (desc->ops->proxy_unvote) {
+ if (WARN_ON(!try_module_get(desc->owner)))
+ return;
+
+ if (immediate)
+ timeout = 0;
+
+ if (!desc->proxy_unvote_irq || immediate)
+ schedule_delayed_work(&priv->proxy,
+ msecs_to_jiffies(timeout));
+ }
+}
+
+static irqreturn_t proxy_unvote_intr_handler(int irq, void *dev_id)
+{
+ struct pil_desc *desc = dev_id;
+ struct pil_priv *priv = desc->priv;
+
+ pil_info(desc, "Power/Clock ready interrupt received\n");
+ if (!desc->priv->unvoted_flag) {
+ desc->priv->unvoted_flag = 1;
+ __pil_proxy_unvote(priv);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static bool segment_is_relocatable(const struct elf32_phdr *p)
+{
+ return !!(p->p_flags & BIT(27));
+}
+
+static phys_addr_t pil_reloc(const struct pil_priv *priv, phys_addr_t addr)
+{
+ return addr - priv->base_addr + priv->region_start;
+}
+
+static struct pil_seg *pil_init_seg(const struct pil_desc *desc,
+ const struct elf32_phdr *phdr, int num)
+{
+ bool reloc = segment_is_relocatable(phdr);
+ const struct pil_priv *priv = desc->priv;
+ struct pil_seg *seg;
+
+ if (!reloc && memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
+ pil_err(desc, "kernel memory would be overwritten [%#08lx, %#08lx)\n",
+ (unsigned long)phdr->p_paddr,
+ (unsigned long)(phdr->p_paddr + phdr->p_memsz));
+ return ERR_PTR(-EPERM);
+ }
+
+ if (phdr->p_filesz > phdr->p_memsz) {
+ pil_err(desc, "Segment %d: file size (%u) is greater than mem size (%u).\n",
+ num, phdr->p_filesz, phdr->p_memsz);
+ return ERR_PTR(-EINVAL);
+ }
+
+ seg = kmalloc(sizeof(*seg), GFP_KERNEL);
+ if (!seg)
+ return ERR_PTR(-ENOMEM);
+ seg->num = num;
+ seg->paddr = reloc ? pil_reloc(priv, phdr->p_paddr) : phdr->p_paddr;
+ seg->filesz = phdr->p_filesz;
+ seg->sz = phdr->p_memsz;
+ seg->relocated = reloc;
+ INIT_LIST_HEAD(&seg->list);
+
+ return seg;
+}
+
+#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
+
+static int segment_is_loadable(const struct elf32_phdr *p)
+{
+ return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags) &&
+ p->p_memsz;
+}
+
+static void pil_dump_segs(const struct pil_priv *priv)
+{
+ struct pil_seg *seg;
+ phys_addr_t seg_h_paddr;
+
+ list_for_each_entry(seg, &priv->segs, list) {
+ seg_h_paddr = seg->paddr + seg->sz;
+ pil_info(priv->desc, "%d: %pa %pa\n", seg->num,
+ &seg->paddr, &seg_h_paddr);
+ }
+}
+
+/*
+ * Ensure the entry address lies within the image limits and if the image is
+ * relocatable ensure it lies within a relocatable segment.
+ */
+static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt)
+{
+ struct pil_seg *seg;
+ phys_addr_t entry = mdt->hdr.e_entry;
+ bool image_relocated = priv->region;
+
+ if (image_relocated)
+ entry = pil_reloc(priv, entry);
+ priv->entry_addr = entry;
+
+ if (priv->desc->flags & PIL_SKIP_ENTRY_CHECK)
+ return 0;
+
+ list_for_each_entry(seg, &priv->segs, list) {
+ if (entry >= seg->paddr && entry < seg->paddr + seg->sz) {
+ if (!image_relocated)
+ return 0;
+ else if (seg->relocated)
+ return 0;
+ }
+ }
+ pil_err(priv->desc, "entry address %pa not within range\n", &entry);
+ pil_dump_segs(priv);
+ return -EADDRNOTAVAIL;
+}
+
+static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
+ phys_addr_t max_addr, size_t align)
+{
+ void *region;
+ size_t size = max_addr - min_addr;
+ size_t aligned_size;
+
+ /* Don't reallocate due to fragmentation concerns, just sanity check */
+ if (priv->region) {
+ if (WARN(priv->region_end - priv->region_start < size,
+ "Can't reuse PIL memory, too small\n"))
+ return -ENOMEM;
+ return 0;
+ }
+
+ if (align > SZ_4M)
+ aligned_size = ALIGN(size, SZ_4M);
+ else
+ aligned_size = ALIGN(size, SZ_1M);
+
+ init_dma_attrs(&priv->desc->attrs);
+ dma_set_attr(DMA_ATTR_SKIP_ZEROING, &priv->desc->attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->desc->attrs);
+
+ region = dma_alloc_attrs(priv->desc->dev, aligned_size,
+ &priv->region_start, GFP_KERNEL,
+ &priv->desc->attrs);
+
+ if (region == NULL) {
+ pil_err(priv->desc, "Failed to allocate relocatable region of size %zx\n",
+ size);
+ return -ENOMEM;
+ }
+
+ priv->region = region;
+ priv->region_end = priv->region_start + size;
+ priv->base_addr = min_addr;
+ priv->region_size = aligned_size;
+
+ return 0;
+}
+
+static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt)
+{
+ const struct elf32_phdr *phdr;
+ phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end;
+ size_t align = 0;
+ int i, ret = 0;
+ bool relocatable = false;
+
+ min_addr_n = min_addr_r = (phys_addr_t)ULLONG_MAX;
+ max_addr_n = max_addr_r = 0;
+
+ /* Find the image limits */
+ for (i = 0; i < mdt->hdr.e_phnum; i++) {
+ phdr = &mdt->phdr[i];
+ if (!segment_is_loadable(phdr))
+ continue;
+
+ start = phdr->p_paddr;
+ end = start + phdr->p_memsz;
+
+ if (segment_is_relocatable(phdr)) {
+ min_addr_r = min(min_addr_r, start);
+ max_addr_r = max(max_addr_r, end);
+ /*
+ * Lowest relocatable segment dictates alignment of
+ * relocatable region
+ */
+ if (min_addr_r == start)
+ align = phdr->p_align;
+ relocatable = true;
+ } else {
+ min_addr_n = min(min_addr_n, start);
+ max_addr_n = max(max_addr_n, end);
+ }
+
+ }
+
+ /*
+ * Align the max address to the next 4K boundary to satisfy iommus and
+ * XPUs that operate on 4K chunks.
+ */
+ max_addr_n = ALIGN(max_addr_n, SZ_4K);
+ max_addr_r = ALIGN(max_addr_r, SZ_4K);
+
+ if (relocatable) {
+ ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align);
+ } else {
+ priv->region_start = min_addr_n;
+ priv->region_end = max_addr_n;
+ priv->base_addr = min_addr_n;
+ }
+
+ if (priv->info) {
+ __iowrite32_copy(&priv->info->start, &priv->region_start,
+ sizeof(priv->region_start) / 4);
+ writel_relaxed(priv->region_end - priv->region_start,
+ &priv->info->size);
+ }
+
+ return ret;
+}
+
+static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b)
+{
+ int ret = 0;
+ struct pil_seg *seg_a = list_entry(a, struct pil_seg, list);
+ struct pil_seg *seg_b = list_entry(b, struct pil_seg, list);
+
+ if (seg_a->paddr < seg_b->paddr)
+ ret = -1;
+ else if (seg_a->paddr > seg_b->paddr)
+ ret = 1;
+
+ return ret;
+}
+
+static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt)
+{
+ struct pil_priv *priv = desc->priv;
+ const struct elf32_phdr *phdr;
+ struct pil_seg *seg;
+ int i, ret;
+
+ ret = pil_setup_region(priv, mdt);
+ if (ret)
+ return ret;
+
+
+ pil_info(desc, "loading from %pa to %pa\n", &priv->region_start,
+ &priv->region_end);
+
+ for (i = 0; i < mdt->hdr.e_phnum; i++) {
+ phdr = &mdt->phdr[i];
+ if (!segment_is_loadable(phdr))
+ continue;
+
+ seg = pil_init_seg(desc, phdr, i);
+ if (IS_ERR(seg))
+ return PTR_ERR(seg);
+
+ list_add_tail(&seg->list, &priv->segs);
+ }
+ list_sort(NULL, &priv->segs, pil_cmp_seg);
+
+ return pil_init_entry_addr(priv, mdt);
+}
+
+/*
+ * Free all parsed segment state for @desc and clear the IMEM load-info
+ * entry (start/size) so observers no longer see a stale image record.
+ */
+static void pil_release_mmap(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+	struct pil_seg *p, *tmp;
+	u64 zero = 0ULL;
+
+	if (priv->info) {
+		__iowrite32_copy(&priv->info->start, &zero,
+				       sizeof(zero) / 4);
+		writel_relaxed(0, &priv->info->size);
+	}
+
+	list_for_each_entry_safe(p, tmp, &priv->segs, list) {
+		list_del(&p->list);
+		kfree(p);
+	}
+}
+
+/* Firmware memory is mapped and zeroed in chunks of this size */
+#define IOMAP_SIZE	SZ_1M
+
+/* Cookie handed to the default map_fw_mem()/unmap_fw_mem() callbacks */
+struct pil_map_fw_info {
+	void *region;		/* CPU-side region from the DMA allocation */
+	struct dma_attrs attrs;	/* DMA attributes used at allocation time */
+	phys_addr_t base_addr;	/* physical start of the load region */
+	struct device *dev;	/* device used for dma_remap/dma_unremap */
+};
+
+/* Default mapper: map @size bytes at @paddr and return a CPU address. */
+static void *map_fw_mem(phys_addr_t paddr, size_t size, void *data)
+{
+	struct pil_map_fw_info *info = data;
+
+	return dma_remap(info->dev, info->region, paddr, size,
+					&info->attrs);
+}
+
+/* Default unmapper: undo a mapping created by map_fw_mem(). */
+static void unmap_fw_mem(void *vaddr, size_t size, void *data)
+{
+	struct pil_map_fw_info *info = data;
+
+	dma_unremap(info->dev, vaddr, size);
+}
+
+/*
+ * Load one ELF segment: stream its firmware blob (if any) into place,
+ * zero the bss-style tail, and optionally authenticate the segment via
+ * ops->verify_blob. Returns 0 on success or a negative errno.
+ */
+static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
+{
+	int ret = 0, count;
+	phys_addr_t paddr;
+	char fw_name[30];
+	int num = seg->num;
+	struct pil_map_fw_info map_fw_info = {
+		.attrs = desc->attrs,
+		.region = desc->priv->region,
+		.base_addr = desc->priv->region_start,
+		.dev = desc->dev,
+	};
+	void *map_data = desc->map_data ? desc->map_data : &map_fw_info;
+
+	if (seg->filesz) {
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
+				desc->fw_name, num);
+		ret = request_firmware_into_buf(fw_name, desc->dev, seg->paddr,
+					  seg->filesz, desc->map_fw_mem,
+					  desc->unmap_fw_mem, map_data);
+		if (ret < 0) {
+			pil_err(desc, "Failed to locate blob %s or blob is too big.\n",
+				fw_name);
+			return ret;
+		}
+
+		/* A positive return is the byte count actually loaded */
+		if (ret != seg->filesz) {
+			pil_err(desc, "Blob size %u doesn't match %lu\n",
+					ret, seg->filesz);
+			return -EPERM;
+		}
+		ret = 0;
+	}
+
+	/* Zero out trailing memory */
+	paddr = seg->paddr + seg->filesz;
+	count = seg->sz - seg->filesz;
+	while (count > 0) {
+		int size;
+		u8 __iomem *buf;
+
+		/* Map in bounded IOMAP_SIZE chunks rather than all at once */
+		size = min_t(size_t, IOMAP_SIZE, count);
+		buf = desc->map_fw_mem(paddr, size, map_data);
+		if (!buf) {
+			pil_err(desc, "Failed to map memory\n");
+			return -ENOMEM;
+		}
+		pil_memset_io(buf, 0, size);
+
+		desc->unmap_fw_mem(buf, size, map_data);
+
+		count -= size;
+		paddr += size;
+	}
+
+	if (desc->ops->verify_blob) {
+		ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
+		if (ret)
+			pil_err(desc, "Blob%u failed verification\n", num);
+	}
+
+	return ret;
+}
+
+/*
+ * Read PIL-related devicetree properties into @desc.
+ *
+ * Optional properties:
+ *  - qcom,mem-protect-id: memprot id stored in desc->subsys_vmid
+ *  - qcom,gpio-proxy-unvote: GPIO whose IRQ triggers the proxy unvote
+ *    (only consulted when ops->proxy_unvote is implemented)
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int pil_parse_devicetree(struct pil_desc *desc)
+{
+	struct device_node *ofnode = desc->dev->of_node;
+	int clk_ready = 0;
+
+	if (!ofnode)
+		return -EINVAL;
+
+	/* Fix: debug message now names the property actually read */
+	if (of_property_read_u32(ofnode, "qcom,mem-protect-id",
+					&desc->subsys_vmid))
+		pr_debug("Unable to read the mem-protect-id for %s\n",
+					desc->name);
+
+	if (desc->ops->proxy_unvote && of_find_property(ofnode,
+					"qcom,gpio-proxy-unvote",
+					NULL)) {
+		clk_ready = of_get_named_gpio(ofnode,
+				"qcom,gpio-proxy-unvote", 0);
+
+		if (clk_ready < 0) {
+			dev_dbg(desc->dev,
+				"[%s]: Error getting proxy unvoting gpio\n",
+				desc->name);
+			return clk_ready;
+		}
+
+		clk_ready = gpio_to_irq(clk_ready);
+		if (clk_ready < 0) {
+			dev_err(desc->dev,
+				"[%s]: Error getting proxy unvote IRQ\n",
+				desc->name);
+			return clk_ready;
+		}
+	}
+	desc->proxy_unvote_irq = clk_ready;
+	return 0;
+}
+
+/* Synchronize request_firmware() with suspend */
+static DECLARE_RWSEM(pil_pm_rwsem);
+
+/**
+ * pil_boot() - Load a peripheral image into memory and boot it
+ * @desc: descriptor from pil_desc_init()
+ *
+ * Returns 0 on success or -ERROR on failure.
+ */
+int pil_boot(struct pil_desc *desc)
+{
+	int ret;
+	char fw_name[30];
+	const struct pil_mdt *mdt;
+	const struct elf32_hdr *ehdr;
+	struct pil_seg *seg;
+	const struct firmware *fw;
+	struct pil_priv *priv = desc->priv;
+	/* mem_protect: memory was already handed back to Linux on the error
+	 * path; hyp_assign: memory is currently shared with the subsystem */
+	bool mem_protect = false;
+	bool hyp_assign = false;
+
+	if (desc->shutdown_fail)
+		pil_err(desc, "Subsystem shutdown failed previously!\n");
+
+	/* Reinitialize for new image */
+	pil_release_mmap(desc);
+
+	/* Hold off suspend while firmware is being requested/loaded */
+	down_read(&pil_pm_rwsem);
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->fw_name);
+	ret = request_firmware(&fw, fw_name, desc->dev);
+	if (ret) {
+		pil_err(desc, "Failed to locate %s\n", fw_name);
+		goto out;
+	}
+
+	/* Validate the mdt: ELF magic, at least one program header, and
+	 * all program headers contained within the blob. */
+	if (fw->size < sizeof(*ehdr)) {
+		pil_err(desc, "Not big enough to be an elf header\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	mdt = (const struct pil_mdt *)fw->data;
+	ehdr = &mdt->hdr;
+
+	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+		pil_err(desc, "Not an elf header\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	if (ehdr->e_phnum == 0) {
+		pil_err(desc, "No loadable segments\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+	    sizeof(struct elf32_hdr) > fw->size) {
+		pil_err(desc, "Program headers not within mdt\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	ret = pil_init_mmap(desc, mdt);
+	if (ret)
+		goto release_fw;
+
+	desc->priv->unvoted_flag = 0;
+	ret = pil_proxy_vote(desc);
+	if (ret) {
+		pil_err(desc, "Failed to proxy vote\n");
+		goto release_fw;
+	}
+
+	if (desc->ops->init_image)
+		ret = desc->ops->init_image(desc, fw->data, fw->size);
+	if (ret) {
+		pil_err(desc, "Invalid firmware metadata\n");
+		goto err_boot;
+	}
+
+	if (desc->ops->mem_setup)
+		ret = desc->ops->mem_setup(desc, priv->region_start,
+				priv->region_end - priv->region_start);
+	if (ret) {
+		pil_err(desc, "Memory setup error\n");
+		goto err_deinit_image;
+	}
+
+	if (desc->subsys_vmid > 0) {
+		/* Make sure the memory is actually assigned to Linux. In the
+		 * case where the shutdown sequence is not able to immediately
+		 * assign the memory back to Linux, we need to do this here. */
+		ret = pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+		if (ret)
+			pil_err(desc, "Failed to assign to linux, ret - %d\n",
+								ret);
+
+		/* Share the region with the subsystem so segments can load */
+		ret = pil_assign_mem_to_subsys_and_linux(desc,
+				priv->region_start,
+				(priv->region_end - priv->region_start));
+		if (ret) {
+			pil_err(desc, "Failed to assign memory, ret - %d\n",
+								ret);
+			goto err_deinit_image;
+		}
+		hyp_assign = true;
+	}
+
+	list_for_each_entry(seg, &desc->priv->segs, list) {
+		ret = pil_load_seg(desc, seg);
+		if (ret)
+			goto err_deinit_image;
+	}
+
+	if (desc->subsys_vmid > 0) {
+		/* Hand the loaded region over to the subsystem exclusively */
+		ret = pil_reclaim_mem(desc, priv->region_start,
+				(priv->region_end - priv->region_start),
+				desc->subsys_vmid);
+		if (ret) {
+			pil_err(desc, "Failed to assign %s memory, ret - %d\n",
+							desc->name, ret);
+			goto err_deinit_image;
+		}
+		hyp_assign = false;
+	}
+
+	ret = desc->ops->auth_and_reset(desc);
+	if (ret) {
+		pil_err(desc, "Failed to bring out of reset\n");
+		goto err_auth_and_reset;
+	}
+	pil_info(desc, "Brought out of reset\n");
+	/* Success falls through the labels below; each step is gated on ret */
+err_auth_and_reset:
+	if (ret && desc->subsys_vmid > 0) {
+		pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+		mem_protect = true;
+	}
+err_deinit_image:
+	if (ret && desc->ops->deinit_image)
+		desc->ops->deinit_image(desc);
+err_boot:
+	if (ret && desc->proxy_unvote_irq)
+		disable_irq(desc->proxy_unvote_irq);
+	pil_proxy_unvote(desc, ret);
+release_fw:
+	release_firmware(fw);
+out:
+	up_read(&pil_pm_rwsem);
+	if (ret) {
+		/* On failure, return any still-shared memory to Linux and
+		 * free the region allocated by pil_init_mmap(). */
+		if (priv->region) {
+			if (desc->subsys_vmid > 0 && !mem_protect &&
+					hyp_assign) {
+				pil_reclaim_mem(desc, priv->region_start,
+					(priv->region_end -
+						priv->region_start),
+					VMID_HLOS);
+			}
+			dma_free_attrs(desc->dev, priv->region_size,
+					priv->region, priv->region_start,
+					&desc->attrs);
+			priv->region = NULL;
+		}
+		pil_release_mmap(desc);
+	}
+	return ret;
+}
+
+/**
+ * pil_shutdown() - Shutdown a peripheral
+ * @desc: descriptor from pil_desc_init()
+ */
+void pil_shutdown(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (desc->ops->shutdown) {
+		/* Remember the outcome so the next pil_boot() can warn */
+		if (desc->ops->shutdown(desc))
+			desc->shutdown_fail = true;
+		else
+			desc->shutdown_fail = false;
+	}
+
+	if (desc->proxy_unvote_irq) {
+		disable_irq(desc->proxy_unvote_irq);
+		/* Drop the proxy vote ourselves if the IRQ never fired */
+		if (!desc->priv->unvoted_flag)
+			pil_proxy_unvote(desc, 1);
+	} else if (!proxy_timeout_ms)
+		pil_proxy_unvote(desc, 1);
+	else
+		/* Let the timed unvote work finish before returning */
+		flush_delayed_work(&priv->proxy);
+}
+
+/**
+ * pil_free_memory() - Free memory resources associated with a peripheral
+ * @desc: descriptor from pil_desc_init()
+ */
+void pil_free_memory(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (priv->region) {
+		/* Return ownership to Linux before freeing protected memory */
+		if (desc->subsys_vmid > 0)
+			pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+		dma_free_attrs(desc->dev, priv->region_size,
+				priv->region, priv->region_start, &desc->attrs);
+		priv->region = NULL;
+	}
+}
+
+static DEFINE_IDA(pil_ida);
+
+/**
+ * pil_desc_init() - Initialize a pil descriptor
+ * @desc: descriptor to intialize
+ *
+ * Initialize a pil descriptor for use by other pil functions. This function
+ * must be called before calling pil_boot() or pil_shutdown().
+ *
+ * Returns 0 for success and -ERROR on failure.
+ */
+int pil_desc_init(struct pil_desc *desc)
+{
+	struct pil_priv *priv;
+	int ret;
+	void __iomem *addr;
+	char buf[sizeof(priv->info->name)];
+
+	/* proxy_unvote without proxy_vote makes no sense; drop the unvote */
+	if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote,
+			"Invalid proxy voting. Ignoring\n"))
+		((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	desc->priv = priv;
+	priv->desc = desc;
+
+	priv->id = ret = ida_simple_get(&pil_ida, 0, PIL_NUM_DESC, GFP_KERNEL);
+	if (priv->id < 0)
+		goto err;
+
+	/* Publish the image name in the per-descriptor IMEM slot */
+	if (pil_info_base) {
+		addr = pil_info_base + sizeof(struct pil_image_info) * priv->id;
+		priv->info = (struct pil_image_info __iomem *)addr;
+
+		strncpy(buf, desc->name, sizeof(buf));
+		__iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);
+	}
+
+	ret = pil_parse_devicetree(desc);
+	if (ret)
+		goto err_parse_dt;
+
+	/* Ignore users who don't make any sense */
+	WARN(desc->ops->proxy_unvote && desc->proxy_unvote_irq == 0
+		 && !desc->proxy_timeout,
+		 "Invalid proxy unvote callback or a proxy timeout of 0"
+		 " was specified or no proxy unvote IRQ was specified.\n");
+
+	if (desc->proxy_unvote_irq) {
+		ret = request_threaded_irq(desc->proxy_unvote_irq,
+				  NULL,
+				  proxy_unvote_intr_handler,
+				  IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+				  desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev,
+				"Unable to request proxy unvote IRQ: %d\n",
+				ret);
+			/*
+			 * Fix: release the IDA id on this path too; jumping
+			 * straight to "err" leaked it.
+			 */
+			goto err_parse_dt;
+		}
+		disable_irq(desc->proxy_unvote_irq);
+	}
+
+	snprintf(priv->wname, sizeof(priv->wname), "pil-%s", desc->name);
+	wakeup_source_init(&priv->ws, priv->wname);
+	INIT_DELAYED_WORK(&priv->proxy, pil_proxy_unvote_work);
+	INIT_LIST_HEAD(&priv->segs);
+
+	/* Make sure mapping functions are set. */
+	if (!desc->map_fw_mem)
+		desc->map_fw_mem = map_fw_mem;
+
+	if (!desc->unmap_fw_mem)
+		desc->unmap_fw_mem = unmap_fw_mem;
+
+	return 0;
+err_parse_dt:
+	ida_simple_remove(&pil_ida, priv->id);
+err:
+	kfree(priv);
+	return ret;
+}
+EXPORT_SYMBOL(pil_desc_init);
+
+/**
+ * pil_desc_release() - Release a pil descriptor
+ * @desc: descriptor to free
+ */
+void pil_desc_release(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (priv) {
+		/*
+		 * NOTE(review): the id is released before the delayed work is
+		 * flushed; the work must not use priv->id — confirm.
+		 */
+		ida_simple_remove(&pil_ida, priv->id);
+		flush_delayed_work(&priv->proxy);
+		wakeup_source_trash(&priv->ws);
+	}
+	desc->priv = NULL;
+	kfree(priv);
+}
+
+/* PM notifier: block firmware loads for the duration of a suspend cycle. */
+static int pil_pm_notify(struct notifier_block *b, unsigned long event, void *p)
+{
+	if (event == PM_SUSPEND_PREPARE)
+		down_write(&pil_pm_rwsem);
+	else if (event == PM_POST_SUSPEND)
+		up_write(&pil_pm_rwsem);
+
+	return NOTIFY_DONE;
+}
+
+/* Registered in msm_pil_init() to gate firmware loads across suspend */
+static struct notifier_block pil_pm_notifier = {
+	.notifier_call = pil_pm_notify,
+};
+
+/*
+ * Map and clear the IMEM region used to publish PIL image load info, then
+ * register the PM notifier. The IMEM mapping is optional: on any failure
+ * we still register the notifier so pil_boot()/suspend stay synchronized.
+ */
+static int __init msm_pil_init(void)
+{
+	struct device_node *np;
+	struct resource res;
+	int i;
+	int ret;
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-pil");
+	if (!np) {
+		pr_warn("pil: failed to find qcom,msm-imem-pil node\n");
+		goto out;
+	}
+	ret = of_address_to_resource(np, 0, &res);
+	/* Fix: drop the reference taken by of_find_compatible_node() */
+	of_node_put(np);
+	if (ret) {
+		pr_warn("pil: address to resource on imem region failed\n");
+		goto out;
+	}
+	pil_info_base = ioremap(res.start, resource_size(&res));
+	if (!pil_info_base) {
+		pr_warn("pil: could not map imem region\n");
+		goto out;
+	}
+	/* Clear any stale image info left over from the bootloader */
+	for (i = 0; i < resource_size(&res)/sizeof(u32); i++)
+		writel_relaxed(0, pil_info_base + (i * sizeof(u32)));
+
+out:
+	return register_pm_notifier(&pil_pm_notifier);
+}
+
+/* Module teardown: unhook from PM and unmap the IMEM info region. */
+static void __exit msm_pil_exit(void)
+{
+	unregister_pm_notifier(&pil_pm_notifier);
+	if (pil_info_base)
+		iounmap(pil_info_base);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
new file mode 100644
index 000000000000..1e8c0650a698
--- /dev/null
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -0,0 +1,149 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PERIPHERAL_LOADER_H
+#define __MSM_PERIPHERAL_LOADER_H
+
+#include <linux/dma-attrs.h>
+
+struct device;
+struct module;
+struct pil_priv;
+
+/**
+ * struct pil_desc - PIL descriptor
+ * @name: string used for pil_get()
+ * @fw_name: firmware name
+ * @dev: parent device
+ * @ops: callback functions
+ * @owner: module the descriptor belongs to
+ * @proxy_timeout: delay in ms until proxy vote is removed
+ * @flags: bitfield for image flags
+ * @priv: DON'T USE - internal only
+ * @attrs: DMA attributes to be used during dma allocation.
+ * @proxy_unvote_irq: IRQ to trigger a proxy unvote. proxy_timeout
+ * is ignored if this is set.
+ * @map_fw_mem: Custom function used to map physical address space to virtual.
+ * This defaults to a dma_remap-based mapper if not specified.
+ * @unmap_fw_mem: Custom function used to undo mapping by map_fw_mem.
+ * This defaults to the matching dma_unremap-based helper if not specified.
+ * @map_data: Opaque cookie passed to map_fw_mem/unmap_fw_mem. When NULL, an
+ * internally built pil_map_fw_info is passed instead.
+ * @shutdown_fail: Set if PIL op for shutting down subsystem fails.
+ * @subsys_vmid: memprot id for the subsystem.
+ */
+struct pil_desc {
+	const char *name;
+	const char *fw_name;
+	struct device *dev;
+	const struct pil_reset_ops *ops;
+	struct module *owner;
+	unsigned long proxy_timeout;
+	unsigned long flags;
+#define PIL_SKIP_ENTRY_CHECK	BIT(0)
+	struct pil_priv *priv;
+	struct dma_attrs attrs;
+	unsigned int proxy_unvote_irq;
+	void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data);
+	void (*unmap_fw_mem)(void *virt, size_t size, void *data);
+	void *map_data;
+	bool shutdown_fail;
+	u32 subsys_vmid;
+};
+
+/**
+ * struct pil_image_info - info in IMEM about image and where it is loaded
+ * @name: name of image (may or may not be NULL terminated)
+ * @start: indicates physical address where image starts (little endian)
+ * @size: size of image (little endian)
+ *
+ * NOTE(review): this layout appears to be consumed out of IMEM by external
+ * tools; keep it packed and do not reorder fields — confirm before editing.
+ */
+struct pil_image_info {
+	char name[8];
+	__le64 start;
+	__le32 size;
+} __attribute__((__packed__));
+
+/**
+ * struct pil_reset_ops - PIL operations
+ * @init_image: prepare an image for authentication
+ * @mem_setup: prepare the image memory region
+ * @verify_blob: authenticate a program segment, called once for each loadable
+ *		 program segment (optional)
+ * @proxy_vote: make proxy votes before auth_and_reset (optional)
+ * @auth_and_reset: boot the processor
+ * @proxy_unvote: remove any proxy votes (optional)
+ * @deinit_image: restore actions performed in init_image if necessary
+ * @shutdown: shutdown the processor
+ *
+ * All int-returning ops report success with 0; pil_boot()/pil_shutdown()
+ * treat any nonzero return as failure.
+ */
+struct pil_reset_ops {
+	int (*init_image)(struct pil_desc *pil, const u8 *metadata,
+			  size_t size);
+	int (*mem_setup)(struct pil_desc *pil, phys_addr_t addr, size_t size);
+	int (*verify_blob)(struct pil_desc *pil, phys_addr_t phy_addr,
+			   size_t size);
+	int (*proxy_vote)(struct pil_desc *pil);
+	int (*auth_and_reset)(struct pil_desc *pil);
+	void (*proxy_unvote)(struct pil_desc *pil);
+	int (*deinit_image)(struct pil_desc *pil);
+	int (*shutdown)(struct pil_desc *pil);
+};
+
+#ifdef CONFIG_MSM_PIL
+extern int pil_desc_init(struct pil_desc *desc);
+extern int pil_boot(struct pil_desc *desc);
+extern void pil_shutdown(struct pil_desc *desc);
+extern void pil_free_memory(struct pil_desc *desc);
+extern void pil_desc_release(struct pil_desc *desc);
+extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc);
+extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev);
+extern int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
+						size_t size);
+extern int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
+						size_t size);
+extern int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size);
+extern int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr, size_t size,
+						int VMid);
+#else
+/* No-op stubs used when the PIL driver is not built in. */
+static inline int pil_desc_init(struct pil_desc *desc) { return 0; }
+static inline int pil_boot(struct pil_desc *desc) { return 0; }
+static inline void pil_shutdown(struct pil_desc *desc) { }
+static inline void pil_free_memory(struct pil_desc *desc) { }
+static inline void pil_desc_release(struct pil_desc *desc) { }
+static inline phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
+{
+	return 0;
+}
+static inline int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_subsys(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr,
+					size_t size, int VMid)
+{
+	return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
new file mode 100644
index 000000000000..de593184184e
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.c
@@ -0,0 +1,788 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+/* Q6 Register Offsets */
+#define QDSP6SS_RST_EVB 0x010
+#define QDSP6SS_DBG_CFG 0x018
+
+/* AXI Halting Registers */
+#define MSS_Q6_HALT_BASE 0x180
+#define MSS_MODEM_HALT_BASE 0x200
+#define MSS_NC_HALT_BASE 0x280
+
+/* RMB Status Register Values */
+#define STATUS_PBL_SUCCESS 0x1
+#define STATUS_XPU_UNLOCKED 0x1
+#define STATUS_XPU_UNLOCKED_SCRIBBLED 0x2
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE 0x00
+#define RMB_PBL_STATUS 0x04
+#define RMB_MBA_COMMAND 0x08
+#define RMB_MBA_STATUS 0x0C
+#define RMB_PMI_META_DATA 0x10
+#define RMB_PMI_CODE_START 0x14
+#define RMB_PMI_CODE_LENGTH 0x18
+#define RMB_PROTOCOL_VERSION 0x1C
+#define RMB_MBA_DEBUG_INFORMATION 0x20
+
+#define POLL_INTERVAL_US 50
+
+#define CMD_META_DATA_READY 0x1
+#define CMD_LOAD_READY 0x2
+#define CMD_PILFAIL_NFY_MBA 0xffffdead
+
+#define STATUS_META_DATA_AUTH_SUCCESS 0x3
+#define STATUS_AUTH_COMPLETE 0x4
+#define STATUS_MBA_UNLOCKED 0x6
+
+/* External BHS */
+#define EXTERNAL_BHS_ON BIT(0)
+#define EXTERNAL_BHS_STATUS BIT(4)
+#define BHS_TIMEOUT_US 50
+
+#define MSS_RESTART_PARAM_ID 0x2
+#define MSS_RESTART_ID 0xA
+
+#define MSS_MAGIC 0XAABADEAD
+
+static int pbl_mba_boot_timeout_ms = 1000;
+module_param(pbl_mba_boot_timeout_ms, int, S_IRUGO | S_IWUSR);
+
+static int modem_auth_timeout_ms = 10000;
+module_param(modem_auth_timeout_ms, int, S_IRUGO | S_IWUSR);
+
+/* If set to 0xAABADEAD, MBA failures trigger a kernel panic */
+static uint modem_trigger_panic;
+module_param(modem_trigger_panic, uint, S_IRUGO | S_IWUSR);
+
+/* To set the modem debug cookie in DBG_CFG register for debugging */
+static uint modem_dbg_cfg;
+module_param(modem_dbg_cfg, uint, S_IRUGO | S_IWUSR);
+
+/* Dump the PBL/MBA interface (RMB) registers for post-mortem debugging. */
+static void modem_log_rmb_regs(void __iomem *base)
+{
+	pr_err("RMB_MBA_IMAGE: %08x\n", readl_relaxed(base + RMB_MBA_IMAGE));
+	pr_err("RMB_PBL_STATUS: %08x\n", readl_relaxed(base + RMB_PBL_STATUS));
+	pr_err("RMB_MBA_COMMAND: %08x\n",
+			readl_relaxed(base + RMB_MBA_COMMAND));
+	pr_err("RMB_MBA_STATUS: %08x\n", readl_relaxed(base + RMB_MBA_STATUS));
+	pr_err("RMB_PMI_META_DATA: %08x\n",
+			readl_relaxed(base + RMB_PMI_META_DATA));
+	pr_err("RMB_PMI_CODE_START: %08x\n",
+			readl_relaxed(base + RMB_PMI_CODE_START));
+	pr_err("RMB_PMI_CODE_LENGTH: %08x\n",
+			readl_relaxed(base + RMB_PMI_CODE_LENGTH));
+	pr_err("RMB_PROTOCOL_VERSION: %08x\n",
+			readl_relaxed(base + RMB_PROTOCOL_VERSION));
+	pr_err("RMB_MBA_DEBUG_INFORMATION: %08x\n",
+			readl_relaxed(base + RMB_MBA_DEBUG_INFORMATION));
+
+	/* Opt-in debug aid: crash now so a full ramdump can be collected */
+	if (modem_trigger_panic == MSS_MAGIC)
+		panic("%s: System ramdump is needed!!!\n", __func__);
+}
+
+/*
+ * Enable the modem supply regulator and, if present, the external CX-rail
+ * BHS, polling until the BHS reports on. Returns 0 or a negative errno.
+ */
+static int pil_mss_power_up(struct q6v5_data *drv)
+{
+	int ret = 0;
+	u32 regval;
+
+	if (drv->vreg) {
+		ret = regulator_enable(drv->vreg);
+		if (ret)
+			dev_err(drv->desc.dev, "Failed to enable modem regulator.\n");
+	}
+
+	if (drv->cxrail_bhs) {
+		regval = readl_relaxed(drv->cxrail_bhs);
+		regval |= EXTERNAL_BHS_ON;
+		writel_relaxed(regval, drv->cxrail_bhs);
+
+		/*
+		 * NOTE(review): this overwrites any regulator_enable() error
+		 * when a BHS is present — confirm that is intended.
+		 */
+		ret = readl_poll_timeout(drv->cxrail_bhs, regval,
+			regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
+	}
+
+	return ret;
+}
+
+/*
+ * Turn off the external CX-rail BHS (if present) and disable the modem
+ * supply regulator. Returns the regulator_disable() result, or 0.
+ */
+static int pil_mss_power_down(struct q6v5_data *drv)
+{
+	u32 regval;
+
+	if (drv->cxrail_bhs) {
+		regval = readl_relaxed(drv->cxrail_bhs);
+		regval &= ~EXTERNAL_BHS_ON;
+		writel_relaxed(regval, drv->cxrail_bhs);
+	}
+
+	if (drv->vreg)
+		return regulator_disable(drv->vreg);
+
+	return 0;
+}
+
+/*
+ * Enable the bus/boot clocks needed by the Q6. On any failure the clocks
+ * already enabled are unwound in reverse order via the goto ladder.
+ */
+static int pil_mss_enable_clks(struct q6v5_data *drv)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drv->ahb_clk);
+	if (ret)
+		goto err_ahb_clk;
+	ret = clk_prepare_enable(drv->axi_clk);
+	if (ret)
+		goto err_axi_clk;
+	ret = clk_prepare_enable(drv->rom_clk);
+	if (ret)
+		goto err_rom_clk;
+	ret = clk_prepare_enable(drv->gpll0_mss_clk);
+	if (ret)
+		goto err_gpll0_mss_clk;
+	ret = clk_prepare_enable(drv->snoc_axi_clk);
+	if (ret)
+		goto err_snoc_axi_clk;
+	ret = clk_prepare_enable(drv->mnoc_axi_clk);
+	if (ret)
+		goto err_mnoc_axi_clk;
+	return 0;
+err_mnoc_axi_clk:
+	clk_disable_unprepare(drv->snoc_axi_clk);
+err_snoc_axi_clk:
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+err_gpll0_mss_clk:
+	clk_disable_unprepare(drv->rom_clk);
+err_rom_clk:
+	clk_disable_unprepare(drv->axi_clk);
+err_axi_clk:
+	clk_disable_unprepare(drv->ahb_clk);
+err_ahb_clk:
+	return ret;
+}
+
+/* Disable the clocks enabled by pil_mss_enable_clks(), in reverse order. */
+static void pil_mss_disable_clks(struct q6v5_data *drv)
+{
+	clk_disable_unprepare(drv->mnoc_axi_clk);
+	clk_disable_unprepare(drv->snoc_axi_clk);
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+	clk_disable_unprepare(drv->rom_clk);
+	clk_disable_unprepare(drv->axi_clk);
+	/*
+	 * With ahb_clk_vote the AHB clock is dropped by the caller instead
+	 * (see __pil_mss_deinit_image()/pil_mss_reset() error path).
+	 */
+	if (!drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+}
+
+/*
+ * Drive the MSS restart line: callers pass 1 to put the subsystem in reset
+ * and 0 to release it. Uses a direct register write when the restart
+ * register is non-secure, otherwise an SCM call into the secure world.
+ */
+static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
+{
+	int ret = 0;
+	int scm_ret = 0;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = mss_restart;
+	desc.args[1] = 0;
+	desc.arginfo = SCM_ARGS(2);
+
+	if (drv->restart_reg && !drv->restart_reg_sec) {
+		writel_relaxed(mss_restart, drv->restart_reg);
+		/* Ensure the write lands, then allow it to propagate */
+		mb();
+		udelay(2);
+	} else if (drv->restart_reg_sec) {
+		if (!is_scm_armv8()) {
+			ret = scm_call(SCM_SVC_PIL, MSS_RESTART_ID,
+					&mss_restart, sizeof(mss_restart),
+					&scm_ret, sizeof(scm_ret));
+		} else {
+			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+						MSS_RESTART_ID), &desc);
+			scm_ret = desc.ret[0];
+		}
+		if (ret || scm_ret)
+			pr_err("Secure MSS restart failed\n");
+	}
+
+	return ret;
+}
+
+/*
+ * Poll the RMB status registers until both the PBL and then the MBA report
+ * completion, or pbl_mba_boot_timeout_ms elapses for either stage.
+ * Returns 0 on success, -ETIMEDOUT-style errno from the poll, or -EINVAL
+ * when a stage reports an unexpected status.
+ */
+static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
+{
+	struct device *dev = drv->desc.dev;
+	int ret;
+	u32 status;
+
+	/* Wait for PBL completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
+		status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
+	if (ret) {
+		dev_err(dev, "PBL boot timed out\n");
+		return ret;
+	}
+	if (status != STATUS_PBL_SUCCESS) {
+		dev_err(dev, "PBL returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	/* Wait for MBA completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+		status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
+	if (ret) {
+		dev_err(dev, "MBA boot timed out\n");
+		return ret;
+	}
+	if (status != STATUS_XPU_UNLOCKED &&
+	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
+		dev_err(dev, "MBA returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Halt the modem's AXI ports, assert the MSS restart line, and if the
+ * subsystem had been booted, drop its clocks and power. Returns the
+ * restart-register result.
+ */
+int pil_mss_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+
+	/* Either a combined halt base or individual halt registers is used */
+	if (drv->axi_halt_base) {
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_Q6_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_MODEM_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_NC_HALT_BASE);
+	}
+
+	if (drv->axi_halt_q6)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_q6);
+	if (drv->axi_halt_mss)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_mss);
+	if (drv->axi_halt_nc)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_nc);
+
+	/*
+	 * Software workaround to avoid high MX current during LPASS/MSS
+	 * restart.
+	 */
+	if (drv->mx_spike_wa && drv->ahb_clk_vote) {
+		ret = clk_prepare_enable(drv->ahb_clk);
+		if (!ret)
+			assert_clamps(pil);
+		else
+			dev_err(pil->dev, "error turning ON AHB clock\n");
+	}
+
+	ret = pil_mss_restart_reg(drv, 1);
+
+	if (drv->is_booted) {
+		pil_mss_disable_clks(drv);
+		pil_mss_power_down(drv);
+		drv->is_booted = false;
+	}
+
+	return ret;
+}
+
+/*
+ * Tear down the modem image. When @err_path is set, first notify the MBA
+ * of the PIL failure and wait for it to unlock its region, then shut the
+ * subsystem down and free the MBA/DP buffer if it is still allocated.
+ */
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+	s32 status;
+
+	if (err_path) {
+		writel_relaxed(CMD_PILFAIL_NFY_MBA,
+				drv->rmb_base + RMB_MBA_COMMAND);
+		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				status == STATUS_MBA_UNLOCKED || status < 0,
+				POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
+		if (ret)
+			dev_err(pil->dev, "MBA region unlock timed out\n");
+		else if (status < 0)
+			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
+						status);
+	}
+
+	ret = pil_mss_shutdown(pil);
+
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	/*
+	 * In case of any failure where reclaiming MBA and DP memory
+	 * could not happen, free the memory here.
+	 */
+	if (drv->q6->mba_dp_virt) {
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				&drv->attrs_dma);
+		drv->q6->mba_dp_virt = NULL;
+	}
+
+	return ret;
+}
+
+/* PIL deinit_image op: always runs the MBA failure-notification path. */
+int pil_mss_deinit_image(struct pil_desc *pil)
+{
+	return __pil_mss_deinit_image(pil, true);
+}
+
+/*
+ * PIL proxy_vote op: raise and enable the MX rail at the devicetree-given
+ * voltage, then take the common q6v5 proxy votes. Each failure unwinds the
+ * votes taken so far. Returns 0 on success or a negative errno.
+ */
+int pil_mss_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_mx-uV property\n");
+		return ret;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vreg_mx voltage\n");
+		return ret;
+	}
+
+	ret = regulator_enable(drv->vreg_mx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to enable vreg_mx\n");
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+		return ret;
+	}
+
+	ret = pil_q6v5_make_proxy_votes(pil);
+	if (ret) {
+		regulator_disable(drv->vreg_mx);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+	}
+
+	return ret;
+}
+
+/* PIL proxy_unvote op: undo pil_mss_make_proxy_votes() in reverse order. */
+void pil_mss_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	pil_q6v5_remove_proxy_votes(pil);
+	regulator_disable(drv->vreg_mx);
+	regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+}
+
+/*
+ * Power, clock and release the Q6 from reset, programming either the RMB
+ * (self-authenticating targets) or the reset vector with the boot address,
+ * then wait for the MBA when self-auth is used. Register-write ordering
+ * relative to reset release is enforced with explicit barriers below.
+ */
+static int pil_mss_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	phys_addr_t start_addr = pil_get_entry_addr(pil);
+	int ret;
+
+	/* Boot from the MBA buffer when one has been loaded */
+	if (drv->mba_dp_phys)
+		start_addr = drv->mba_dp_phys;
+
+	/*
+	 * Bring subsystem out of reset and enable required
+	 * regulators and clocks.
+	 */
+	ret = pil_mss_power_up(drv);
+	if (ret)
+		goto err_power;
+
+	/* Deassert reset to subsystem and wait for propagation */
+	ret = pil_mss_restart_reg(drv, 0);
+	if (ret)
+		goto err_restart;
+
+	ret = pil_mss_enable_clks(drv);
+	if (ret)
+		goto err_clks;
+
+	if (modem_dbg_cfg)
+		writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);
+
+	/* Program Image Address */
+	if (drv->self_auth) {
+		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
+		/*
+		 * Ensure write to RMB base occurs before reset
+		 * is released.
+		 */
+		mb();
+	} else {
+		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
+				drv->reg_base + QDSP6SS_RST_EVB);
+	}
+
+	/* Program DP Address */
+	if (drv->dp_size) {
+		writel_relaxed(start_addr + SZ_1M, drv->rmb_base +
+			       RMB_PMI_CODE_START);
+		writel_relaxed(drv->dp_size, drv->rmb_base +
+			       RMB_PMI_CODE_LENGTH);
+	} else {
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+	}
+	/* Make sure RMB regs are written before bringing modem out of reset */
+	mb();
+
+	ret = pil_q6v5_reset(pil);
+	if (ret)
+		goto err_q6v5_reset;
+
+	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
+	if (drv->self_auth) {
+		ret = pil_msa_wait_for_mba_ready(drv);
+		if (ret)
+			goto err_q6v5_reset;
+	}
+
+	dev_info(pil->dev, "MBA boot done\n");
+	drv->is_booted = true;
+
+	return 0;
+
+err_q6v5_reset:
+	modem_log_rmb_regs(drv->rmb_base);
+	pil_mss_disable_clks(drv);
+	if (drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+err_clks:
+	pil_mss_restart_reg(drv, 1);
+err_restart:
+	pil_mss_power_down(drv);
+err_power:
+	return ret;
+}
+
+/*
+ * Load the MBA image (plus optional "msadp" debug policy) into a freshly
+ * allocated DMA buffer, hand the buffer to the subsystem when memory
+ * protection is in use, and boot the modem up to the MBA.
+ * Returns 0 on success or a negative errno.
+ */
+int pil_mss_reset_load_mba(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+	/*
+	 * Fix: dp_fw must start out NULL — the early "err_invalid_fw" jump
+	 * below previously tested it while still uninitialized.
+	 */
+	const struct firmware *fw, *dp_fw = NULL;
+	char fw_name_legacy[10] = "mba.b00";
+	char fw_name[10] = "mba.mbn";
+	char *dp_name = "msadp";
+	char *fw_name_p;
+	void *mba_dp_virt;
+	dma_addr_t mba_dp_phys, mba_dp_phys_end;
+	int ret, count;
+	const u8 *data;
+
+	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
+	ret = request_firmware(&fw, fw_name_p, pil->dev);
+	if (ret) {
+		dev_err(pil->dev, "Failed to locate %s\n",
+						fw_name_p);
+		return ret;
+	}
+
+	data = fw ? fw->data : NULL;
+	if (!data) {
+		dev_err(pil->dev, "MBA data is NULL\n");
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
+	/* The MBA always gets 1 MiB; the DP (if any) is appended after it */
+	drv->mba_dp_size = SZ_1M;
+	md->mba_mem_dev.coherent_dma_mask =
+		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	init_dma_attrs(&md->attrs_dma);
+	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &md->attrs_dma);
+
+	/* The debug policy is optional; its absence is not an error */
+	ret = request_firmware(&dp_fw, dp_name, pil->dev);
+	if (ret) {
+		dev_warn(pil->dev, "Debug policy not present - %s. Continue.\n",
+						dp_name);
+	} else {
+		if (!dp_fw || !dp_fw->data) {
+			dev_err(pil->dev, "Invalid DP firmware\n");
+			ret = -ENOMEM;
+			goto err_invalid_fw;
+		}
+		drv->dp_size = dp_fw->size;
+		drv->mba_dp_size += drv->dp_size;
+	}
+
+	mba_dp_virt = dma_alloc_attrs(&md->mba_mem_dev, drv->mba_dp_size,
+			&mba_dp_phys, GFP_KERNEL, &md->attrs_dma);
+	if (!mba_dp_virt) {
+		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
+	/* Make sure there are no mappings in PKMAP and fixmap */
+	kmap_flush_unused();
+
+	drv->mba_dp_phys = mba_dp_phys;
+	drv->mba_dp_virt = mba_dp_virt;
+	mba_dp_phys_end = mba_dp_phys + drv->mba_dp_size;
+
+	dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa\n",
+					&mba_dp_phys, &mba_dp_phys_end);
+
+	/* Load the MBA image into memory */
+	count = fw->size;
+	memcpy(mba_dp_virt, data, count);
+	/* Ensure memcpy of the MBA memory is done before loading the DP */
+	wmb();
+
+	/* Load the DP image into memory */
+	if (drv->mba_dp_size > SZ_1M) {
+		memcpy(mba_dp_virt + SZ_1M, dp_fw->data, dp_fw->size);
+		/* Ensure memcpy is done before powering up modem */
+		wmb();
+	}
+
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+		if (ret) {
+			pr_err("scm_call to unprotect MBA and DP mem failed\n");
+			goto err_mba_data;
+		}
+	}
+
+	ret = pil_mss_reset(pil);
+	if (ret) {
+		dev_err(pil->dev, "MBA boot failed.\n");
+		goto err_mss_reset;
+	}
+
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+
+	return 0;
+
+err_mss_reset:
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
+						drv->mba_dp_size);
+err_mba_data:
+	dma_free_attrs(&md->mba_mem_dev, drv->mba_dp_size, drv->mba_dp_virt,
+				drv->mba_dp_phys, &md->attrs_dma);
+err_invalid_fw:
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+	drv->mba_dp_virt = NULL;
+	return ret;
+}
+
+/*
+ * Authenticate the modem ELF metadata (mdt headers) via the running MBA.
+ *
+ * The metadata is copied into a physically contiguous, strongly ordered DMA
+ * buffer, optionally assigned to the subsystem VM, and its physical address
+ * is handed to the MBA through the RMB registers. We then poll
+ * RMB_MBA_STATUS until it reports STATUS_META_DATA_AUTH_SUCCESS, a negative
+ * error code, or modem_auth_timeout_ms expires.
+ *
+ * On failure the modem is shut down and the MBA/DP region allocated by
+ * pil_mss_reset_load_mba() is reclaimed and freed.
+ */
+static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
+					size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	void *mdata_virt;
+	dma_addr_t mdata_phys;
+	s32 status;
+	int ret;
+	DEFINE_DMA_ATTRS(attrs);
+
+	drv->mba_mem_dev.coherent_dma_mask =
+		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
+	/* Make metadata physically contiguous and 4K aligned. */
+	mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
+					GFP_KERNEL, &attrs);
+	if (!mdata_virt) {
+		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	memcpy(mdata_virt, metadata, size);
+	/* wmb() ensures copy completes prior to starting authentication. */
+	wmb();
+
+	if (pil->subsys_vmid > 0) {
+		/* SCM assignment works on 4K-page granularity. */
+		ret = pil_assign_mem_to_subsys(pil, mdata_phys,
+							ALIGN(size, SZ_4K));
+		if (ret) {
+			pr_err("scm_call to unprotect modem metadata mem failed\n");
+			dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt,
+							mdata_phys, &attrs);
+			goto fail;
+		}
+	}
+
+	/* Initialize length counter to 0 */
+	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Pass address of meta-data to the MBA and perform authentication */
+	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
+	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
+		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of headers timed out\n");
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for headers\n",
+				status);
+		ret = -EINVAL;
+	}
+
+	/* Reclaim and free the metadata buffer regardless of outcome. */
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));
+
+	dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, &attrs);
+
+	/* Success: skip the failure cleanup below. */
+	if (!ret)
+		return ret;
+
+fail:
+	modem_log_rmb_regs(drv->rmb_base);
+	if (drv->q6) {
+		/* Tear the modem back down and release the MBA/DP buffer. */
+		pil_mss_shutdown(pil);
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				&drv->attrs_dma);
+		drv->q6->mba_dp_virt = NULL;
+
+	}
+	return ret;
+}
+
+/*
+ * init_image op for the self-auth flow: boot the MBA first, then hand the
+ * modem ELF headers to it for authentication.
+ */
+static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil,
+				const u8 *metadata, size_t size)
+{
+	int rc = pil_mss_reset_load_mba(pil);
+
+	return rc ? rc : pil_msa_auth_modem_mdt(pil, metadata, size);
+}
+
+/*
+ * verify_blob op: notify the MBA that another chunk of modem image data is
+ * in place. The first call (length counter still 0) programs the load
+ * address and issues CMD_LOAD_READY; every call then advances the running
+ * RMB_PMI_CODE_LENGTH counter that tells the MBA how many bytes are ready
+ * for authentication. A negative RMB_MBA_STATUS aborts the load.
+ */
+static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
+				   size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	s32 status;
+	u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Begin image authentication */
+	if (img_length == 0) {
+		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	}
+	/* Increment length counter */
+	img_length += size;
+	writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
+	if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d\n", status);
+		modem_log_rmb_regs(drv->rmb_base);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * auth_and_reset op for self-auth targets: wait for the MBA to finish
+ * authenticating every loaded modem segment, then reclaim and free the
+ * MBA/DP buffer (no longer needed once the modem runs) and drop the AHB
+ * clock vote taken during boot.
+ */
+static int pil_msa_mba_auth(struct pil_desc *pil)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret;
+	s32 status;
+
+	/* Wait for all segments to be authenticated or an error to occur */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+			status == STATUS_AUTH_COMPLETE || status < 0,
+			50, modem_auth_timeout_ms * 1000);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of image timed out\n");
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for image\n", status);
+		ret = -EINVAL;
+	}
+
+	/* The MBA/DP buffer is freed whether or not authentication passed. */
+	if (drv->q6) {
+		if (drv->q6->mba_dp_virt) {
+			/* Reclaim MBA and DP (if allocated) memory. */
+			if (pil->subsys_vmid > 0)
+				pil_assign_mem_to_linux(pil,
+					drv->q6->mba_dp_phys,
+					drv->q6->mba_dp_size);
+			dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+					drv->q6->mba_dp_virt,
+					drv->q6->mba_dp_phys, &drv->attrs_dma);
+
+			drv->q6->mba_dp_virt = NULL;
+		}
+	}
+	if (ret)
+		modem_log_rmb_regs(drv->rmb_base);
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	return ret;
+}
+
+/*
+ * To be used only if self-auth is disabled, or if the
+ * MBA image is loaded as segments and not in init_image.
+ * Authentication and reset are then done directly by pil_mss_reset().
+ */
+struct pil_reset_ops pil_msa_mss_ops = {
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.auth_and_reset = pil_mss_reset,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if self-auth is enabled and the MBA is to be loaded
+ * in init_image and the modem headers are also to be authenticated
+ * in init_image. Modem segments authenticated in auth_and_reset.
+ */
+struct pil_reset_ops pil_msa_mss_ops_selfauth = {
+	.init_image = pil_msa_mss_reset_mba_load_auth_mdt,
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+	.deinit_image = pil_mss_deinit_image,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if the modem headers are to be authenticated
+ * in init_image, and the modem segments in auth_and_reset.
+ * Note: no proxy-vote or shutdown ops here; presumably the owner of this
+ * ops table manages power separately -- verify against its users.
+ */
+struct pil_reset_ops pil_msa_femto_mba_ops = {
+	.init_image = pil_msa_auth_modem_mdt,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+};
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
new file mode 100644
index 000000000000..7c0cf3e5c06f
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_PIL_MSA_H
+#define __MSM_PIL_MSA_H
+
+#include <soc/qcom/subsystem_restart.h>
+
+#include "peripheral-loader.h"
+
+#define VDD_MSS_UV 1000000
+
+/* Per-device state for the MSS (modem) peripheral image loader. */
+struct modem_data {
+	struct q6v5_data *q6;		/* Q6 resources; NULL if not loadable */
+	struct subsys_device *subsys;
+	struct subsys_desc subsys_desc;
+	void *ramdump_dev;
+	bool crash_shutdown;		/* set while we force-stop the modem */
+	bool ignore_errors;		/* suppress error IRQs during restart */
+	struct completion stop_ack;	/* modem acked the force-stop GPIO */
+	void __iomem *rmb_base;
+	struct clk *xo;
+	struct pil_desc desc;
+	struct device mba_mem_dev;	/* dummy device for DMA allocations */
+	struct dma_attrs attrs_dma;
+};
+
+extern struct pil_reset_ops pil_msa_mss_ops;
+extern struct pil_reset_ops pil_msa_mss_ops_selfauth;
+extern struct pil_reset_ops pil_msa_femto_mba_ops;
+
+int pil_mss_reset_load_mba(struct pil_desc *pil);
+int pil_mss_make_proxy_votes(struct pil_desc *pil);
+void pil_mss_remove_proxy_votes(struct pil_desc *pil);
+int pil_mss_shutdown(struct pil_desc *pil);
+int pil_mss_deinit_image(struct pil_desc *pil);
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path);
+#endif
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
new file mode 100644
index 000000000000..45f24801b6b0
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_gpio.h>
+#include <linux/clk/msm-clk.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/smsm.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+#define MAX_VDD_MSS_UV 1150000
+#define PROXY_TIMEOUT_MS 10000
+#define MAX_SSR_REASON_LEN 81U
+#define STOP_ACK_TIMEOUT_MS 1000
+
+#define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
+
+/* Read and log the modem's failure-reason string from SMEM, then clear it. */
+static void log_modem_sfr(void)
+{
+	u32 size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+
+	smem_reason = smem_get_entry_no_rlock(SMEM_SSR_REASON_MSS0, &size, 0,
+							SMEM_ANY_HOST_FLAG);
+	if (!smem_reason || !size) {
+		pr_err("modem subsystem failure reason: (unknown, smem_get_entry_no_rlock failed).\n");
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("modem subsystem failure reason: (unknown, empty string found).\n");
+		return;
+	}
+
+	strlcpy(reason, smem_reason, min(size, MAX_SSR_REASON_LEN));
+	pr_err("modem subsystem failure reason: %s.\n", reason);
+
+	/* Clear the reason so a stale string is not logged on the next SSR. */
+	smem_reason[0] = '\0';
+	/* Ensure the cleared byte is visible before the modem restarts. */
+	wmb();
+}
+
+/*
+ * Kick off subsystem restart for the modem. Errors are ignored from here
+ * on so interrupts raised while the modem goes down do not retrigger SSR.
+ */
+static void restart_modem(struct modem_data *drv)
+{
+	log_modem_sfr();
+	drv->ignore_errors = true;
+	subsystem_restart_dev(drv->subsys);
+}
+
+/* err-fatal IRQ from the modem: mark it crashed and trigger SSR. */
+static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	/* Ignore if we're the one that set the force stop GPIO */
+	if (!drv->crash_shutdown) {
+		pr_err("Fatal error on the modem.\n");
+		subsys_set_crash_status(drv->subsys, true);
+		restart_modem(drv);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Modem acknowledged our force-stop request; wake up modem_shutdown(). */
+static irqreturn_t modem_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received stop ack interrupt from modem\n");
+	complete(&drv->stop_ack);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * subsys shutdown op. If force_stop is requested and the modem has not
+ * already crashed, assert the force-stop GPIO and wait up to
+ * STOP_ACK_TIMEOUT_MS for the stop-ack interrupt; then latch the state of
+ * the ramdump-disable GPIO and power the subsystem down through PIL.
+ */
+static int modem_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	unsigned long ret;
+
+	if (subsys->is_not_loadable)
+		return 0;
+
+	if (!subsys_get_crash_status(drv->subsys) && force_stop &&
+	    subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		ret = wait_for_completion_timeout(&drv->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from modem.\n");
+		/* Deassert even on timeout so the next boot starts clean. */
+		gpio_set_value(subsys->force_stop_gpio, 0);
+	}
+
+	if (drv->subsys_desc.ramdump_disable_gpio) {
+		drv->subsys_desc.ramdump_disable = gpio_get_value(
+					drv->subsys_desc.ramdump_disable_gpio);
+		pr_warn("Ramdump disable gpio value is %d\n",
+			drv->subsys_desc.ramdump_disable);
+	}
+
+	pil_shutdown(&drv->q6->desc);
+
+	return 0;
+}
+
+/* subsys powerup op: reset per-boot state and boot the modem via PIL. */
+static int modem_powerup(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	if (subsys->is_not_loadable)
+		return 0;
+
+	/*
+	 * At this time, the modem is shutdown. Therefore this function cannot
+	 * run concurrently with the watchdog bite error handler, making it safe
+	 * to unset the flag below.
+	 */
+	drv->ignore_errors = false;
+	drv->subsys_desc.ramdump_disable = 0;
+	reinit_completion(&drv->stop_ack);
+
+	drv->q6->desc.fw_name = subsys->fw_name;
+	return pil_boot(&drv->q6->desc);
+}
+
+/*
+ * subsys crash_shutdown op: signal the modem to stop and busy-wait
+ * STOP_ACK_TIMEOUT_MS for it to quiesce. mdelay() (not msleep) is used --
+ * presumably because this can run in atomic/panic context; confirm against
+ * the subsystem_restart caller.
+ */
+static void modem_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	drv->crash_shutdown = true;
+	if (!subsys_get_crash_status(drv->subsys) &&
+		subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		mdelay(STOP_ACK_TIMEOUT_MS);
+	}
+}
+
+/*
+ * subsys ramdump op: take proxy votes, boot only the MBA (needed to access
+ * and authenticate modem memory), collect the firmware ramdump, then tear
+ * everything back down.
+ *
+ * Returns 0 on success or a negative errno from the failing step.
+ */
+static int modem_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	int ret;
+
+	if (!enable)
+		return 0;
+
+	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_mss_reset_load_mba(&drv->q6->desc);
+	if (ret)
+		goto out_votes;	/* don't leak the proxy votes taken above */
+
+	ret = pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
+	if (ret < 0)
+		pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
+
+	ret = __pil_mss_deinit_image(&drv->q6->desc, false);
+	if (ret < 0)
+		pr_err("Unable to free up resources (rc = %d).\n", ret);
+
+out_votes:
+	pil_mss_remove_proxy_votes(&drv->q6->desc);
+	return ret;
+}
+
+/*
+ * Watchdog-bite IRQ from the modem. If system_debug is set and the
+ * err-fatal GPIO is low, panic so a full system ramdump is collected;
+ * otherwise mark the modem crashed and restart just the subsystem.
+ */
+static irqreturn_t modem_wdog_bite_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+	if (drv->ignore_errors)
+		return IRQ_HANDLED;
+
+	pr_err("Watchdog bite received from modem software!\n");
+	if (drv->subsys_desc.system_debug &&
+			!gpio_get_value(drv->subsys_desc.err_fatal_gpio))
+		panic("%s: System ramdump requested. Triggering device restart!\n",
+							__func__);
+	subsys_set_crash_status(drv->subsys, true);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Register the modem with the subsystem-restart framework and create its
+ * ramdump device. Returns 0 on success; unwinds the subsys registration
+ * if the ramdump device cannot be created.
+ */
+static int pil_subsys_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	int ret;
+
+	drv->subsys_desc.name = "modem";
+	drv->subsys_desc.dev = &pdev->dev;
+	drv->subsys_desc.owner = THIS_MODULE;
+	drv->subsys_desc.shutdown = modem_shutdown;
+	drv->subsys_desc.powerup = modem_powerup;
+	drv->subsys_desc.ramdump = modem_ramdump;
+	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
+	drv->subsys_desc.err_fatal_handler = modem_err_fatal_intr_handler;
+	drv->subsys_desc.stop_ack_handler = modem_stop_ack_intr_handler;
+	drv->subsys_desc.wdog_bite_handler = modem_wdog_bite_intr_handler;
+
+	drv->subsys = subsys_register(&drv->subsys_desc);
+	if (IS_ERR(drv->subsys)) {
+		ret = PTR_ERR(drv->subsys);
+		goto err_subsys;
+	}
+
+	drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
+	if (!drv->ramdump_dev) {
+		pr_err("%s: Unable to create a modem ramdump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_ramdump;
+	}
+
+	return 0;
+
+err_ramdump:
+	subsys_unregister(drv->subsys);
+err_subsys:
+	return ret;
+}
+
+/*
+ * Acquire every resource a loadable modem needs: Q6 base state from
+ * pil_q6v5_init(), RMB/restart register mappings, regulators and clocks,
+ * then register the PIL descriptor. All resources are devm-managed.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int pil_mss_loadable_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	struct q6v5_data *q6;
+	struct pil_desc *q6_desc;
+	struct resource *res;
+	struct property *prop;
+	int ret;
+
+	q6 = pil_q6v5_init(pdev);
+	/* PTR_ERR(NULL) would be 0 (false success), so handle NULL apart. */
+	if (IS_ERR_OR_NULL(q6))
+		return q6 ? PTR_ERR(q6) : -ENODEV;
+	drv->q6 = q6;
+	drv->xo = q6->xo;
+
+	q6_desc = &q6->desc;
+	q6_desc->owner = THIS_MODULE;
+	q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;
+
+	q6_desc->ops = &pil_msa_mss_ops;
+
+	q6->self_auth = of_property_read_bool(pdev->dev.of_node,
+							"qcom,pil-self-auth");
+	if (q6->self_auth) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"rmb_base");
+		q6->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+		/* devm_ioremap_resource() returns ERR_PTR(), never NULL. */
+		if (IS_ERR(q6->rmb_base))
+			return PTR_ERR(q6->rmb_base);
+		drv->rmb_base = q6->rmb_base;
+		q6_desc->ops = &pil_msa_mss_ops_selfauth;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
+	if (!res) {
+		/* Fall back to the secure restart register. */
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"restart_reg_sec");
+		q6->restart_reg_sec = true;
+	}
+
+	q6->restart_reg = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(q6->restart_reg))
+		return PTR_ERR(q6->restart_reg);
+
+	q6->vreg = NULL;
+
+	/* vdd_mss supply is optional. */
+	prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL);
+	if (prop) {
+		q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
+		if (IS_ERR(q6->vreg))
+			return PTR_ERR(q6->vreg);
+
+		ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV,
+						MAX_VDD_MSS_UV);
+		if (ret)
+			dev_err(&pdev->dev, "Failed to set vreg voltage.\n");
+
+		ret = regulator_set_load(q6->vreg, 100000);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Failed to set vreg mode.\n");
+			return ret;
+		}
+	}
+
+	q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(q6->vreg_mx))
+		return PTR_ERR(q6->vreg_mx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_mx-uV", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_mx-uV property\n");
+		return -EINVAL;
+	}
+
+	/* Optional CX rail block-headswitch register. */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"cxrail_bhs_reg");
+	if (res)
+		q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+
+	q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(q6->ahb_clk))
+		return PTR_ERR(q6->ahb_clk);
+
+	q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(q6->axi_clk))
+		return PTR_ERR(q6->axi_clk);
+
+	q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
+	if (IS_ERR(q6->rom_clk))
+		return PTR_ERR(q6->rom_clk);
+
+	/* Optional active-only clocks. */
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "gpll0_mss_clk") >= 0)
+		q6->gpll0_mss_clk = devm_clk_get(&pdev->dev, "gpll0_mss_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "snoc_axi_clk") >= 0)
+		q6->snoc_axi_clk = devm_clk_get(&pdev->dev, "snoc_axi_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "mnoc_axi_clk") >= 0)
+		q6->mnoc_axi_clk = devm_clk_get(&pdev->dev, "mnoc_axi_clk");
+
+	return pil_desc_init(q6_desc);
+}
+
+/* Platform probe: allocate driver state and register with PIL/SSR. */
+static int pil_mss_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+	int ret;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, drv);
+
+	/* "qcom,is-not-loadable" devices skip all PIL resource setup. */
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,is-not-loadable")) {
+		drv->subsys_desc.is_not_loadable = 1;
+	} else {
+		ret = pil_mss_loadable_init(drv, pdev);
+		if (ret)
+			return ret;
+	}
+
+	init_completion(&drv->stop_ack);
+
+	return pil_subsys_init(drv, pdev);
+}
+
+/* Platform remove: undo everything probe set up. */
+static int pil_mss_driver_exit(struct platform_device *pdev)
+{
+	struct modem_data *drv = platform_get_drvdata(pdev);
+
+	subsys_unregister(drv->subsys);
+	destroy_ramdump_device(drv->ramdump_dev);
+	/*
+	 * drv->q6 is only set up for loadable modems; guard against a NULL
+	 * dereference on "qcom,is-not-loadable" devices.
+	 */
+	if (drv->q6)
+		pil_desc_release(&drv->q6->desc);
+	return 0;
+}
+
+/* Devicetree compatibles handled by this driver (see binding document). */
+static struct of_device_id mss_match_table[] = {
+	{ .compatible = "qcom,pil-q6v5-mss" },
+	{ .compatible = "qcom,pil-q6v55-mss" },
+	{ .compatible = "qcom,pil-q6v56-mss" },
+	{}
+};
+
+static struct platform_driver pil_mss_driver = {
+	.probe = pil_mss_driver_probe,
+	.remove = pil_mss_driver_exit,
+	.driver = {
+		.name = "pil-q6v5-mss",
+		.of_match_table = mss_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Standard module registration boilerplate. */
+static int __init pil_mss_init(void)
+{
+	return platform_driver_register(&pil_mss_driver);
+}
+module_init(pil_mss_init);
+
+static void __exit pil_mss_exit(void)
+{
+	platform_driver_unregister(&pil_mss_driver);
+}
+module_exit(pil_mss_exit);
+
+MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
new file mode 100644
index 000000000000..39b352b6f159
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/clk/msm-clk.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET 0x014
+#define QDSP6SS_GFMUX_CTL 0x020
+#define QDSP6SS_PWR_CTL 0x030
+#define QDSP6V6SS_MEM_PWR_CTL 0x034
+#define QDSP6SS_BHS_STATUS 0x078
+#define QDSP6SS_MEM_PWR_CTL 0x0B0
+#define QDSP6SS_STRAP_ACC 0x110
+#define QDSP6V62SS_BHS_STATUS 0x0C4
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ 0x0
+#define AXI_HALTACK 0x4
+#define AXI_IDLE 0x8
+
+#define HALT_ACK_TIMEOUT_US 100000
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE BIT(0)
+#define Q6SS_CORE_ARES BIT(1)
+#define Q6SS_BUS_ARES_ENA BIT(2)
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENA BIT(1)
+#define Q6SS_CLK_SRC_SEL_C BIT(3)
+#define Q6SS_CLK_SRC_SEL_FIELD 0xC
+#define Q6SS_CLK_SRC_SWITCH_CLK_OVR BIT(8)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
+#define Q6SS_ETB_SLP_NRET_N BIT(17)
+#define Q6SS_L2DATA_STBY_N BIT(18)
+#define Q6SS_SLP_RET_N BIT(19)
+#define Q6SS_CLAMP_IO BIT(20)
+#define QDSS_BHS_ON BIT(21)
+#define QDSS_LDO_BYP BIT(22)
+
+/* QDSP6v55 parameters */
+#define QDSP6v55_LDO_ON BIT(26)
+#define QDSP6v55_LDO_BYP BIT(25)
+#define QDSP6v55_BHS_ON BIT(24)
+#define QDSP6v55_CLAMP_WL BIT(21)
+#define QDSP6v55_CLAMP_QMC_MEM BIT(22)
+#define L1IU_SLP_NRET_N BIT(15)
+#define L1DU_SLP_NRET_N BIT(14)
+#define L2PLRU_SLP_NRET_N BIT(13)
+#define QDSP6v55_BHS_EN_REST_ACK BIT(0)
+
+#define HALT_CHECK_MAX_LOOPS (200)
+#define BHS_CHECK_MAX_LOOPS (200)
+#define QDSP6SS_XO_CBCR (0x0038)
+
+#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
+
+/*
+ * proxy_vote op: enable the clocks and regulators the Q6 needs while the
+ * proxy vote is held -- XO, PNoC, QDSS, vdd_cx at the DT-specified corner,
+ * and optionally vdd_pll. On any failure the goto ladder unwinds every
+ * resource acquired so far, in reverse order.
+ */
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(drv->xo);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for XO\n");
+		goto out;
+	}
+
+	ret = clk_prepare_enable(drv->pnoc_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for pnoc\n");
+		goto err_pnoc_vote;
+	}
+
+	ret = clk_prepare_enable(drv->qdss_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for qdss\n");
+		goto err_qdss_vote;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_cx, uv, uv);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vdd_cx voltage.\n");
+		goto err_cx_voltage;
+	}
+
+	ret = regulator_set_load(drv->vreg_cx, 100000);
+	if (ret < 0) {
+		dev_err(pil->dev, "Failed to set vdd_cx mode.\n");
+		goto err_cx_mode;
+	}
+
+	ret = regulator_enable(drv->vreg_cx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for vdd_cx\n");
+		goto err_cx_enable;
+	}
+
+	if (drv->vreg_pll) {
+		ret = regulator_enable(drv->vreg_pll);
+		if (ret) {
+			dev_err(pil->dev, "Failed to vote for vdd_pll\n");
+			goto err_vreg_pll;
+		}
+	}
+
+	return 0;
+
+err_vreg_pll:
+	regulator_disable(drv->vreg_cx);
+err_cx_enable:
+	regulator_set_load(drv->vreg_cx, 0);
+err_cx_mode:
+	/* Drop the corner request (NONE) rather than restoring a voltage. */
+	regulator_set_voltage(drv->vreg_cx, RPM_REGULATOR_CORNER_NONE, uv);
+err_cx_voltage:
+	clk_disable_unprepare(drv->qdss_clk);
+err_qdss_vote:
+	clk_disable_unprepare(drv->pnoc_clk);
+err_pnoc_vote:
+	clk_disable_unprepare(drv->xo);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(pil_q6v5_make_proxy_votes);
+
+/*
+ * proxy_unvote op: release everything pil_q6v5_make_proxy_votes() took,
+ * in reverse order.
+ *
+ * NOTE(review): if "vdd_cx-voltage" is missing this returns without
+ * dropping any vote -- that should be unreachable since make_proxy_votes
+ * required the same property, but worth confirming.
+ */
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv, ret = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property\n");
+		return;
+	}
+
+	if (drv->vreg_pll) {
+		regulator_disable(drv->vreg_pll);
+		regulator_set_load(drv->vreg_pll, 0);
+	}
+	regulator_disable(drv->vreg_cx);
+	regulator_set_load(drv->vreg_cx, 0);
+	regulator_set_voltage(drv->vreg_cx, RPM_REGULATOR_CORNER_NONE, uv);
+	clk_disable_unprepare(drv->xo);
+	clk_disable_unprepare(drv->pnoc_clk);
+	clk_disable_unprepare(drv->qdss_clk);
+}
+EXPORT_SYMBOL(pil_q6v5_remove_proxy_votes);
+
+/*
+ * Halt an AXI port: assert HALTREQ, wait up to HALT_ACK_TIMEOUT_US for
+ * HALTACK, and verify the port reports idle. The request bit is cleared
+ * afterwards; the port itself stays halted until the next reset.
+ */
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
+{
+	int ret;
+	u32 status;
+
+	/* Assert halt request */
+	writel_relaxed(1, halt_base + AXI_HALTREQ);
+
+	/* Wait for halt */
+	ret = readl_poll_timeout(halt_base + AXI_HALTACK,
+		status, status != 0, 50, HALT_ACK_TIMEOUT_US);
+	if (ret)
+		dev_warn(pil->dev, "Port %p halt timeout\n", halt_base);
+	else if (!readl_relaxed(halt_base + AXI_IDLE))
+		dev_warn(pil->dev, "Port %p halt failed\n", halt_base);
+
+	/* Clear halt request (port will remain halted until reset) */
+	writel_relaxed(0, halt_base + AXI_HALTREQ);
+}
+EXPORT_SYMBOL(pil_q6v5_halt_axi_port);
+
+/* Assert the QDSP6 clamps (read-modify-write of QDSP6SS_PWR_CTL). */
+void assert_clamps(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/*
+	 * Assert QDSP6 I/O clamp, memory wordline clamp, and compiler memory
+	 * clamp as a software workaround to avoid high MX current during
+	 * LPASS/MSS restart.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= (Q6SS_CLAMP_IO | QDSP6v55_CLAMP_WL |
+			QDSP6v55_CLAMP_QMC_MEM);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	/* To make sure asserting clamps is done before MSS restart*/
+	mb();
+}
+
+/*
+ * Power the Q6 down (pre-v55 parts): stop the core clock, clamp IO,
+ * retire the Q6 memories, assert resets, then drop the block headswitch.
+ * Note: after the clamp step, `val` still holds the value last written to
+ * QDSP6SS_PWR_CTL, so the memory-retire step modifies that cached value
+ * rather than re-reading the register.
+ */
+static void __pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/* Turn off core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val &= ~Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Clamp IO */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Turn off Q6 memories */
+	val &= ~(Q6SS_L2DATA_SLP_NRET_N_0 | Q6SS_L2DATA_SLP_NRET_N_1 |
+		 Q6SS_L2DATA_SLP_NRET_N_2 | Q6SS_SLP_RET_N |
+		 Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLP_NRET_N |
+		 Q6SS_L2DATA_STBY_N);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Assert Q6 resets */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Kill power at block headswitch */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSS_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+}
+
+/* Shut the Q6 down, unless a v5.5+ subsystem driver owns reset/halt. */
+void pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/* Subsystem driver expected to halt bus and assert reset */
+	if (drv->qdsp6v55)
+		return;
+
+	__pil_q6v5_shutdown(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_shutdown);
+
+/*
+ * Bring the Q6 (pre-v55 parts) out of reset. Sequence: assert resets and
+ * stop the core, turn on the block headswitch, power the memories up one
+ * L2 bank at a time, remove the IO clamp, release reset, enable the core
+ * clock (with version-specific source selection), then start execution.
+ * The exact order of these register writes is hardware-mandated.
+ */
+static int __pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	mb();
+	udelay(1);
+
+	/*
+	 * Turn on memories. L2 banks should be done individually
+	 * to minimize inrush current.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+	       Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_2;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_1;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_0;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_CORE_ARES;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+
+	/* Need a different clock source for v5.2.0 */
+	if (drv->qdsp6v5_2_0) {
+		val &= ~Q6SS_CLK_SRC_SEL_FIELD;
+		val |= Q6SS_CLK_SRC_SEL_C;
+	}
+
+	/* force clock on during source switch */
+	if (drv->qdsp6v56)
+		val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;
+
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Start core execution */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_STOP_CORE;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	return 0;
+}
+
+/*
+ * Enable the QDSP6SS XO branch clock (CBCR enable bit) and poll for the
+ * CLK_OFF status bit (bit 31) to clear, up to HALT_CHECK_MAX_LOOPS us.
+ */
+static int q6v55_branch_clk_enable(struct q6v5_data *drv)
+{
+	void __iomem *cbcr_reg = drv->reg_base + QDSP6SS_XO_CBCR;
+	u32 loops = HALT_CHECK_MAX_LOOPS;
+
+	writel_relaxed(readl_relaxed(cbcr_reg) | 0x1, cbcr_reg);
+
+	while (loops--) {
+		if (!(readl_relaxed(cbcr_reg) & BIT(31)))
+			return 0;
+		udelay(1);
+	}
+
+	dev_err(drv->desc.dev, "Failed to enable xo branch clock.\n");
+	return -EINVAL;
+}
+
+/*
+ * __pil_q6v55_reset() - power-up/reset sequence for QDSP6v55-family cores.
+ *
+ * Follows a strict hardware bring-up order: assert resets and stop the
+ * core, enable the XO branch clock and BHS, bypass the LDO, power the
+ * memories one bank at a time (the exact sequence differs per silicon
+ * revision flag), remove the clamps, release reset, and finally enable
+ * the core clock.  Do not reorder these register accesses.
+ */
+static int __pil_q6v55_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+	int i;
+
+	/* Override the ACC value if required */
+	if (drv->override_acc)
+		writel_relaxed(QDSP6SS_ACC_OVERRIDE_VAL,
+				drv->reg_base + QDSP6SS_STRAP_ACC);
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* BHS require xo cbcr to be enabled */
+	i = q6v55_branch_clk_enable(drv);
+	if (i)
+		return i;
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSP6v55_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	mb();
+	udelay(1);
+
+	/* v62 reports BHS readiness in its own status register */
+	if (drv->qdsp6v62_1_2) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6V62SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	if (drv->qdsp6v61_1_1) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	/*
+	 * Put LDO in bypass mode.  'val' still holds the PWR_CTL image
+	 * read above (with the BHS bit already set).
+	 */
+	val |= QDSP6v55_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Memory power-up sequence differs per silicon revision */
+	if (drv->qdsp6v56_1_3) {
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2 and ETB memories 1 at a time */
+		for (i = 17; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_5 || drv->qdsp6v56_1_8
+						|| drv->qdsp6v56_1_10) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		for (i = 19; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_8_inrush_current) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		/* high banks first, then low, to limit inrush current */
+		for (i = 19; i >= 6; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+
+		for (i = 0 ; i <= 5 ; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v61_1_1 || drv->qdsp6v62_1_2) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base +
+				QDSP6V6SS_MEM_PWR_CTL);
+		for (i = 28; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+					QDSP6V6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else {
+		/* Turn on memories. */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= 0xFFF00;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L2 banks 1 at a time */
+		for (i = 0; i <= 7; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+		}
+	}
+
+	/* Remove word line clamp */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSP6v55_CLAMP_WL;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~(Q6SS_CORE_ARES | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	return 0;
+}
+
+/*
+ * pil_q6v5_reset() - bring the Hexagon core out of reset.
+ *
+ * Dispatches to the v55-family or the legacy v5 bring-up sequence
+ * depending on which compatible string matched at probe time.
+ */
+int pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	return drv->qdsp6v55 ? __pil_q6v55_reset(pil) : __pil_q6v5_reset(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_reset);
+
+/**
+ * pil_q6v5_init() - common probe-time setup for Q6v5-family PIL drivers.
+ * @pdev: platform device whose DT node describes the Hexagon subsystem
+ *
+ * Allocates driver state, maps the QDSP6 register space, and parses the
+ * halt bases, hardware-revision flags, clocks and regulators from the
+ * device tree.  Returns the new q6v5_data or an ERR_PTR() on failure.
+ */
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
+{
+	struct q6v5_data *drv;
+	struct resource *res;
+	struct pil_desc *desc;
+	struct property *prop;
+	int ret;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return ERR_PTR(-ENOMEM);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
+	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	/*
+	 * devm_ioremap_resource() returns an ERR_PTR(), never NULL, so a
+	 * "!ptr" test can never catch a failure; check with IS_ERR() and
+	 * propagate the real error.
+	 */
+	if (IS_ERR(drv->reg_base))
+		return ERR_CAST(drv->reg_base);
+
+	desc = &drv->desc;
+	ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &desc->name);
+	if (ret)
+		return ERR_PTR(ret);
+
+	desc->dev = &pdev->dev;
+
+	drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
+						   "qcom,pil-femto-modem");
+
+	/* Femto modems need none of the halt/clock/regulator setup below */
+	if (drv->qdsp6v5_2_0)
+		return drv;
+
+	/* Either one combined "halt_base" or separate q6/modem/nc bases */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
+	if (res) {
+		drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (!drv->axi_halt_base) {
+			dev_err(&pdev->dev, "Failed to map axi_halt_base.\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	if (!drv->axi_halt_base) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"halt_q6");
+		if (res) {
+			drv->axi_halt_q6 = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_q6) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_q6.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"halt_modem");
+		if (res) {
+			drv->axi_halt_mss = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_mss) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_mss.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"halt_nc");
+		if (res) {
+			drv->axi_halt_nc = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_nc) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_nc.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+	}
+
+	if (!(drv->axi_halt_base || (drv->axi_halt_q6 && drv->axi_halt_mss
+					&& drv->axi_halt_nc))) {
+		dev_err(&pdev->dev, "halt bases for Q6 are not defined.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Hardware-revision flags select the reset sequence to use */
+	drv->qdsp6v55 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v55-mss");
+	drv->qdsp6v56 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v56-mss");
+
+	drv->qdsp6v56_1_3 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-3");
+	drv->qdsp6v56_1_5 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-5");
+
+	drv->qdsp6v56_1_8 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8");
+	drv->qdsp6v56_1_10 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-10");
+
+	drv->qdsp6v56_1_8_inrush_current = of_property_read_bool(
+						pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8-inrush-current");
+
+	drv->qdsp6v61_1_1 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v61-1-1");
+
+	drv->qdsp6v62_1_2 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-2");
+
+	drv->non_elf_image = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mba-image-is-not-elf");
+
+	drv->override_acc = of_property_read_bool(pdev->dev.of_node,
+						"qcom,override-acc");
+
+	drv->ahb_clk_vote = of_property_read_bool(pdev->dev.of_node,
+						"qcom,ahb-clk-vote");
+	drv->mx_spike_wa = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mx-spike-wa");
+
+	drv->xo = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(drv->xo))
+		return ERR_CAST(drv->xo);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,pnoc-clk-vote")) {
+		drv->pnoc_clk = devm_clk_get(&pdev->dev, "pnoc_clk");
+		if (IS_ERR(drv->pnoc_clk))
+			return ERR_CAST(drv->pnoc_clk);
+	} else {
+		drv->pnoc_clk = NULL;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,proxy-clock-names", "qdss_clk") >= 0) {
+		drv->qdss_clk = devm_clk_get(&pdev->dev, "qdss_clk");
+		if (IS_ERR(drv->qdss_clk))
+			return ERR_CAST(drv->qdss_clk);
+	} else {
+		drv->qdss_clk = NULL;
+	}
+
+	drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(drv->vreg_cx))
+		return ERR_CAST(drv->vreg_cx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_cx-voltage", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_cx-voltage property\n");
+		/*
+		 * ERR_CAST(prop) with prop == NULL would return NULL (not
+		 * an error pointer), which callers treat as success and
+		 * then dereference; return a real errno instead.
+		 */
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* vdd_pll is optional; when present it needs a voltage and load vote */
+	drv->vreg_pll = devm_regulator_get(&pdev->dev, "vdd_pll");
+	if (!IS_ERR_OR_NULL(drv->vreg_pll)) {
+		int voltage;
+
+		ret = of_property_read_u32(pdev->dev.of_node, "qcom,vdd_pll",
+					   &voltage);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to find vdd_pll voltage.\n");
+			return ERR_PTR(ret);
+		}
+
+		ret = regulator_set_voltage(drv->vreg_pll, voltage, voltage);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to request vdd_pll voltage.\n");
+			return ERR_PTR(ret);
+		}
+
+		ret = regulator_set_load(drv->vreg_pll, 10000);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Failed to set vdd_pll mode.\n");
+			return ERR_PTR(ret);
+		}
+	} else {
+		drv->vreg_pll = NULL;
+	}
+
+	return drv;
+}
+EXPORT_SYMBOL(pil_q6v5_init);
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
new file mode 100644
index 000000000000..f1465e32b9af
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PIL_Q6V5_H
+#define __MSM_PIL_Q6V5_H
+
+#include "peripheral-loader.h"
+
+struct regulator;
+struct clk;
+struct pil_device;
+struct platform_device;
+
+/* Per-device state shared by the Q6v5-family PIL drivers. */
+struct q6v5_data {
+	void __iomem *reg_base;		/* QDSP6SS register space */
+	void __iomem *rmb_base;
+	void __iomem *cxrail_bhs;  /* External BHS register */
+	struct clk *xo;		   /* XO clock source */
+	struct clk *pnoc_clk;	   /* PNOC bus clock source */
+	struct clk *ahb_clk;	   /* PIL access to registers */
+	struct clk *axi_clk;	   /* CPU access to memory */
+	struct clk *core_clk;	   /* CPU core */
+	struct clk *reg_clk;	   /* CPU access registers */
+	struct clk *gpll0_mss_clk; /* GPLL0 to MSS connection */
+	struct clk *rom_clk;	   /* Boot ROM */
+	struct clk *snoc_axi_clk;
+	struct clk *mnoc_axi_clk;
+	struct clk *qdss_clk;	   /* optional, proxy-voted (see init) */
+	void __iomem *axi_halt_base; /* Halt base of q6, mss,
+					nc are in same 4K page */
+	void __iomem *axi_halt_q6;   /* separate halt bases, used when */
+	void __iomem *axi_halt_mss;  /* axi_halt_base is not provided  */
+	void __iomem *axi_halt_nc;
+	void __iomem *restart_reg;
+	struct regulator *vreg;
+	struct regulator *vreg_cx;
+	struct regulator *vreg_mx;
+	struct regulator *vreg_pll;  /* optional; NULL when absent */
+	bool is_booted;
+	struct pil_desc desc;	     /* embedded PIL descriptor */
+	bool self_auth;
+	phys_addr_t mba_dp_phys;
+	void *mba_dp_virt;
+	size_t mba_dp_size;
+	size_t dp_size;
+	/*
+	 * Hardware-revision flags below are read from DT compatibles and
+	 * properties in pil_q6v5_init() and select the reset sequence.
+	 */
+	bool qdsp6v55;
+	bool qdsp6v5_2_0;
+	bool qdsp6v56;
+	bool qdsp6v56_1_3;
+	bool qdsp6v56_1_5;
+	bool qdsp6v56_1_8;
+	bool qdsp6v56_1_8_inrush_current;
+	bool qdsp6v56_1_10;
+	bool qdsp6v61_1_1;
+	bool qdsp6v62_1_2;
+	bool non_elf_image;
+	bool restart_reg_sec;
+	bool override_acc;	/* write QDSP6SS_ACC_OVERRIDE_VAL on reset */
+	bool ahb_clk_vote;
+	bool mx_spike_wa;
+};
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base);
+void pil_q6v5_shutdown(struct pil_desc *pil);
+int pil_q6v5_reset(struct pil_desc *pil);
+void assert_clamps(struct pil_desc *pil);
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
new file mode 100644
index 000000000000..37eb9f0540ed
--- /dev/null
+++ b/drivers/soc/qcom/ramdump.c
@@ -0,0 +1,394 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/elf.h>
+#include <linux/wait.h>
+#include <soc/qcom/ramdump.h>
+#include <linux/dma-mapping.h>
+
+#define RAMDUMP_WAIT_MSECS 120000
+
+/* Per-subsystem dump state behind one "ramdump_<name>" misc device. */
+struct ramdump_device {
+	char name[256];			/* "ramdump_" + subsystem name */
+
+	unsigned int data_ready;	/* a dump is queued for userspace */
+	unsigned int consumer_present;	/* the device node is open */
+	int ramdump_status;		/* 0 = drained OK, -1 = error */
+
+	struct completion ramdump_complete; /* signalled when drained */
+	struct miscdevice device;
+
+	wait_queue_head_t dump_wait_q;	/* wakes readers/pollers */
+	int nsegments;
+	struct ramdump_segment *segments; /* borrowed from do_ramdump caller */
+	size_t elfcore_size;
+	char *elfcore_buf;		/* synthesized ELF header, per dump */
+	struct dma_attrs attrs;
+};
+
+/*
+ * Userspace opened the dump node: mark a consumer attached and reset
+ * the dump status.  private_data holds the miscdevice embedded in our
+ * ramdump_device.
+ */
+static int ramdump_open(struct inode *inode, struct file *filep)
+{
+	struct ramdump_device *rd_dev;
+
+	rd_dev = container_of(filep->private_data, struct ramdump_device,
+				device);
+	rd_dev->consumer_present = 1;
+	rd_dev->ramdump_status = 0;
+	return 0;
+}
+
+/*
+ * Userspace closed the dump node: drop the consumer, discard pending
+ * data and unblock a _do_ramdump() waiting on ramdump_complete.
+ */
+static int ramdump_release(struct inode *inode, struct file *filep)
+{
+	struct ramdump_device *rd_dev;
+
+	rd_dev = container_of(filep->private_data, struct ramdump_device,
+				device);
+	rd_dev->consumer_present = 0;
+	rd_dev->data_ready = 0;
+	complete(&rd_dev->ramdump_complete);
+	return 0;
+}
+
+/*
+ * Map a consumer file offset (relative to the end of the ELF header
+ * area) onto the address of the segment containing it.  On success
+ * *data_left is the number of bytes remaining in that segment and
+ * *vaddr its kernel virtual address when one was registered.  Returns
+ * 0 with *data_left == 0 when the offset is past every segment.
+ */
+static unsigned long offset_translate(loff_t user_offset,
+		struct ramdump_device *rd_dev, unsigned long *data_left,
+		void **vaddr)
+{
+	int seg;
+
+	*vaddr = NULL;
+
+	/* Walk the segment list, consuming whole segments as we go */
+	for (seg = 0; seg < rd_dev->nsegments; seg++) {
+		if (user_offset < rd_dev->segments[seg].size)
+			break;
+		user_offset -= rd_dev->segments[seg].size;
+	}
+
+	if (seg == rd_dev->nsegments) {
+		pr_debug("Ramdump(%s): offset_translate returning zero\n",
+				rd_dev->name);
+		*data_left = 0;
+		return 0;
+	}
+
+	*data_left = rd_dev->segments[seg].size - user_offset;
+
+	pr_debug("Ramdump(%s): Returning address: %llx, data_left = %ld\n",
+		rd_dev->name, rd_dev->segments[seg].address + user_offset,
+		*data_left);
+
+	if (rd_dev->segments[seg].v_address)
+		*vaddr = rd_dev->segments[seg].v_address + user_offset;
+
+	return rd_dev->segments[seg].address + user_offset;
+}
+
+#define MAX_IOREMAP_SIZE SZ_1M
+
+/*
+ * ramdump_read() - stream the queued dump to userspace.
+ *
+ * The stream is the synthesized ELF header (when present) followed by
+ * each segment in order; offset_translate() maps the file position to
+ * a segment.  Blocks until a dump is ready unless O_NONBLOCK is set.
+ * Returning 0 (EOF) marks a complete dump and signals _do_ramdump()
+ * via ramdump_complete, as does any error.
+ *
+ * NOTE(review): the dma_remap() fallback below is commented out, so
+ * only segments registered with a kernel v_address can be read; any
+ * other segment fails with -ENOMEM.
+ */
+static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
+			loff_t *pos)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
+	unsigned long data_left = 0, bytes_before, bytes_after;
+	unsigned long addr = 0;
+	size_t copy_size = 0, alignsize;
+	unsigned char *alignbuf = NULL, *finalbuf = NULL;
+	int ret = 0;
+	loff_t orig_pos = *pos;
+
+	if ((filep->f_flags & O_NONBLOCK) && !rd_dev->data_ready)
+		return -EAGAIN;
+
+	ret = wait_event_interruptible(rd_dev->dump_wait_q, rd_dev->data_ready);
+	if (ret)
+		return ret;
+
+	/* Phase 1: serve bytes out of the in-memory ELF header, if any */
+	if (*pos < rd_dev->elfcore_size) {
+		copy_size = rd_dev->elfcore_size - *pos;
+		copy_size = min(copy_size, count);
+
+		if (copy_to_user(buf, rd_dev->elfcore_buf + *pos, copy_size)) {
+			ret = -EFAULT;
+			goto ramdump_done;
+		}
+		*pos += copy_size;
+		count -= copy_size;
+		buf += copy_size;
+		if (count == 0)
+			return copy_size;
+	}
+
+	/* Phase 2: serve segment data */
+	addr = offset_translate(*pos - rd_dev->elfcore_size, rd_dev,
+				&data_left, &vaddr);
+
+	/* EOF check */
+	if (data_left == 0) {
+		pr_debug("Ramdump(%s): Ramdump complete. %lld bytes read.",
+			rd_dev->name, *pos);
+		rd_dev->ramdump_status = 0;
+		ret = 0;
+		goto ramdump_done;
+	}
+
+	/* Cap each call at one segment's remainder and MAX_IOREMAP_SIZE */
+	copy_size = min(count, (size_t)MAX_IOREMAP_SIZE);
+	copy_size = min((unsigned long)copy_size, data_left);
+
+	init_dma_attrs(&rd_dev->attrs);
+	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &rd_dev->attrs);
+	device_mem = vaddr;
+	//device_mem = vaddr ?: dma_remap(rd_dev->device.parent, NULL, addr,
+	//		copy_size, &rd_dev->attrs);
+	origdevice_mem = device_mem;
+
+	if (device_mem == NULL) {
+		pr_err("Ramdump(%s): Unable to ioremap: addr %lx, size %zd\n",
+			rd_dev->name, addr, copy_size);
+		rd_dev->ramdump_status = -1;
+		ret = -ENOMEM;
+		goto ramdump_done;
+	}
+
+	alignbuf = kzalloc(copy_size, GFP_KERNEL);
+	if (!alignbuf) {
+		pr_err("Ramdump(%s): Unable to alloc mem for aligned buf\n",
+				rd_dev->name);
+		rd_dev->ramdump_status = -1;
+		ret = -ENOMEM;
+		goto ramdump_done;
+	}
+
+	finalbuf = alignbuf;
+	alignsize = copy_size;
+
+	/*
+	 * Copy through a bounce buffer: any head bytes up to the next
+	 * 8-byte boundary go via memcpy_fromio(), then the aligned bulk
+	 * via memcpy(), then any unaligned tail via memcpy_fromio().
+	 */
+	if ((unsigned long)device_mem & 0x7) {
+		bytes_before = 8 - ((unsigned long)device_mem & 0x7);
+		memcpy_fromio(alignbuf, device_mem, bytes_before);
+		device_mem += bytes_before;
+		alignbuf += bytes_before;
+		alignsize -= bytes_before;
+	}
+
+	if (alignsize & 0x7) {
+		bytes_after = alignsize & 0x7;
+		memcpy(alignbuf, device_mem, alignsize - bytes_after);
+		device_mem += alignsize - bytes_after;
+		alignbuf += (alignsize - bytes_after);
+		alignsize = bytes_after;
+		memcpy_fromio(alignbuf, device_mem, alignsize);
+	} else
+		memcpy(alignbuf, device_mem, alignsize);
+
+	if (copy_to_user(buf, finalbuf, copy_size)) {
+		pr_err("Ramdump(%s): Couldn't copy all data to user.",
+			rd_dev->name);
+		rd_dev->ramdump_status = -1;
+		ret = -EFAULT;
+		goto ramdump_done;
+	}
+
+	kfree(finalbuf);
+	//if (!vaddr && origdevice_mem)
+	//dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+
+	*pos += copy_size;
+
+	pr_debug("Ramdump(%s): Read %zd bytes from address %lx.",
+			rd_dev->name, copy_size, addr);
+
+	return *pos - orig_pos;
+
+ramdump_done:
+	/* End of dump (or error): reset state and wake _do_ramdump() */
+	//if (!vaddr && origdevice_mem)
+	//dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+
+	kfree(finalbuf);
+	rd_dev->data_ready = 0;
+	*pos = 0;
+	complete(&rd_dev->ramdump_complete);
+	return ret;
+}
+
+/* Poll support: the node becomes readable once _do_ramdump() queues data. */
+static unsigned int ramdump_poll(struct file *filep,
+					struct poll_table_struct *wait)
+{
+	struct ramdump_device *rd_dev;
+	unsigned int mask;
+
+	rd_dev = container_of(filep->private_data, struct ramdump_device,
+				device);
+	poll_wait(filep, &rd_dev->dump_wait_q, wait);
+
+	mask = 0;
+	if (rd_dev->data_ready)
+		mask = POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
+static const struct file_operations ramdump_file_ops = {
+	/*
+	 * The misc core takes a module reference from fops->owner on
+	 * open; without it this module could be unloaded while a dump
+	 * node is still open.
+	 */
+	.owner = THIS_MODULE,
+	.open = ramdump_open,
+	.release = ramdump_release,
+	.read = ramdump_read,
+	.poll = ramdump_poll
+};
+
+/*
+ * create_ramdump_device() - register a "ramdump_<dev_name>" misc char
+ * device through which userspace collects dumps for one subsystem.
+ *
+ * Returns an opaque handle for do_ramdump()/destroy_ramdump_device(),
+ * or NULL on failure.
+ */
+void *create_ramdump_device(const char *dev_name, struct device *parent)
+{
+	struct ramdump_device *rd_dev;
+	int rc;
+
+	if (!dev_name) {
+		pr_err("%s: Invalid device name.\n", __func__);
+		return NULL;
+	}
+
+	rd_dev = kzalloc(sizeof(struct ramdump_device), GFP_KERNEL);
+	if (!rd_dev) {
+		pr_err("%s: Couldn't alloc space for ramdump device!",
+			__func__);
+		return NULL;
+	}
+
+	snprintf(rd_dev->name, ARRAY_SIZE(rd_dev->name), "ramdump_%s",
+		 dev_name);
+
+	init_completion(&rd_dev->ramdump_complete);
+	init_waitqueue_head(&rd_dev->dump_wait_q);
+
+	rd_dev->device.minor = MISC_DYNAMIC_MINOR;
+	rd_dev->device.name = rd_dev->name;
+	rd_dev->device.fops = &ramdump_file_ops;
+	rd_dev->device.parent = parent;
+
+	rc = misc_register(&rd_dev->device);
+	if (rc) {
+		pr_err("%s: misc_register failed for %s (%d)", __func__,
+			dev_name, rc);
+		kfree(rd_dev);
+		return NULL;
+	}
+
+	return (void *)rd_dev;
+}
+EXPORT_SYMBOL(create_ramdump_device);
+
+/*
+ * destroy_ramdump_device() - unregister and free a device created by
+ * create_ramdump_device().  Tolerates NULL/ERR_PTR handles.
+ */
+void destroy_ramdump_device(void *dev)
+{
+	struct ramdump_device *rd_dev = dev;
+
+	if (IS_ERR_OR_NULL(rd_dev))
+		return;
+
+	misc_deregister(&rd_dev->device);
+	kfree(rd_dev);
+}
+EXPORT_SYMBOL(destroy_ramdump_device);
+
+/*
+ * _do_ramdump() - hand a set of memory segments to the userspace
+ * consumer and wait for it to drain them.
+ *
+ * @use_elf selects whether a synthetic ELF32 core header describing
+ * the segments is prepended to the stream.  Returns 0 once userspace
+ * has read the whole dump, -EPIPE when no consumer has the node open
+ * or the drain times out (RAMDUMP_WAIT_MSECS), -ENOMEM on header
+ * allocation failure.  @segments must stay valid until return; their
+ * sizes are page-aligned in place.
+ */
+static int _do_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments, bool use_elf)
+{
+	int ret, i;
+	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+	Elf32_Phdr *phdr;
+	Elf32_Ehdr *ehdr;
+	unsigned long offset;
+
+	if (!rd_dev->consumer_present) {
+		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
+		return -EPIPE;
+	}
+
+	for (i = 0; i < nsegments; i++)
+		segments[i].size = PAGE_ALIGN(segments[i].size);
+
+	rd_dev->segments = segments;
+	rd_dev->nsegments = nsegments;
+
+	if (use_elf) {
+		/* Build an ELF32 core header: ehdr + one PT_LOAD per segment */
+		rd_dev->elfcore_size = sizeof(*ehdr) +
+				       sizeof(*phdr) * nsegments;
+		ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
+		rd_dev->elfcore_buf = (char *)ehdr;
+		if (!rd_dev->elfcore_buf)
+			return -ENOMEM;
+
+		memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
+		ehdr->e_ident[EI_CLASS] = ELFCLASS32;
+		ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
+		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+		ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
+		ehdr->e_type = ET_CORE;
+		ehdr->e_version = EV_CURRENT;
+		ehdr->e_phoff = sizeof(*ehdr);
+		ehdr->e_ehsize = sizeof(*ehdr);
+		ehdr->e_phentsize = sizeof(*phdr);
+		ehdr->e_phnum = nsegments;
+
+		/* Segment file offsets are laid out back-to-back after phdrs */
+		offset = rd_dev->elfcore_size;
+		phdr = (Elf32_Phdr *)(ehdr + 1);
+		for (i = 0; i < nsegments; i++, phdr++) {
+			phdr->p_type = PT_LOAD;
+			phdr->p_offset = offset;
+			phdr->p_vaddr = phdr->p_paddr = segments[i].address;
+			phdr->p_filesz = phdr->p_memsz = segments[i].size;
+			phdr->p_flags = PF_R | PF_W | PF_X;
+			offset += phdr->p_filesz;
+		}
+	}
+
+	rd_dev->data_ready = 1;
+	rd_dev->ramdump_status = -1;
+
+	reinit_completion(&rd_dev->ramdump_complete);
+
+	/* Tell userspace that the data is ready */
+	wake_up(&rd_dev->dump_wait_q);
+
+	/* Wait (with a timeout) to let the ramdump complete */
+	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
+			msecs_to_jiffies(RAMDUMP_WAIT_MSECS));
+
+	if (!ret) {
+		pr_err("Ramdump(%s): Timed out waiting for userspace.\n",
+			rd_dev->name);
+		ret = -EPIPE;
+	} else
+		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
+
+	rd_dev->data_ready = 0;
+	rd_dev->elfcore_size = 0;
+	kfree(rd_dev->elfcore_buf);
+	rd_dev->elfcore_buf = NULL;
+	return ret;
+
+}
+
+/* Collect a raw (headerless) dump of @segments through the device node. */
+int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+	return _do_ramdump(handle, segments, nsegments, false);
+}
+EXPORT_SYMBOL(do_ramdump);
+
+/* Collect a dump of @segments prefixed with a synthetic ELF32 core header. */
+int
+do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+	return _do_ramdump(handle, segments, nsegments, true);
+}
+EXPORT_SYMBOL(do_elf_ramdump);
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
new file mode 100644
index 000000000000..4437126de8d7
--- /dev/null
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -0,0 +1,1037 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/dma-mapping.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/scm.h>
+
+#include <soc/qcom/smem.h>
+
+#include "peripheral-loader.h"
+
+#define XO_FREQ 19200000
+#define PROXY_TIMEOUT_MS 10000
+#define MAX_SSR_REASON_LEN 81U
+#define STOP_ACK_TIMEOUT_MS 1000
+#define CRASH_STOP_ACK_TO_MS 200
+
+#define desc_to_data(d) container_of(d, struct pil_tz_data, desc)
+#define subsys_to_data(d) container_of(d, struct pil_tz_data, subsys_desc)
+
+/**
+ * struct reg_info - regulator info
+ * @reg: regulator handle
+ * @uV: voltage in uV (0 means no voltage vote is applied)
+ * @uA: current in uA (0 means no load vote is applied)
+ */
+struct reg_info {
+	struct regulator *reg;
+	int uV;
+	int uA;
+};
+
+/**
+ * struct pil_tz_data
+ * @regs: regulators that should be always on when the subsystem is
+ *	   brought out of reset
+ * @proxy_regs: regulators that should be on during pil proxy voting
+ * @clks: clocks that should be always on when the subsystem is
+ *	  brought out of reset
+ * @proxy_clks: clocks that should be on during pil proxy voting
+ * @reg_count: the number of always on regulators
+ * @proxy_reg_count: the number of proxy voting regulators
+ * @clk_count: the number of always on clocks
+ * @proxy_clk_count: the number of proxy voting clocks
+ * @smem_id: the smem id used to read the subsystem crash reason
+ * @ramdump_dev: ramdump device pointer
+ * @pas_id: the PAS id for tz
+ * @bus_client: bus client id
+ * @enable_bus_scaling: set to true if PIL needs to vote for
+ *			bus bandwidth
+ * @keep_proxy_regs_on: If set, during proxy unvoting, PIL removes the
+ *			voltage/current vote for proxy regulators but leaves
+ *			them enabled.
+ * @stop_ack: state of completion of stop ack
+ * @desc: PIL descriptor
+ * @subsys: subsystem device pointer
+ * @subsys_desc: subsystem descriptor
+ */
+struct pil_tz_data {
+	struct reg_info *regs;
+	struct reg_info *proxy_regs;
+	struct clk **clks;
+	struct clk **proxy_clks;
+	int reg_count;
+	int proxy_reg_count;
+	int clk_count;
+	int proxy_clk_count;
+	int smem_id;
+	void *ramdump_dev;
+	u32 pas_id;
+	u32 bus_client;
+	bool enable_bus_scaling;
+	bool keep_proxy_regs_on;
+	struct completion stop_ack;
+	struct pil_desc desc;
+	struct subsys_device *subsys;
+	struct subsys_desc subsys_desc;
+};
+
+/*
+ * SCM command ids for the TrustZone Peripheral Authentication Service.
+ * NOTE(review): the explicit values (and the gap before
+ * PAS_AUTH_AND_RESET_CMD) look like a fixed secure-world ABI — confirm
+ * against the TZ interface before changing.
+ */
+enum scm_cmd {
+	PAS_INIT_IMAGE_CMD = 1,
+	PAS_MEM_SETUP_CMD,
+	PAS_AUTH_AND_RESET_CMD = 5,
+	PAS_SHUTDOWN_CMD,
+};
+
+/*
+ * Peripheral ids passed to TZ as the PAS id.  NOTE(review): ordering
+ * appears ABI-fixed — do not reorder or insert entries without
+ * confirming against the secure-world definition.
+ */
+enum pas_id {
+	PAS_MODEM,
+	PAS_Q6,
+	PAS_DSPS,
+	PAS_TZAPPS,
+	PAS_MODEM_SW,
+	PAS_MODEM_FW,
+	PAS_WCNSS,
+	PAS_SECAPP,
+	PAS_GSS,
+	PAS_VIDC,
+	PAS_VPU,
+	PAS_BCSS,
+};
+
+/*
+ * Bus bandwidth usecases for the scm-pas client: index 0 drops the
+ * vote, index 1 requests bandwidth while TZ authenticates an image.
+ * The master id (.src) is patched in at runtime by scm_pas_init().
+ */
+static struct msm_bus_paths scm_pas_bw_tbl[] = {
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+			},
+		},
+		.num_paths = 1,
+	},
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+				.ib = 492 * 8 * 1000000UL,
+				.ab = 492 * 8 * 100000UL,
+			},
+		},
+		.num_paths = 1,
+	},
+};
+
+static struct msm_bus_scale_pdata scm_pas_bus_pdata = {
+	.usecase = scm_pas_bw_tbl,
+	.num_usecases = ARRAY_SIZE(scm_pas_bw_tbl),
+	.name = "scm_pas",
+};
+
+/* Reference-counted bandwidth vote shared by all TZ peripherals */
+static uint32_t scm_perf_client;
+static int scm_pas_bw_count;
+static DEFINE_MUTEX(scm_pas_bw_mutex);
+
+/*
+ * Vote for bus bandwidth on behalf of TZ before image authentication.
+ * Reference counted: only the first enable issues a bus request.
+ */
+static int scm_pas_enable_bw(void)
+{
+	int rc = 0;
+
+	if (!scm_perf_client)
+		return -EINVAL;
+
+	mutex_lock(&scm_pas_bw_mutex);
+	if (scm_pas_bw_count == 0) {
+		rc = msm_bus_scale_client_update_request(scm_perf_client, 1);
+		if (rc) {
+			pr_err("scm-pas; Bandwidth request failed (%d)\n", rc);
+			msm_bus_scale_client_update_request(scm_perf_client, 0);
+		} else {
+			scm_pas_bw_count++;
+		}
+	}
+	mutex_unlock(&scm_pas_bw_mutex);
+
+	return rc;
+}
+
+/* Drop one reference on the TZ bandwidth vote; remove it on the last put. */
+static void scm_pas_disable_bw(void)
+{
+	mutex_lock(&scm_pas_bw_mutex);
+	scm_pas_bw_count--;
+	if (scm_pas_bw_count == 0)
+		msm_bus_scale_client_update_request(scm_perf_client, 0);
+	mutex_unlock(&scm_pas_bw_mutex);
+}
+
+/*
+ * One-time registration of the scm-pas bus client.  The caller's bus
+ * master id is patched into both bandwidth usecases before the client
+ * is registered; subsequent calls are no-ops.
+ */
+static void scm_pas_init(int id)
+{
+	static int is_inited;
+	int uc;
+
+	if (is_inited)
+		return;
+	is_inited = 1;
+
+	for (uc = 0; uc < 2; uc++)
+		scm_pas_bw_tbl[uc].vectors[0].src = id;
+
+	scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
+	if (!scm_perf_client)
+		pr_warn("scm-pas: Unable to register bus client\n");
+}
+
+/*
+ * of_read_clocks() - look up the list of clocks named by @propname.
+ *
+ * Each clock may carry an optional "qcom,<name>-freq" property giving
+ * its rate (defaulting to the XO rate); rate-settable clocks that read
+ * back a zero rate are programmed before use.
+ *
+ * Returns the number of clocks found (0 when the property is absent)
+ * or a negative errno, storing the array through @clks_ref on success.
+ */
+static int of_read_clocks(struct device *dev, struct clk ***clks_ref,
+			  const char *propname)
+{
+	int clk_count, i, len;
+	struct clk **clks;
+
+	if (!of_find_property(dev->of_node, propname, &len))
+		return 0;
+
+	clk_count = of_property_count_strings(dev->of_node, propname);
+	/*
+	 * of_property_count_strings() returns a plain negative errno;
+	 * IS_ERR_VALUE() on an int is unreliable (notably on 64-bit),
+	 * so test the sign directly.
+	 */
+	if (clk_count < 0) {
+		dev_err(dev, "Failed to get clock names\n");
+		return -EINVAL;
+	}
+
+	/* devm_kcalloc() gives an overflow-checked, zeroed array */
+	clks = devm_kcalloc(dev, clk_count, sizeof(struct clk *),
+				GFP_KERNEL);
+	if (!clks)
+		return -ENOMEM;
+
+	for (i = 0; i < clk_count; i++) {
+		const char *clock_name;
+		char clock_freq_name[50];
+		u32 clock_rate = XO_FREQ;
+
+		of_property_read_string_index(dev->of_node,
+					      propname, i,
+					      &clock_name);
+		snprintf(clock_freq_name, ARRAY_SIZE(clock_freq_name),
+				"qcom,%s-freq", clock_name);
+		/* Optional per-clock rate override: "qcom,<name>-freq" */
+		if (of_find_property(dev->of_node, clock_freq_name, &len)) {
+			if (of_property_read_u32(dev->of_node, clock_freq_name,
+						 &clock_rate)) {
+				dev_err(dev, "Failed to read %s clock's freq\n",
+					clock_freq_name);
+				return -EINVAL;
+			}
+		}
+
+		clks[i] = devm_clk_get(dev, clock_name);
+		if (IS_ERR(clks[i])) {
+			int rc = PTR_ERR(clks[i]);
+
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s clock\n",
+					clock_name);
+			return rc;
+		}
+
+		/* Make sure rate-settable clocks' rates are set */
+		if (clk_get_rate(clks[i]) == 0)
+			clk_set_rate(clks[i], clk_round_rate(clks[i],
+							     clock_rate));
+	}
+
+	*clks_ref = clks;
+	return clk_count;
+}
+
+/*
+ * of_read_regs() - look up the regulators named by @propname together
+ * with their optional "qcom,<name>-uV-uA" voltage/current votes.
+ *
+ * Returns the number of regulators found (0 when the property is
+ * absent) or a negative errno, storing the array through @regs_ref on
+ * success.  A missing -uV-uA property leaves both votes at 0 (no vote).
+ */
+static int of_read_regs(struct device *dev, struct reg_info **regs_ref,
+			const char *propname)
+{
+	int reg_count, i, len, rc;
+	struct reg_info *regs;
+
+	if (!of_find_property(dev->of_node, propname, &len))
+		return 0;
+
+	reg_count = of_property_count_strings(dev->of_node, propname);
+	/*
+	 * of_property_count_strings() returns a plain negative errno;
+	 * IS_ERR_VALUE() on an int is unreliable (notably on 64-bit),
+	 * so test the sign directly.
+	 */
+	if (reg_count < 0) {
+		dev_err(dev, "Failed to get regulator names\n");
+		return -EINVAL;
+	}
+
+	/* devm_kcalloc() gives an overflow-checked, zeroed array */
+	regs = devm_kcalloc(dev, reg_count, sizeof(struct reg_info),
+				GFP_KERNEL);
+	if (!regs)
+		return -ENOMEM;
+
+	for (i = 0; i < reg_count; i++) {
+		const char *reg_name;
+		char reg_uV_uA_name[50];
+		u32 vdd_uV_uA[2];
+
+		of_property_read_string_index(dev->of_node,
+					      propname, i,
+					      &reg_name);
+
+		regs[i].reg = devm_regulator_get(dev, reg_name);
+		if (IS_ERR(regs[i].reg)) {
+			/* reuse the outer rc rather than shadowing it */
+			rc = PTR_ERR(regs[i].reg);
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s regulator\n",
+					reg_name);
+			return rc;
+		}
+
+		/*
+		 * Read the voltage and current values for the corresponding
+		 * regulator. The device tree property name is "qcom," +
+		 * "regulator_name" + "-uV-uA".
+		 */
+		rc = snprintf(reg_uV_uA_name, ARRAY_SIZE(reg_uV_uA_name),
+			      "qcom,%s-uV-uA", reg_name);
+		if (rc < strlen(reg_name) + 6) {
+			dev_err(dev, "Failed to hold reg_uV_uA_name\n");
+			return -EINVAL;
+		}
+
+		if (!of_find_property(dev->of_node, reg_uV_uA_name, &len))
+			continue;
+
+		len /= sizeof(vdd_uV_uA[0]);
+
+		/* There should be two entries: one for uV and one for uA */
+		if (len != 2) {
+			dev_err(dev, "Missing uV/uA value\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32_array(dev->of_node, reg_uV_uA_name,
+						vdd_uV_uA, len);
+		if (rc) {
+			dev_err(dev, "Failed to read uV/uA values\n");
+			return rc;
+		}
+
+		regs[i].uV = vdd_uV_uA[0];
+		regs[i].uA = vdd_uV_uA[1];
+	}
+
+	*regs_ref = regs;
+	return reg_count;
+}
+
+/*
+ * Register the bus-scaling client described by the "qcom,msm-bus"
+ * device-tree data.  A failed client registration is only warned
+ * about; missing platform data is an error.
+ */
+static int of_read_bus_pdata(struct platform_device *pdev,
+				struct pil_tz_data *d)
+{
+	struct msm_bus_scale_pdata *pdata = msm_bus_cl_get_pdata(pdev);
+
+	if (!pdata)
+		return -EINVAL;
+
+	d->bus_client = msm_bus_scale_register_client(pdata);
+	if (!d->bus_client)
+		pr_warn("%s: Unable to register bus client\n", __func__);
+
+	return 0;
+}
+
+/*
+ * piltz_resc_init() - parse every clock, regulator and bus-scaling
+ * resource for a TZ-controlled peripheral from the device tree and
+ * record the counts in @d.
+ */
+static int piltz_resc_init(struct platform_device *pdev, struct pil_tz_data *d)
+{
+	struct device *dev = &pdev->dev;
+	int cnt, ret, len;
+
+	cnt = of_read_clocks(dev, &d->clks, "qcom,active-clock-names");
+	if (cnt < 0) {
+		dev_err(dev, "Failed to setup clocks.\n");
+		return cnt;
+	}
+	d->clk_count = cnt;
+
+	cnt = of_read_clocks(dev, &d->proxy_clks, "qcom,proxy-clock-names");
+	if (cnt < 0) {
+		dev_err(dev, "Failed to setup proxy clocks.\n");
+		return cnt;
+	}
+	d->proxy_clk_count = cnt;
+
+	cnt = of_read_regs(dev, &d->regs, "qcom,active-reg-names");
+	if (cnt < 0) {
+		dev_err(dev, "Failed to setup regulators.\n");
+		return cnt;
+	}
+	d->reg_count = cnt;
+
+	cnt = of_read_regs(dev, &d->proxy_regs, "qcom,proxy-reg-names");
+	if (cnt < 0) {
+		dev_err(dev, "Failed to setup proxy regulators.\n");
+		return cnt;
+	}
+	d->proxy_reg_count = cnt;
+
+	/* Bus scaling is optional: only set up when the DT names a bus */
+	if (of_find_property(dev->of_node, "qcom,msm-bus,name", &len)) {
+		d->enable_bus_scaling = true;
+		ret = of_read_bus_pdata(pdev, d);
+		if (ret) {
+			dev_err(dev, "Failed to setup bus scaling client.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Apply voltage and load votes for each regulator in @regs and enable it.
+ *
+ * When @reg_no_enable is true and the "qcom,keep-proxy-regs-on" DT flag
+ * is set, the regulator_enable() step is skipped (only the votes are
+ * applied; the regulators are assumed to already be on).  On failure,
+ * everything done so far is rolled back.  Returns 0 or the first failing
+ * call's error code.
+ */
+static int enable_regulators(struct pil_tz_data *d, struct device *dev,
+				struct reg_info *regs, int reg_count,
+				bool reg_no_enable)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < reg_count; i++) {
+		/* Vote for the DT-specified minimum voltage, if any. */
+		if (regs[i].uV > 0) {
+			rc = regulator_set_voltage(regs[i].reg,
+					regs[i].uV, INT_MAX);
+			if (rc) {
+				dev_err(dev, "Failed to request voltage.\n");
+				goto err_voltage;
+			}
+		}
+
+		/* Vote for the DT-specified load current, if any. */
+		if (regs[i].uA > 0) {
+			rc = regulator_set_load(regs[i].reg,
+						regs[i].uA);
+			if (rc < 0) {
+				dev_err(dev, "Failed to set regulator mode\n");
+				goto err_mode;
+			}
+		}
+
+		if (d->keep_proxy_regs_on && reg_no_enable)
+			continue;
+
+		rc = regulator_enable(regs[i].reg);
+		if (rc) {
+			dev_err(dev, "Regulator enable failed\n");
+			goto err_enable;
+		}
+	}
+
+	return 0;
+/*
+ * Unwind: first drop the votes already applied to regs[i] (fall-through
+ * between labels is intentional), then fully release every earlier
+ * regulator.
+ */
+err_enable:
+	if (regs[i].uA > 0) {
+		regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+		regulator_set_load(regs[i].reg, 0);
+	}
+err_mode:
+	if (regs[i].uV > 0)
+		regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+err_voltage:
+	for (i--; i >= 0; i--) {
+		if (regs[i].uV > 0)
+			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+		if (regs[i].uA > 0)
+			regulator_set_load(regs[i].reg, 0);
+
+		if (d->keep_proxy_regs_on && reg_no_enable)
+			continue;
+		regulator_disable(regs[i].reg);
+	}
+
+	return rc;
+}
+
+/*
+ * Undo enable_regulators(): drop voltage/load votes and disable each
+ * regulator.  When @reg_no_disable is set together with the
+ * "qcom,keep-proxy-regs-on" DT flag, the outputs stay physically on.
+ */
+static void disable_regulators(struct pil_tz_data *d, struct reg_info *regs,
+					int reg_count, bool reg_no_disable)
+{
+	struct reg_info *r;
+	int i;
+
+	for (i = 0; i < reg_count; i++) {
+		r = &regs[i];
+
+		if (r->uV > 0)
+			regulator_set_voltage(r->reg, 0, INT_MAX);
+
+		if (r->uA > 0)
+			regulator_set_load(r->reg, 0);
+
+		if (!(d->keep_proxy_regs_on && reg_no_disable))
+			regulator_disable(r->reg);
+	}
+}
+
+/*
+ * Prepare and enable every clock in @clks, in order.  On failure the
+ * clocks already enabled are rolled back and the error is returned.
+ */
+static int prepare_enable_clocks(struct device *dev, struct clk **clks,
+					int clk_count)
+{
+	int i, rc;
+
+	for (i = 0; i < clk_count; i++) {
+		rc = clk_prepare_enable(clks[i]);
+		if (rc) {
+			dev_err(dev, "Clock enable failed\n");
+			while (--i >= 0)
+				clk_disable_unprepare(clks[i]);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/* Disable and unprepare every clock in @clks (reverse of enable order
+ * is not used here; clocks are released in the same order they were
+ * enabled). */
+static void disable_unprepare_clocks(struct clk **clks, int clk_count)
+{
+	int i;
+
+	for (i = 0; i < clk_count; i++)
+		clk_disable_unprepare(clks[i]);
+}
+
+/*
+ * PIL proxy-vote callback: turn on the proxy regulators and clocks and
+ * vote for bus bandwidth while the image boots.  Balanced by
+ * pil_remove_proxy_vote().  No-op when authentication is disabled.
+ */
+static int pil_make_proxy_vote(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	int rc;
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	rc = enable_regulators(d, pil->dev, d->proxy_regs,
+					d->proxy_reg_count, false);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
+							d->proxy_clk_count);
+	if (rc)
+		goto err_clks;
+
+	if (d->bus_client) {
+		rc = msm_bus_scale_client_update_request(d->bus_client, 1);
+		if (rc) {
+			dev_err(pil->dev, "bandwidth request failed\n");
+			goto err_bw;
+		}
+	} else
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+					d->subsys_desc.name);
+
+	return 0;
+err_bw:
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+err_clks:
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+
+	return rc;
+}
+
+/*
+ * PIL proxy-unvote callback: drop the bandwidth vote, proxy clocks and
+ * proxy regulator votes taken in pil_make_proxy_vote().  The final
+ * argument true means regulators stay physically enabled when the
+ * "qcom,keep-proxy-regs-on" DT flag is set.
+ */
+static void pil_remove_proxy_vote(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+
+	if (d->subsys_desc.no_auth)
+		return;
+
+	if (d->bus_client)
+		msm_bus_scale_client_update_request(d->bus_client, 0);
+	else
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+					d->subsys_desc.name);
+
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, true);
+}
+
+/*
+ * Hand the firmware metadata blob to TZ (PAS_INIT_IMAGE_CMD) so secure
+ * authentication of the image can begin.
+ *
+ * The metadata is copied into a strongly-ordered DMA buffer allocated
+ * against a zero-initialised stack struct device with a full coherent
+ * mask -- NOTE(review): presumably to obtain a TZ-visible physically
+ * contiguous buffer independent of the platform device; confirm this is
+ * intentional, as a stack struct device bypasses the driver model.
+ *
+ * Returns 0 on success, a negative errno on local failure, or the
+ * (positive) SCM return code reported by TZ.
+ */
+static int pil_init_image_trusted(struct pil_desc *pil,
+		const u8 *metadata, size_t size)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	struct pas_init_image_req {
+		u32	proc;
+		u32	image_addr;
+	} request;
+	u32 scm_ret = 0;
+	void *mdata_buf;
+	dma_addr_t mdata_phys;
+	int ret;
+	DEFINE_DMA_ATTRS(attrs);
+	struct device dev = {0};
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	/* Hold a bus-bandwidth vote for the duration of the SCM call. */
+	ret = scm_pas_enable_bw();
+	if (ret)
+		return ret;
+	dev.coherent_dma_mask =
+		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
+	mdata_buf = dma_alloc_attrs(&dev, size, &mdata_phys, GFP_KERNEL,
+					&attrs);
+	if (!mdata_buf) {
+		pr_err("scm-pas: Allocation for metadata failed.\n");
+		scm_pas_disable_bw();
+		return -ENOMEM;
+	}
+
+	memcpy(mdata_buf, metadata, size);
+
+	request.proc = d->pas_id;
+	request.image_addr = mdata_phys;
+
+	/* Legacy and ARMv8 SCM conventions pass the same two arguments. */
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
+				sizeof(request), &scm_ret, sizeof(scm_ret));
+	} else {
+		desc.args[0] = d->pas_id;
+		desc.args[1] = mdata_phys;
+		desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_RW);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD),
+				&desc);
+		scm_ret = desc.ret[0];
+	}
+
+	/* TZ has consumed the metadata; the bounce buffer can go. */
+	dma_free_attrs(&dev, size, mdata_buf, mdata_phys, &attrs);
+	scm_pas_disable_bw();
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+/*
+ * Tell TZ (PAS_MEM_SETUP_CMD) the physical region the image will occupy.
+ * Returns 0 on success, a negative errno on SCM transport failure, or
+ * the (positive) SCM return code from TZ.
+ */
+static int pil_mem_setup_trusted(struct pil_desc *pil, phys_addr_t addr,
+			       size_t size)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	struct pas_init_image_req {
+		u32	proc;
+		u32	start_addr;
+		u32	len;
+	} request;
+	u32 scm_ret = 0;
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	request.proc = d->pas_id;
+	request.start_addr = addr;
+	request.len = size;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
+				sizeof(request), &scm_ret, sizeof(scm_ret));
+	} else {
+		desc.args[0] = d->pas_id;
+		desc.args[1] = addr;
+		desc.args[2] = size;
+		desc.arginfo = SCM_ARGS(3);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
+				&desc);
+		scm_ret = desc.ret[0];
+	}
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+/*
+ * Enable the subsystem's own regulators and clocks, then ask TZ
+ * (PAS_AUTH_AND_RESET_CMD) to authenticate the image and bring the
+ * processor out of reset.
+ *
+ * On success the regulators/clocks are deliberately left on; they are
+ * released later by pil_shutdown_trusted().
+ */
+static int pil_auth_and_reset(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	int rc;
+	u32 proc, scm_ret = 0;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	desc.args[0] = proc = d->pas_id;
+	desc.arginfo = SCM_ARGS(1);
+
+	rc = enable_regulators(d, pil->dev, d->regs, d->reg_count, false);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->clks, d->clk_count);
+	if (rc)
+		goto err_clks;
+
+	/* Bandwidth vote held only for the duration of the SCM call. */
+	rc = scm_pas_enable_bw();
+	if (rc)
+		goto err_reset;
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &proc,
+				sizeof(proc), &scm_ret, sizeof(scm_ret));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+				PAS_AUTH_AND_RESET_CMD), &desc);
+		scm_ret = desc.ret[0];
+	}
+	scm_pas_disable_bw();
+	if (rc)
+		goto err_reset;
+
+	return scm_ret;
+err_reset:
+	disable_unprepare_clocks(d->clks, d->clk_count);
+err_clks:
+	disable_regulators(d, d->regs, d->reg_count, false);
+
+	return rc;
+}
+
+/*
+ * Shut the subsystem down via TZ (PAS_SHUTDOWN_CMD).
+ *
+ * The proxy regulators and clocks are turned on around the SCM call;
+ * the proxy-regulator enable passes reg_no_enable=true so regulators
+ * held on via "qcom,keep-proxy-regs-on" are not enabled a second time.
+ * On success the active clock/regulator votes taken in
+ * pil_auth_and_reset() are also dropped.
+ */
+static int pil_shutdown_trusted(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	u32 proc, scm_ret = 0;
+	int rc;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	desc.args[0] = proc = d->pas_id;
+	desc.arginfo = SCM_ARGS(1);
+
+	rc = enable_regulators(d, pil->dev, d->proxy_regs,
+					d->proxy_reg_count, true);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
+						d->proxy_clk_count);
+	if (rc)
+		goto err_clks;
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &proc,
+			      sizeof(proc), &scm_ret, sizeof(scm_ret));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_SHUTDOWN_CMD),
+			       &desc);
+		scm_ret = desc.ret[0];
+	}
+
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+
+	if (rc)
+		return rc;
+
+	/* TZ shutdown succeeded: release the active (boot-time) votes. */
+	disable_unprepare_clocks(d->clks, d->clk_count);
+	disable_regulators(d, d->regs, d->reg_count, false);
+
+	return scm_ret;
+err_clks:
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+	return rc;
+}
+
+/* PIL callbacks for images authenticated and booted through TrustZone. */
+static struct pil_reset_ops pil_ops_trusted = {
+	.init_image = pil_init_image_trusted,
+	.mem_setup =  pil_mem_setup_trusted,
+	.auth_and_reset = pil_auth_and_reset,
+	.shutdown = pil_shutdown_trusted,
+	.proxy_vote = pil_make_proxy_vote,
+	.proxy_unvote = pil_remove_proxy_vote,
+};
+
+/*
+ * Read and log the subsystem failure reason (SFR) string published by
+ * the remote processor in SMEM, then clear it so a stale reason is not
+ * reported on the next crash.  No-op when no SMEM id was configured.
+ */
+static void log_failure_reason(const struct pil_tz_data *d)
+{
+	u32 size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+	const char *name = d->subsys_desc.name;
+
+	if (d->smem_id == -1)
+		return;
+
+	smem_reason = smem_get_entry_no_rlock(d->smem_id, &size, 0,
+							SMEM_ANY_HOST_FLAG);
+	if (!smem_reason || !size) {
+		pr_err("%s SFR: (unknown, smem_get_entry_no_rlock failed).\n",
+									name);
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("%s SFR: (unknown, empty string found).\n", name);
+		return;
+	}
+
+	strlcpy(reason, smem_reason, min(size, MAX_SSR_REASON_LEN));
+	pr_err("%s subsystem failure reason: %s.\n", name, reason);
+
+	smem_reason[0] = '\0';
+	/* Ensure the cleared SFR is visible to the remote side. */
+	wmb();
+}
+
+/*
+ * Subsystem-restart shutdown callback: if the peripheral has not already
+ * crashed and @force_stop is requested, assert the force-stop GPIO and
+ * wait (bounded) for the remote stop acknowledgement before powering the
+ * image down through the PIL core.
+ */
+static int subsys_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+	int ret;
+
+	if (!subsys_get_crash_status(d->subsys) && force_stop &&
+						subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		ret = wait_for_completion_timeout(&d->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from %s.\n",
+							subsys->name);
+		/* De-assert even on timeout so the next cycle starts clean. */
+		gpio_set_value(subsys->force_stop_gpio, 0);
+	}
+
+	pil_shutdown(&d->desc);
+	return 0;
+}
+
+/* Subsystem-restart powerup callback: (re)boot the image via PIL. */
+static int subsys_powerup(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	/* Re-arm the stop-ack completion for the next shutdown cycle. */
+	if (subsys->stop_ack_irq)
+		reinit_completion(&d->stop_ack);
+
+	d->desc.fw_name = subsys->fw_name;
+	return pil_boot(&d->desc);
+}
+
+/* Ramdump callback: collect a dump only when ramdumps are enabled. */
+static int subsys_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	return enable ? pil_do_ramdump(&d->desc, d->ramdump_dev) : 0;
+}
+
+/* Release the image memory held by the PIL core for this subsystem. */
+static void subsys_free_memory(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	pil_free_memory(&d->desc);
+}
+
+/*
+ * Panic-path shutdown: assert the force-stop GPIO and busy-wait (no
+ * scheduling is possible in this context, hence mdelay) to give the
+ * remote processor time to quiesce before the system goes down.
+ * NOTE(review): this tests force_stop_gpio > 0 while subsys_shutdown()
+ * tests plain truthiness -- confirm which convention is intended.
+ */
+static void subsys_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	if (subsys->force_stop_gpio > 0 &&
+				!subsys_get_crash_status(d->subsys)) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		mdelay(CRASH_STOP_ACK_TO_MS);
+	}
+}
+
+/*
+ * IRQ handler for the peripheral's error-fatal signal: log the SMEM
+ * failure reason and trigger a subsystem restart, unless a restart is
+ * already in progress for this crash.
+ */
+static irqreturn_t subsys_err_fatal_intr_handler (int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	pr_err("Fatal error on %s!\n", d->subsys_desc.name);
+	if (subsys_get_crash_status(d->subsys)) {
+		pr_err("%s: Ignoring error fatal, restart in progress\n",
+							d->subsys_desc.name);
+		return IRQ_HANDLED;
+	}
+	subsys_set_crash_status(d->subsys, true);
+	log_failure_reason(d);
+	subsystem_restart_dev(d->subsys);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * IRQ handler for the peripheral watchdog bite.  When system_debug is
+ * set and the err-fatal GPIO is not asserted, panic so a full system
+ * ramdump is captured; otherwise restart just this subsystem.
+ */
+static irqreturn_t subsys_wdog_bite_irq_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	if (subsys_get_crash_status(d->subsys))
+		return IRQ_HANDLED;
+	pr_err("Watchdog bite received from %s!\n", d->subsys_desc.name);
+
+	if (d->subsys_desc.system_debug &&
+			!gpio_get_value(d->subsys_desc.err_fatal_gpio))
+		panic("%s: System ramdump requested. Triggering device restart!\n",
+							__func__);
+	subsys_set_crash_status(d->subsys, true);
+	log_failure_reason(d);
+	subsystem_restart_dev(d->subsys);
+
+	return IRQ_HANDLED;
+}
+
+/* IRQ handler: the remote side acknowledged our force-stop request. */
+static irqreturn_t subsys_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	pr_info("Received stop ack interrupt from %s\n", d->subsys_desc.name);
+	complete(&d->stop_ack);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe a "qcom,pil-tz-generic" device: parse DT configuration, set up
+ * clock/regulator/bus resources (unless qcom,pil-no-auth), and register
+ * the PIL descriptor, ramdump device and subsystem-restart hooks.
+ */
+static int pil_tz_driver_probe(struct platform_device *pdev)
+{
+	struct pil_tz_data *d;
+	u32 proxy_timeout;
+	int len, rc;
+
+	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, d);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,pil-no-auth"))
+		d->subsys_desc.no_auth = true;
+
+	d->keep_proxy_regs_on = of_property_read_bool(pdev->dev.of_node,
+						"qcom,keep-proxy-regs-on");
+
+	rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &d->desc.name);
+	if (rc)
+		return rc;
+
+	/* Defaulting smem_id to be not present */
+	d->smem_id = -1;
+
+	if (of_find_property(pdev->dev.of_node, "qcom,smem-id", &len)) {
+		rc = of_property_read_u32(pdev->dev.of_node, "qcom,smem-id",
+							&d->smem_id);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to get the smem_id.\n");
+			return rc;
+		}
+	}
+
+	d->desc.dev = &pdev->dev;
+	d->desc.owner = THIS_MODULE;
+	d->desc.ops = &pil_ops_trusted;
+
+	d->desc.proxy_timeout = PROXY_TIMEOUT_MS;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,proxy-timeout-ms",
+					&proxy_timeout);
+	if (!rc)
+		d->desc.proxy_timeout = proxy_timeout;
+
+	if (!d->subsys_desc.no_auth) {
+		rc = piltz_resc_init(pdev, d);
+		if (rc)
+			/*
+			 * Propagate the real error code: piltz_resc_init()
+			 * may fail with -EPROBE_DEFER (regulators not ready
+			 * yet), which must reach the driver core so the
+			 * probe is retried instead of failing permanently
+			 * (returning -ENOENT here would swallow it).
+			 */
+			return rc;
+
+		rc = of_property_read_u32(pdev->dev.of_node, "qcom,pas-id",
+								&d->pas_id);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to find the pas_id.\n");
+			return rc;
+		}
+		scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE0);
+	}
+
+	rc = pil_desc_init(&d->desc);
+	if (rc)
+		return rc;
+
+	init_completion(&d->stop_ack);
+
+	d->subsys_desc.name = d->desc.name;
+	d->subsys_desc.owner = THIS_MODULE;
+	d->subsys_desc.dev = &pdev->dev;
+	d->subsys_desc.shutdown = subsys_shutdown;
+	d->subsys_desc.powerup = subsys_powerup;
+	d->subsys_desc.ramdump = subsys_ramdump;
+	d->subsys_desc.free_memory = subsys_free_memory;
+	d->subsys_desc.crash_shutdown = subsys_crash_shutdown;
+	d->subsys_desc.err_fatal_handler = subsys_err_fatal_intr_handler;
+	d->subsys_desc.wdog_bite_handler = subsys_wdog_bite_irq_handler;
+	d->subsys_desc.stop_ack_handler = subsys_stop_ack_intr_handler;
+
+	d->ramdump_dev = create_ramdump_device(d->subsys_desc.name,
+								&pdev->dev);
+	if (!d->ramdump_dev) {
+		rc = -ENOMEM;
+		goto err_ramdump;
+	}
+
+	d->subsys = subsys_register(&d->subsys_desc);
+	if (IS_ERR(d->subsys)) {
+		rc = PTR_ERR(d->subsys);
+		goto err_subsys;
+	}
+
+	return 0;
+err_subsys:
+	destroy_ramdump_device(d->ramdump_dev);
+err_ramdump:
+	pil_desc_release(&d->desc);
+
+	return rc;
+}
+
+/* Tear down everything pil_tz_driver_probe() registered, in reverse. */
+static int pil_tz_driver_exit(struct platform_device *pdev)
+{
+	struct pil_tz_data *d = platform_get_drvdata(pdev);
+
+	subsys_unregister(d->subsys);
+	destroy_ramdump_device(d->ramdump_dev);
+	pil_desc_release(&d->desc);
+
+	return 0;
+}
+
+/*
+ * NOTE(review): there is no MODULE_DEVICE_TABLE(of, pil_tz_match_table),
+ * so module autoloading from the DT compatible will not work when built
+ * as a module -- confirm whether that is intentional.
+ */
+static struct of_device_id pil_tz_match_table[] = {
+	{.compatible = "qcom,pil-tz-generic"},
+	{}
+};
+
+static struct platform_driver pil_tz_driver = {
+	.probe = pil_tz_driver_probe,
+	.remove = pil_tz_driver_exit,
+	.driver = {
+		.name = "subsys-pil-tz",
+		.of_match_table = pil_tz_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pil_tz_init(void)
+{
+	return platform_driver_register(&pil_tz_driver);
+}
+module_init(pil_tz_init);
+
+static void __exit pil_tz_exit(void)
+{
+	platform_driver_unregister(&pil_tz_driver);
+}
+module_exit(pil_tz_exit);
+
+MODULE_DESCRIPTION("Support for booting subsystems");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/subsystem_notif.c b/drivers/soc/qcom/subsystem_notif.c
new file mode 100644
index 000000000000..431bbd8cee6f
--- /dev/null
+++ b/drivers/soc/qcom/subsystem_notif.c
@@ -0,0 +1,222 @@
+/* Copyright (c) 2011, 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Subsystem Notifier -- Provides notifications
+ * of subsys events.
+ *
+ * Use subsys_notif_register_notifier to register for notifications
+ * and subsys_notif_queue_notification to send notifications.
+ *
+ */
+
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+
+
+/* Per-subsystem notifier entry: a name plus its SRCU receiver chain. */
+struct subsys_notif_info {
+	char name[50];
+	struct srcu_notifier_head subsys_notif_rcvr_list;
+	/* Membership in subsystem_list below. */
+	struct list_head list;
+};
+
+static LIST_HEAD(subsystem_list);
+/* Protects subsystem_list. */
+static DEFINE_MUTEX(notif_lock);
+/* Serialises entry creation in subsys_notif_add_subsys(). */
+static DEFINE_MUTEX(notif_add_lock);
+
+#if defined(SUBSYS_RESTART_DEBUG)
+static void subsys_notif_reg_test_notifier(const char *);
+#endif
+
+/*
+ * Look up a subsystem's notifier entry by name under notif_lock.
+ * Returns NULL when no entry with that name has been added yet.
+ */
+static struct subsys_notif_info *_notif_find_subsys(const char *subsys_name)
+{
+	struct subsys_notif_info *entry, *found = NULL;
+
+	mutex_lock(&notif_lock);
+	list_for_each_entry(entry, &subsystem_list, list) {
+		if (!strncmp(entry->name, subsys_name,
+				ARRAY_SIZE(entry->name))) {
+			found = entry;
+			break;
+		}
+	}
+	mutex_unlock(&notif_lock);
+
+	return found;
+}
+
+/*
+ * Register @nb for notifications about @subsys_name, creating the
+ * notifier entry on first reference.  Returns the opaque handle for
+ * subsys_notif_unregister_notifier(), or an ERR_PTR on failure.
+ */
+void *subsys_notif_register_notifier(
+			const char *subsys_name, struct notifier_block *nb)
+{
+	int ret;
+	struct subsys_notif_info *subsys = _notif_find_subsys(subsys_name);
+
+	if (!subsys) {
+
+		/* Possible first time reference to this subsystem. Add it. */
+		subsys = (struct subsys_notif_info *)
+				subsys_notif_add_subsys(subsys_name);
+
+		/*
+		 * subsys_notif_add_subsys() returns NULL for a NULL name and
+		 * an ERR_PTR on allocation failure; the previous plain
+		 * !subsys test let the ERR_PTR through and we then
+		 * dereferenced it below.  Reject both forms.
+		 */
+		if (IS_ERR_OR_NULL(subsys))
+			return ERR_PTR(-EINVAL);
+	}
+
+	ret = srcu_notifier_chain_register(
+		&subsys->subsys_notif_rcvr_list, nb);
+
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return subsys;
+}
+EXPORT_SYMBOL(subsys_notif_register_notifier);
+
+/*
+ * Remove @nb from the notifier chain behind @subsys_handle (a handle
+ * returned by subsys_notif_register_notifier()).  Returns 0 on success
+ * or a negative errno.
+ */
+int subsys_notif_unregister_notifier(void *subsys_handle,
+				struct notifier_block *nb)
+{
+	struct subsys_notif_info *subsys = subsys_handle;
+
+	if (!subsys)
+		return -EINVAL;
+
+	return srcu_notifier_chain_unregister(
+				&subsys->subsys_notif_rcvr_list, nb);
+}
+EXPORT_SYMBOL(subsys_notif_unregister_notifier);
+
+/*
+ * Look up the notifier entry for @subsys_name, creating it on first
+ * reference.  Entries are never freed; the list only grows.
+ *
+ * Returns the entry, NULL when @subsys_name is NULL, or an ERR_PTR on
+ * allocation failure (callers must check with IS_ERR_OR_NULL()).
+ */
+void *subsys_notif_add_subsys(const char *subsys_name)
+{
+	struct subsys_notif_info *subsys = NULL;
+
+	if (!subsys_name)
+		goto done;
+
+	mutex_lock(&notif_add_lock);
+
+	subsys = _notif_find_subsys(subsys_name);
+
+	if (subsys) {
+		mutex_unlock(&notif_add_lock);
+		goto done;
+	}
+
+	subsys = kmalloc(sizeof(struct subsys_notif_info), GFP_KERNEL);
+
+	if (!subsys) {
+		mutex_unlock(&notif_add_lock);
+		/* Report the real failure: out of memory, not bad argument. */
+		return ERR_PTR(-ENOMEM);
+	}
+
+	strlcpy(subsys->name, subsys_name, ARRAY_SIZE(subsys->name));
+
+	srcu_init_notifier_head(&subsys->subsys_notif_rcvr_list);
+
+	INIT_LIST_HEAD(&subsys->list);
+
+	mutex_lock(&notif_lock);
+	list_add_tail(&subsys->list, &subsystem_list);
+	mutex_unlock(&notif_lock);
+
+	#if defined(SUBSYS_RESTART_DEBUG)
+	subsys_notif_reg_test_notifier(subsys->name);
+	#endif
+
+	mutex_unlock(&notif_add_lock);
+
+done:
+	return subsys;
+}
+EXPORT_SYMBOL(subsys_notif_add_subsys);
+
+/*
+ * Deliver @notif_type with payload @data to every notifier registered
+ * against @subsys_handle.  Returns the notifier-chain result, or
+ * -EINVAL for a NULL handle or out-of-range notification type.
+ */
+int subsys_notif_queue_notification(void *subsys_handle,
+					enum subsys_notif_type notif_type,
+					void *data)
+{
+	struct subsys_notif_info *subsys = subsys_handle;
+
+	if (!subsys)
+		return -EINVAL;
+
+	if (notif_type < 0 || notif_type >= SUBSYS_NOTIF_TYPE_COUNT)
+		return -EINVAL;
+
+	return srcu_notifier_call_chain(&subsys->subsys_notif_rcvr_list,
+					notif_type, data);
+}
+EXPORT_SYMBOL(subsys_notif_queue_notification);
+
+#if defined(SUBSYS_RESTART_DEBUG)
+/* Debug-only helpers: attach a test notifier that logs every event. */
+static const char *notif_to_string(enum subsys_notif_type notif_type)
+{
+	switch (notif_type) {
+
+	case	SUBSYS_BEFORE_SHUTDOWN:
+		return __stringify(SUBSYS_BEFORE_SHUTDOWN);
+
+	case	SUBSYS_AFTER_SHUTDOWN:
+		return __stringify(SUBSYS_AFTER_SHUTDOWN);
+
+	case	SUBSYS_BEFORE_POWERUP:
+		return __stringify(SUBSYS_BEFORE_POWERUP);
+
+	case	SUBSYS_AFTER_POWERUP:
+		return __stringify(SUBSYS_AFTER_POWERUP);
+
+	default:
+		return "unknown";
+	}
+}
+
+/* Test notifier callback: just log what was delivered. */
+static int subsys_notifier_test_call(struct notifier_block *this,
+				  unsigned long code,
+				  void *data)
+{
+	switch (code) {
+
+	default:
+		printk(KERN_WARNING "%s: Notification %s from subsystem %p\n",
+			__func__, notif_to_string(code), data);
+	break;
+
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nb = {
+	.notifier_call = subsys_notifier_test_call,
+};
+
+/* Registered automatically for every new subsystem entry (debug only). */
+static void subsys_notif_reg_test_notifier(const char *subsys_name)
+{
+	void *handle = subsys_notif_register_notifier(subsys_name, &nb);
+	printk(KERN_WARNING "%s: Registered test notifier, handle=%p",
+			__func__, handle);
+}
+#endif
+
+MODULE_DESCRIPTION("Subsystem Restart Notifier");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
new file mode 100644
index 000000000000..4175c98c065a
--- /dev/null
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -0,0 +1,1772 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "subsys-restart: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/time.h>
+#include <linux/suspend.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/sysmon.h>
+
+#include <asm/current.h>
+
+#define DISABLE_SSR 0x9889deed
+/* If set to 0x9889deed, call to subsystem_restart_dev() returns immediately */
+static uint disable_restart_work;
+module_param(disable_restart_work, uint, S_IRUGO | S_IWUSR);
+
+static int enable_debug;
+module_param(enable_debug, int, S_IRUGO | S_IWUSR);
+
+/* The maximum shutdown timeout is the product of MAX_LOOPS and DELAY_MS. */
+#define SHUTDOWN_ACK_MAX_LOOPS 100
+#define SHUTDOWN_ACK_DELAY_MS 100
+
+/**
+ * enum p_subsys_state - state of a subsystem (private)
+ * @SUBSYS_NORMAL: subsystem is operating normally
+ * @SUBSYS_CRASHED: subsystem has crashed and hasn't been shutdown
+ * @SUBSYS_RESTARTING: subsystem has been shutdown and is now restarting
+ *
+ * The 'private' side of the subsystem state used to determine where in the
+ * restart process the subsystem is.
+ */
+enum p_subsys_state {
+	SUBSYS_NORMAL,
+	SUBSYS_CRASHED,
+	SUBSYS_RESTARTING,
+};
+
+/**
+ * enum subsys_state - state of a subsystem (public)
+ * @SUBSYS_OFFLINE: subsystem is offline
+ * @SUBSYS_ONLINE: subsystem is online
+ *
+ * The 'public' side of the subsystem state, exposed to userspace.
+ */
+enum subsys_state {
+	SUBSYS_OFFLINE,
+	SUBSYS_ONLINE,
+};
+
+/* Userspace-visible names for enum subsys_state (sysfs "state"). */
+static const char * const subsys_states[] = {
+	[SUBSYS_OFFLINE] = "OFFLINE",
+	[SUBSYS_ONLINE] = "ONLINE",
+};
+
+/* Userspace-visible names for restart levels (sysfs "restart_level"). */
+static const char * const restart_levels[] = {
+	[RESET_SOC] = "SYSTEM",
+	[RESET_SUBSYS_COUPLED] = "RELATED",
+};
+
+/**
+ * struct subsys_tracking - track state of a subsystem or restart order
+ * @p_state: private state of subsystem/order
+ * @state: public state of subsystem/order
+ * @s_lock: protects p_state
+ * @lock: protects subsystem/order callbacks and state
+ *
+ * Tracks the state of a subsystem or a set of subsystems (restart order).
+ * Doing this avoids the need to grab each subsystem's lock and update
+ * each subsystems state when restarting an order.
+ */
+struct subsys_tracking {
+	enum p_subsys_state p_state;
+	spinlock_t s_lock;
+	enum subsys_state state;
+	struct mutex lock;
+};
+
+/**
+ * struct subsys_soc_restart_order - subsystem restart order
+ * @device_ptrs: device-tree nodes of the subsystems in this restart order
+ * @count: number of subsystems in order
+ * @track: state tracking and locking
+ * @subsys_ptrs: pointers to subsystems in this restart order
+ * @list: membership in the global list of restart orders
+ */
+struct subsys_soc_restart_order {
+	struct device_node **device_ptrs;
+	int count;
+
+	struct subsys_tracking track;
+	struct subsys_device **subsys_ptrs;
+	struct list_head list;
+};
+
+/* One entry in restart_log_list: which device crashed and when. */
+struct restart_log {
+	struct timeval time;
+	struct subsys_device *dev;
+	struct list_head list;
+};
+
+/**
+ * struct subsys_device - subsystem device
+ * @desc: subsystem descriptor
+ * @work: context for subsystem_restart_wq_func() for this device
+ * @ssr_wlock: prevents suspend during subsystem_restart()
+ * @wlname: name of wakeup source
+ * @device_restart_work: work struct for device restart
+ * @track: state tracking and locking
+ * @notify: subsys notify handle
+ * @dev: device
+ * @owner: module that provides @desc
+ * @count: reference count of subsystem_get()/subsystem_put()
+ * @id: ida
+ * @restart_level: restart level (0 - panic, 1 - related, 2 - independent, etc.)
+ * @restart_order: order of other devices this devices restarts with
+ * @crash_count: number of times the device has crashed
+ * @dentry: debugfs directory for this device
+ * @do_ramdump_on_put: ramdump on subsystem_put() if true
+ * @char_dev: character device (cdev) associated with this subsystem
+ * @dev_no: device number of @char_dev
+ * @err_ready: completion variable to record error ready from subsystem
+ * @crashed: indicates if subsystem has crashed
+ * @notif_state: current state of subsystem in terms of subsys notifications
+ * @list: membership in the global subsystem list
+ */
+struct subsys_device {
+	struct subsys_desc *desc;
+	struct work_struct work;
+	struct wakeup_source ssr_wlock;
+	char wlname[64];
+	struct work_struct device_restart_work;
+	struct subsys_tracking track;
+
+	void *notify;
+	struct device dev;
+	struct module *owner;
+	int count;
+	int id;
+	int restart_level;
+	int crash_count;
+	struct subsys_soc_restart_order *restart_order;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dentry;
+#endif
+	bool do_ramdump_on_put;
+	struct cdev char_dev;
+	dev_t dev_no;
+	struct completion err_ready;
+	bool crashed;
+	int notif_state;
+	struct list_head list;
+};
+
+/* Map a struct device embedded in a subsys_device back to its owner. */
+static struct subsys_device *to_subsys(struct device *d)
+{
+	return container_of(d, struct subsys_device, dev);
+}
+
+/*
+ * Return the state tracking for @subsys: the shared tracking of its
+ * restart order when it belongs to one, otherwise its own.
+ */
+static struct subsys_tracking *subsys_get_track(struct subsys_device *subsys)
+{
+	struct subsys_soc_restart_order *order = subsys->restart_order;
+
+	return order ? &order->track : &subsys->track;
+}
+
+/* sysfs: subsystem name. */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->name);
+}
+
+/* sysfs: public ONLINE/OFFLINE state. */
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	enum subsys_state state = to_subsys(dev)->track.state;
+	return snprintf(buf, PAGE_SIZE, "%s\n", subsys_states[state]);
+}
+
+/* sysfs: number of crashes seen so far. */
+static ssize_t crash_count_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", to_subsys(dev)->crash_count);
+}
+
+/*
+ * sysfs: current restart level as text.
+ * NOTE(review): assumes restart_level always indexes restart_levels[];
+ * confirm no level outside the two named entries can be set.
+ */
+static ssize_t
+restart_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int level = to_subsys(dev)->restart_level;
+	return snprintf(buf, PAGE_SIZE, "%s\n", restart_levels[level]);
+}
+
+/*
+ * sysfs store for restart_level: accepts one of the strings in
+ * restart_levels[] (case-insensitive, optional trailing newline).
+ * Returns the original byte count on success, -EPERM otherwise.
+ */
+static ssize_t restart_level_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	const char *p;
+	int i, orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	/*
+	 * Reject empty writes: strncasecmp() with length 0 matches any
+	 * string, so a bare newline used to silently select the first
+	 * (most drastic, full-SoC) restart level.
+	 */
+	if (!count)
+		return -EPERM;
+
+	for (i = 0; i < ARRAY_SIZE(restart_levels); i++)
+		if (!strncasecmp(buf, restart_levels[i], count)) {
+			subsys->restart_level = i;
+			return orig_count;
+		}
+	return -EPERM;
+}
+
+/* sysfs: current firmware image name. */
+static ssize_t firmware_name_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->fw_name);
+}
+
+/*
+ * sysfs store: change the firmware image name used on next powerup.
+ * NOTE(review): the strlcpy() below bounds the copy by the input length
+ * (count + 1), not by the size of desc->fw_name -- confirm fw_name is
+ * always large enough for any sysfs write, or this can overflow.
+ */
+static ssize_t firmware_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	struct subsys_tracking *track = subsys_get_track(subsys);
+	const char *p;
+	int orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	pr_info("Changing subsys fw_name to %s\n", buf);
+	mutex_lock(&track->lock);
+	strlcpy(subsys->desc->fw_name, buf, count + 1);
+	mutex_unlock(&track->lock);
+	return orig_count;
+}
+
+/* sysfs: report system_debug as the string "set" or "reset". */
+static ssize_t system_debug_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	char p[6] = "set";
+
+	if (!subsys->desc->system_debug)
+		strlcpy(p, "reset", sizeof(p));
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", p);
+}
+
+/*
+ * sysfs store: "set"/"reset" toggles whole-system debug (panic + full
+ * ramdump on watchdog bite).  Returns the original byte count on
+ * success, -EPERM for anything else.
+ */
+static ssize_t system_debug_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	const char *p;
+	int orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	/*
+	 * Reject empty writes: strncasecmp() with length 0 matches any
+	 * string, so a bare newline used to silently enable system_debug.
+	 */
+	if (!count)
+		return -EPERM;
+
+	if (!strncasecmp(buf, "set", count))
+		subsys->desc->system_debug = true;
+	else if (!strncasecmp(buf, "reset", count))
+		subsys->desc->system_debug = false;
+	else
+		return -EPERM;
+	return orig_count;
+}
+
+/* Return the device's current restart level (index into restart_levels). */
+int subsys_get_restart_level(struct subsys_device *dev)
+{
+	return dev->restart_level;
+}
+EXPORT_SYMBOL(subsys_get_restart_level);
+
+/*
+ * Update the public (sysfs-visible) state under s_lock, notifying
+ * pollers of the "state" attribute only when the state actually changes.
+ */
+static void subsys_set_state(struct subsys_device *subsys,
+			     enum subsys_state state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&subsys->track.s_lock, flags);
+	if (subsys->track.state != state) {
+		subsys->track.state = state;
+		spin_unlock_irqrestore(&subsys->track.s_lock, flags);
+		sysfs_notify(&subsys->dev.kobj, NULL, "state");
+		return;
+	}
+	spin_unlock_irqrestore(&subsys->track.s_lock, flags);
+}
+
+/**
+ * subsys_default_online() - Mark a subsystem as online by default
+ * @dev: subsystem to mark as online
+ *
+ * Marks a subsystem as "online" without increasing the reference count
+ * on the subsystem. This is typically used by subsystems that are already
+ * online when the kernel boots up.
+ */
+void subsys_default_online(struct subsys_device *dev)
+{
+	subsys_set_state(dev, SUBSYS_ONLINE);
+}
+EXPORT_SYMBOL(subsys_default_online);
+
+/* Default sysfs attributes attached to every device on the subsys bus. */
+static struct device_attribute subsys_attrs[] = {
+	__ATTR_RO(name),
+	__ATTR_RO(state),
+	__ATTR_RO(crash_count),
+	__ATTR(restart_level, 0644, restart_level_show, restart_level_store),
+	__ATTR(firmware_name, 0644, firmware_name_show, firmware_name_store),
+	__ATTR(system_debug, 0644, system_debug_show, system_debug_store),
+	__ATTR_NULL,
+};
+
+/* Virtual bus that groups all subsystem devices under /sys/bus/msm_subsys. */
+static struct bus_type subsys_bus_type = {
+	.name		= "msm_subsys",
+	.dev_attrs	= subsys_attrs,
+};
+
+static DEFINE_IDA(subsys_ida);
+
+static int enable_ramdumps;
+module_param(enable_ramdumps, int, S_IRUGO | S_IWUSR);
+
+static int enable_mini_ramdumps;
+module_param(enable_mini_ramdumps, int, S_IRUGO | S_IWUSR);
+
+struct workqueue_struct *ssr_wq;
+static struct class *char_class;
+
+static LIST_HEAD(restart_log_list);
+static LIST_HEAD(subsys_list);
+static LIST_HEAD(ssr_order_list);
+static DEFINE_MUTEX(soc_order_reg_lock);
+static DEFINE_MUTEX(restart_log_mutex);
+static DEFINE_MUTEX(subsys_list_lock);
+static DEFINE_MUTEX(char_device_lock);
+static DEFINE_MUTEX(ssr_order_mutex);
+
+/*
+ * Find the SSR order (parsed earlier from DT) that contains this
+ * device's of_node, record the subsys_device pointer in that order's
+ * slot, and return the order.  Returns NULL when the device is not part
+ * of any coupled-restart order.
+ */
+static struct subsys_soc_restart_order *
+update_restart_order(struct subsys_device *dev)
+{
+	int i;
+	struct subsys_soc_restart_order *order;
+	struct device_node *device = dev->desc->dev->of_node;
+
+	mutex_lock(&soc_order_reg_lock);
+	list_for_each_entry(order, &ssr_order_list, list) {
+		for (i = 0; i < order->count; i++) {
+			if (order->device_ptrs[i] == device) {
+				order->subsys_ptrs[i] = dev;
+				goto found;
+			}
+		}
+	}
+	order = NULL;
+found:
+	mutex_unlock(&soc_order_reg_lock);
+
+	return order;
+}
+
+static int max_restarts;
+module_param(max_restarts, int, 0644);
+
+static long max_history_time = 3600;
+module_param(max_history_time, long, 0644);
+
+/*
+ * do_epoch_check() - Panic when subsystems crash too frequently.
+ *
+ * Appends the current restart to a global log, prunes entries older than
+ * max_history_time seconds, and panics when max_restarts (or more)
+ * restarts happened within that window. The module parameters are
+ * snapshotted once per call, under restart_log_mutex, so a concurrent
+ * sysfs write cannot change the thresholds mid-check.
+ */
+static void do_epoch_check(struct subsys_device *dev)
+{
+	int n = 0;
+	struct timeval *time_first = NULL, *curr_time;
+	struct restart_log *r_log, *temp;
+	static int max_restarts_check;
+	static long max_history_time_check;
+
+	mutex_lock(&restart_log_mutex);
+
+	max_restarts_check = max_restarts;
+	max_history_time_check = max_history_time;
+
+	/* Check if epoch checking is enabled */
+	if (!max_restarts_check)
+		goto out;
+
+	/* Best-effort: silently skip the check if allocation fails. */
+	r_log = kmalloc(sizeof(struct restart_log), GFP_KERNEL);
+	if (!r_log)
+		goto out;
+	r_log->dev = dev;
+	do_gettimeofday(&r_log->time);
+	curr_time = &r_log->time;
+	INIT_LIST_HEAD(&r_log->list);
+
+	list_add_tail(&r_log->list, &restart_log_list);
+
+	/* Prune stale entries; count those inside the window (r_log is reused
+	 * here as the loop cursor, so it no longer points at the new entry). */
+	list_for_each_entry_safe(r_log, temp, &restart_log_list, list) {
+
+		if ((curr_time->tv_sec - r_log->time.tv_sec) >
+				max_history_time_check) {
+
+			pr_debug("Deleted node with restart_time = %ld\n",
+					r_log->time.tv_sec);
+			list_del(&r_log->list);
+			kfree(r_log);
+			continue;
+		}
+		if (!n) {
+			time_first = &r_log->time;
+			pr_debug("Time_first: %ld\n", time_first->tv_sec);
+		}
+		n++;
+		pr_debug("Restart_time: %ld\n", r_log->time.tv_sec);
+	}
+
+	if (time_first && n >= max_restarts_check) {
+		if ((curr_time->tv_sec - time_first->tv_sec) <
+				max_history_time_check)
+			panic("Subsystems have crashed %d times in less than "
+				"%ld seconds!", max_restarts_check,
+				max_history_time_check);
+	}
+
+out:
+	mutex_unlock(&restart_log_mutex);
+}
+
+static int is_ramdump_enabled(struct subsys_device *dev)
+{
+	/*
+	 * A per-subsystem ramdump-disable GPIO, when configured, overrides
+	 * the global enable_ramdumps module parameter.
+	 */
+	if (dev->desc->ramdump_disable_gpio)
+		return dev->desc->ramdump_disable ? 0 : 1;
+
+	return enable_ramdumps;
+}
+
+static void send_sysmon_notif(struct subsys_device *dev)
+{
+	struct subsys_device *peer;
+
+	/* Replay the last recorded notification of every peer to @dev. */
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(peer, &subsys_list, list) {
+		if (peer == dev || peer->notif_state <= 0)
+			continue;
+		sysmon_send_event(dev->desc, peer->desc, peer->notif_state);
+	}
+	mutex_unlock(&subsys_list_lock);
+}
+
+static void for_each_subsys_device(struct subsys_device **list, unsigned count,
+		void *data, void (*fn)(struct subsys_device *, void *))
+{
+	unsigned i;
+
+	/* Apply @fn to every non-NULL entry, forwarding @data unchanged. */
+	for (i = 0; i < count; i++) {
+		if (list[i])
+			fn(list[i], data);
+	}
+}
+
+/*
+ * Deliver @notif for each device in @list: record it on the device,
+ * forward it over sysmon to every other online subsystem, and queue it
+ * on the device's own notifier chain with crash/ramdump metadata.
+ */
+static void notify_each_subsys_device(struct subsys_device **list,
+		unsigned count,
+		enum subsys_notif_type notif, void *data)
+{
+	struct subsys_device *subsys;
+
+	while (count--) {
+		struct subsys_device *dev = *list++;
+		struct notif_data notif_data;
+		struct platform_device *pdev;
+
+		if (!dev)
+			continue;
+
+		pdev = container_of(dev->desc->dev, struct platform_device,
+									dev);
+		dev->notif_state = notif;
+
+		/* Tell every other online subsystem about this event. */
+		mutex_lock(&subsys_list_lock);
+		list_for_each_entry(subsys, &subsys_list, list)
+			if (dev != subsys &&
+				subsys->track.state == SUBSYS_ONLINE)
+				sysmon_send_event(subsys->desc, dev->desc,
+									notif);
+		mutex_unlock(&subsys_list_lock);
+
+		/* A freshly booted subsystem gets a replay of peer states. */
+		if (notif == SUBSYS_AFTER_POWERUP &&
+				dev->track.state == SUBSYS_ONLINE)
+			send_sysmon_notif(dev);
+
+		notif_data.crashed = subsys_get_crash_status(dev);
+		notif_data.enable_ramdump = is_ramdump_enabled(dev);
+		notif_data.enable_mini_ramdumps = enable_mini_ramdumps;
+		notif_data.no_auth = dev->desc->no_auth;
+		notif_data.pdev = pdev;
+
+		subsys_notif_queue_notification(dev->notify, notif,
+								&notif_data);
+	}
+}
+
+static void enable_all_irqs(struct subsys_device *dev)
+{
+	struct subsys_desc *desc = dev->desc;
+
+	/* Arm every IRQ the descriptor declared (requested disabled). */
+	if (desc->err_ready_irq)
+		enable_irq(desc->err_ready_irq);
+	if (desc->wdog_bite_irq && desc->wdog_bite_handler) {
+		enable_irq(desc->wdog_bite_irq);
+		/* Let a watchdog bite wake the system from suspend. */
+		irq_set_irq_wake(desc->wdog_bite_irq, 1);
+	}
+	if (desc->err_fatal_irq && desc->err_fatal_handler)
+		enable_irq(desc->err_fatal_irq);
+	if (desc->stop_ack_irq && desc->stop_ack_handler)
+		enable_irq(desc->stop_ack_irq);
+}
+
+static void disable_all_irqs(struct subsys_device *dev)
+{
+	struct subsys_desc *desc = dev->desc;
+
+	/* Exact mirror of enable_all_irqs(), including wake disarm. */
+	if (desc->err_ready_irq)
+		disable_irq(desc->err_ready_irq);
+	if (desc->wdog_bite_irq && desc->wdog_bite_handler) {
+		disable_irq(desc->wdog_bite_irq);
+		irq_set_irq_wake(desc->wdog_bite_irq, 0);
+	}
+	if (desc->err_fatal_irq && desc->err_fatal_handler)
+		disable_irq(desc->err_fatal_irq);
+	if (desc->stop_ack_irq && desc->stop_ack_handler)
+		disable_irq(desc->stop_ack_irq);
+}
+
+/**
+ * wait_for_shutdown_ack() - Poll for a subsystem's shutdown-ack GPIO
+ * @desc: subsystem descriptor (may be NULL)
+ *
+ * Returns the number of polling loops remaining (> 0) once the GPIO is
+ * asserted, 0 when no descriptor/ack GPIO is configured, or -ETIMEDOUT.
+ */
+int wait_for_shutdown_ack(struct subsys_desc *desc)
+{
+	int count;
+
+	/*
+	 * Fix: the original test "desc && !desc->shutdown_ack_gpio" fell
+	 * through for a NULL @desc and dereferenced it below. Treat a NULL
+	 * descriptor the same as "no ack GPIO configured".
+	 */
+	if (!desc || !desc->shutdown_ack_gpio)
+		return 0;
+
+	for (count = SHUTDOWN_ACK_MAX_LOOPS; count > 0; count--) {
+		if (gpio_get_value(desc->shutdown_ack_gpio))
+			return count;
+		msleep(SHUTDOWN_ACK_DELAY_MS);
+	}
+
+	pr_err("[%s]: Timed out waiting for shutdown ack\n", desc->name);
+
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(wait_for_shutdown_ack);
+
+static int wait_for_err_ready(struct subsys_device *subsys)
+{
+	unsigned long left;
+
+	/* Nothing to wait for without an err-ready IRQ, or in debug mode. */
+	if (!subsys->desc->err_ready_irq || enable_debug == 1)
+		return 0;
+
+	/* Give the subsystem up to 10 seconds to signal error-ready. */
+	left = wait_for_completion_timeout(&subsys->err_ready,
+					msecs_to_jiffies(10000));
+	if (left)
+		return 0;
+
+	pr_err("[%s]: Error ready timed out\n", subsys->desc->name);
+	return -ETIMEDOUT;
+}
+
+/*
+ * Shut one subsystem down as part of an SSR sequence. A failed shutdown
+ * is fatal (panic) because the restart order cannot safely continue.
+ */
+static void subsystem_shutdown(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+
+	pr_info("[%p]: Shutting down %s\n", current, name);
+	if (dev->desc->shutdown(dev->desc, true) < 0)
+		panic("subsys-restart: [%p]: Failed to shutdown %s!",
+							current, name);
+	dev->crash_count++;
+	subsys_set_state(dev, SUBSYS_OFFLINE);
+	disable_all_irqs(dev);
+}
+
+static void subsystem_ramdump(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+
+	/* Collect a ramdump if the subsystem provides a handler. */
+	if (dev->desc->ramdump &&
+	    dev->desc->ramdump(is_ramdump_enabled(dev), dev->desc) < 0)
+		pr_warn("%s[%p]: Ramdump failed.\n", name, current);
+
+	dev->do_ramdump_on_put = false;
+}
+
+static void subsystem_free_memory(struct subsys_device *dev, void *data)
+{
+	/* Optional hook: release memory held across a restart. */
+	if (!dev->desc->free_memory)
+		return;
+
+	dev->desc->free_memory(dev->desc);
+}
+
+/*
+ * Power a subsystem back up during an SSR sequence. Any failure here is
+ * fatal (panic) because the restart order cannot be safely unwound.
+ */
+static void subsystem_powerup(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+	int ret;
+
+	pr_info("[%p]: Powering up %s\n", current, name);
+	/* Re-arm the err_ready completion before booting the image. */
+	init_completion(&dev->err_ready);
+
+	if (dev->desc->powerup(dev->desc) < 0) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
+		panic("[%p]: Powerup error: %s!", current, name);
+	}
+	enable_all_irqs(dev);
+
+	ret = wait_for_err_ready(dev);
+	if (ret) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
+		panic("[%p]: Timed out waiting for error ready: %s!",
+							current, name);
+	}
+	subsys_set_state(dev, SUBSYS_ONLINE);
+	subsys_set_crash_status(dev, false);
+}
+
+/* bus_find_device() match callback: compare against a subsystem name. */
+static int __find_subsys(struct device *dev, void *data)
+{
+	return strcmp(to_subsys(dev)->desc->name, data) == 0;
+}
+
+static struct subsys_device *find_subsys(const char *str)
+{
+	struct device *dev;
+
+	if (str == NULL)
+		return NULL;
+
+	/* On a match, bus_find_device() returns the device with a
+	 * reference held; callers must put_device() when done. */
+	dev = bus_find_device(&subsys_bus_type, NULL, (void *)str,
+			__find_subsys);
+	if (!dev)
+		return NULL;
+
+	return to_subsys(dev);
+}
+
+/*
+ * Power a subsystem up on behalf of subsystem_get(). Returns 0 on
+ * success. Unlike the SSR path, an err-ready timeout here does not
+ * panic: the subsystem is shut back down and the error is returned.
+ */
+static int subsys_start(struct subsys_device *subsys)
+{
+	int ret;
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_POWERUP,
+								NULL);
+
+	init_completion(&subsys->err_ready);
+	ret = subsys->desc->powerup(subsys->desc);
+	if (ret) {
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+									NULL);
+		return ret;
+	}
+	enable_all_irqs(subsys);
+
+	/* Non-loadable subsystems are online as soon as powerup returns. */
+	if (subsys->desc->is_not_loadable) {
+		subsys_set_state(subsys, SUBSYS_ONLINE);
+		return 0;
+	}
+
+	ret = wait_for_err_ready(subsys);
+	if (ret) {
+		/* pil-boot succeeded but we need to shutdown
+		 * the device because error ready timed out.
+		 */
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+									NULL);
+		subsys->desc->shutdown(subsys->desc, false);
+		disable_all_irqs(subsys);
+		return ret;
+	} else {
+		subsys_set_state(subsys, SUBSYS_ONLINE);
+	}
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_POWERUP,
+								NULL);
+	return ret;
+}
+
+/*
+ * Cleanly stop a subsystem: try a graceful sysmon shutdown first (unless
+ * DT forces an immediate one via "qcom,pil-force-shutdown"), then power
+ * it off, notifying listeners around the transition.
+ */
+static void subsys_stop(struct subsys_device *subsys)
+{
+	const char *name = subsys->desc->name;
+
+	if (!of_property_read_bool(subsys->desc->dev->of_node,
+					"qcom,pil-force-shutdown")) {
+		subsys->desc->sysmon_shutdown_ret =
+				sysmon_send_shutdown(subsys->desc);
+		if (subsys->desc->sysmon_shutdown_ret)
+			pr_debug("Graceful shutdown failed for %s\n", name);
+	}
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_SHUTDOWN, NULL);
+	subsys->desc->shutdown(subsys->desc, false);
+	subsys_set_state(subsys, SUBSYS_OFFLINE);
+	disable_all_irqs(subsys);
+	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_SHUTDOWN, NULL);
+}
+
+/*
+ * Common implementation for subsystem_get()/subsystem_get_with_fwname().
+ *
+ * Takes a device and module reference, recursively boots the subsystem
+ * this one depends on, then powers this subsystem up on the 0 -> 1
+ * transition of its use count. @fw_name, when non-NULL, replaces the
+ * firmware image name before that first boot.
+ */
+void *__subsystem_get(const char *name, const char *fw_name)
+{
+	struct subsys_device *subsys;
+	struct subsys_device *subsys_d;
+	int ret;
+	void *retval;
+	struct subsys_tracking *track;
+
+	if (!name)
+		return NULL;
+
+	/* find_subsys() returns the device with a reference held. */
+	subsys = retval = find_subsys(name);
+	if (!subsys)
+		return ERR_PTR(-ENODEV);
+	if (!try_module_get(subsys->owner)) {
+		retval = ERR_PTR(-ENODEV);
+		goto err_module;
+	}
+
+	/* Boot the dependency first (NULL depends_on is a no-op). */
+	subsys_d = subsystem_get(subsys->desc->depends_on);
+	if (IS_ERR(subsys_d)) {
+		retval = subsys_d;
+		goto err_depends;
+	}
+
+	track = subsys_get_track(subsys);
+	mutex_lock(&track->lock);
+	if (!subsys->count) {
+		if (fw_name) {
+			pr_info("Changing subsys fw_name to %s\n", fw_name);
+			strlcpy(subsys->desc->fw_name, fw_name,
+				sizeof(subsys->desc->fw_name));
+		}
+		ret = subsys_start(subsys);
+		if (ret) {
+			retval = ERR_PTR(ret);
+			goto err_start;
+		}
+	}
+	subsys->count++;
+	mutex_unlock(&track->lock);
+	return retval;
+err_start:
+	/* Unwind in reverse order of acquisition. */
+	mutex_unlock(&track->lock);
+	subsystem_put(subsys_d);
+err_depends:
+	module_put(subsys->owner);
+err_module:
+	put_device(&subsys->dev);
+	return retval;
+}
+
+/**
+ * subsystem_get() - Boot a subsystem
+ * @name: pointer to a string containing the name of the subsystem to boot
+ *
+ * This function returns a pointer if it succeeds. If an error occurs an
+ * ERR_PTR is returned.
+ *
+ * If this feature is disabled, the value %NULL will be returned.
+ */
+void *subsystem_get(const char *name)
+{
+	return __subsystem_get(name, NULL);
+}
+EXPORT_SYMBOL(subsystem_get);
+
+/**
+ * subsystem_get_with_fwname() - Boot a subsystem using the firmware name passed in
+ * @name: pointer to a string containing the name of the subsystem to boot
+ * @fw_name: pointer to a string containing the subsystem firmware image name
+ *
+ * This function returns a pointer if it succeeds. If an error occurs an
+ * ERR_PTR is returned.
+ *
+ * If this feature is disabled, the value %NULL will be returned.
+ */
+void *subsystem_get_with_fwname(const char *name, const char *fw_name)
+{
+	return __subsystem_get(name, fw_name);
+}
+EXPORT_SYMBOL(subsystem_get_with_fwname);
+
+/**
+ * subsystem_put() - Shutdown a subsystem
+ * @subsystem: pointer from a previous call to subsystem_get()
+ *
+ * This doesn't imply that a subsystem is shutdown until all callers of
+ * subsystem_get() have called subsystem_put().
+ */
+void subsystem_put(void *subsystem)
+{
+	struct subsys_device *subsys_d, *subsys = subsystem;
+	struct subsys_tracking *track;
+
+	if (IS_ERR_OR_NULL(subsys))
+		return;
+
+	track = subsys_get_track(subsys);
+	mutex_lock(&track->lock);
+	if (WARN(!subsys->count, "%s: %s: Reference count mismatch\n",
+			subsys->desc->name, __func__))
+		goto err_out;
+	/* Last user gone: stop it and optionally collect a ramdump. */
+	if (!--subsys->count) {
+		subsys_stop(subsys);
+		if (subsys->do_ramdump_on_put)
+			subsystem_ramdump(subsys, NULL);
+		subsystem_free_memory(subsys, NULL);
+	}
+	mutex_unlock(&track->lock);
+
+	/* Release the dependency reference taken in __subsystem_get(). */
+	subsys_d = find_subsys(subsys->desc->depends_on);
+	if (subsys_d) {
+		subsystem_put(subsys_d);
+		put_device(&subsys_d->dev);
+	}
+	module_put(subsys->owner);
+	put_device(&subsys->dev);
+	return;
+err_out:
+	mutex_unlock(&track->lock);
+}
+EXPORT_SYMBOL(subsystem_put);
+
+/*
+ * Workqueue handler executing a full SSR sequence: shutdown, ramdump
+ * collection, memory reclaim and powerup for either a single subsystem
+ * or its entire coupled restart group.
+ */
+static void subsystem_restart_wq_func(struct work_struct *work)
+{
+	struct subsys_device *dev = container_of(work,
+						struct subsys_device, work);
+	struct subsys_device **list;
+	struct subsys_desc *desc = dev->desc;
+	struct subsys_soc_restart_order *order = dev->restart_order;
+	struct subsys_tracking *track;
+	unsigned count;
+	unsigned long flags;
+
+	/*
+	 * It's OK to not take the registration lock at this point.
+	 * This is because the subsystem list inside the relevant
+	 * restart order is not being traversed.
+	 */
+	if (order) {
+		list = order->subsys_ptrs;
+		count = order->count;
+		track = &order->track;
+	} else {
+		list = &dev;
+		count = 1;
+		track = &dev->track;
+	}
+
+	mutex_lock(&track->lock);
+	do_epoch_check(dev);
+
+	/* The subsystem may have gone offline since the work was queued. */
+	if (dev->track.state == SUBSYS_OFFLINE) {
+		mutex_unlock(&track->lock);
+		WARN(1, "SSR aborted: %s subsystem not online\n", desc->name);
+		return;
+	}
+
+	/*
+	 * It's necessary to take the registration lock because the subsystem
+	 * list in the SoC restart order will be traversed and it shouldn't be
+	 * changed until _this_ restart sequence completes.
+	 */
+	mutex_lock(&soc_order_reg_lock);
+
+	pr_debug("[%p]: Starting restart sequence for %s\n", current,
+			desc->name);
+	notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
+	for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+	notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);
+
+	notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION,
+									NULL);
+
+	spin_lock_irqsave(&track->s_lock, flags);
+	track->p_state = SUBSYS_RESTARTING;
+	spin_unlock_irqrestore(&track->s_lock, flags);
+
+	/* Collect ram dumps for all subsystems in order here */
+	for_each_subsys_device(list, count, NULL, subsystem_ramdump);
+
+	for_each_subsys_device(list, count, NULL, subsystem_free_memory);
+
+	notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL);
+	for_each_subsys_device(list, count, NULL, subsystem_powerup);
+	notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);
+
+	pr_info("[%p]: Restart sequence for %s completed.\n",
+			current, desc->name);
+
+	mutex_unlock(&soc_order_reg_lock);
+	mutex_unlock(&track->lock);
+
+	/* Back to normal; release the wakelock taken when queueing. */
+	spin_lock_irqsave(&track->s_lock, flags);
+	track->p_state = SUBSYS_NORMAL;
+	__pm_relax(&dev->ssr_wlock);
+	spin_unlock_irqrestore(&track->s_lock, flags);
+}
+
+/*
+ * Queue (or escalate) a restart for @dev. Callers may invoke this any
+ * number of times until the subsystem is actually shut down; a crash
+ * reported while a restart is already executing is fatal.
+ */
+static void __subsystem_restart_dev(struct subsys_device *dev)
+{
+	struct subsys_desc *desc = dev->desc;
+	const char *name = dev->desc->name;
+	struct subsys_tracking *track;
+	unsigned long flags;
+
+	pr_debug("Restarting %s [level=%s]!\n", desc->name,
+			restart_levels[dev->restart_level]);
+
+	track = subsys_get_track(dev);
+	/*
+	 * Allow drivers to call subsystem_restart{_dev}() as many times as
+	 * they want up until the point where the subsystem is shutdown.
+	 */
+	spin_lock_irqsave(&track->s_lock, flags);
+	if (track->p_state != SUBSYS_CRASHED &&
+					dev->track.state == SUBSYS_ONLINE) {
+		if (track->p_state != SUBSYS_RESTARTING) {
+			track->p_state = SUBSYS_CRASHED;
+			/* Hold a wakelock for the whole restart sequence. */
+			__pm_stay_awake(&dev->ssr_wlock);
+			queue_work(ssr_wq, &dev->work);
+		} else {
+			panic("Subsystem %s crashed during SSR!", name);
+		}
+	} else
+		WARN(dev->track.state == SUBSYS_OFFLINE,
+			"SSR aborted: %s subsystem not online\n", name);
+	spin_unlock_irqrestore(&track->s_lock, flags);
+}
+
+/*
+ * RESET_SOC handler: announce the SoC reset to the crashing subsystem's
+ * listeners, then bring the whole SoC down via panic().
+ */
+static void device_restart_work_hdlr(struct work_struct *work)
+{
+	struct subsys_device *dev = container_of(work, struct subsys_device,
+							device_restart_work);
+
+	notify_each_subsys_device(&dev, 1, SUBSYS_SOC_RESET, NULL);
+	/*
+	 * Temporary workaround until ramdump userspace application calls
+	 * sync() and fclose() on attempting the dump.
+	 */
+	msleep(100);
+	panic("subsys-restart: Resetting the SoC - %s crashed.",
+							dev->desc->name);
+}
+
+/**
+ * subsystem_restart_dev() - Request a restart of @dev
+ * @dev: subsystem to restart
+ *
+ * Returns 0 on success (or when the request is deliberately ignored),
+ * -ENODEV if references cannot be taken, -EBUSY during system shutdown.
+ */
+int subsystem_restart_dev(struct subsys_device *dev)
+{
+	const char *name;
+
+	if (!get_device(&dev->dev))
+		return -ENODEV;
+
+	if (!try_module_get(dev->owner)) {
+		put_device(&dev->dev);
+		return -ENODEV;
+	}
+
+	name = dev->desc->name;
+
+	/*
+	 * If a system reboot/shutdown is underway, ignore subsystem errors.
+	 * However, print a message so that we know that a subsystem behaved
+	 * unexpectedly here.
+	 */
+	if (system_state == SYSTEM_RESTART
+		|| system_state == SYSTEM_POWER_OFF) {
+		pr_err("%s crashed during a system poweroff/shutdown.\n", name);
+		/* Fix: release the references taken above (were leaked). */
+		module_put(dev->owner);
+		put_device(&dev->dev);
+		return -EBUSY;
+	}
+
+	pr_info("Restart sequence requested for %s, restart_level = %s.\n",
+		name, restart_levels[dev->restart_level]);
+
+	if (WARN(disable_restart_work == DISABLE_SSR,
+		"subsys-restart: Ignoring restart request for %s.\n", name)) {
+		/* Fix: release the references taken above (were leaked). */
+		module_put(dev->owner);
+		put_device(&dev->dev);
+		return 0;
+	}
+
+	switch (dev->restart_level) {
+
+	case RESET_SUBSYS_COUPLED:
+		__subsystem_restart_dev(dev);
+		break;
+	case RESET_SOC:
+		/*
+		 * References are intentionally kept here: the scheduled work
+		 * ends in a SoC panic, so the device must stay alive.
+		 */
+		__pm_stay_awake(&dev->ssr_wlock);
+		schedule_work(&dev->device_restart_work);
+		return 0;
+	default:
+		panic("subsys-restart: Unknown restart level!\n");
+		break;
+	}
+	module_put(dev->owner);
+	put_device(&dev->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_restart_dev);
+
+int subsystem_restart(const char *name)
+{
+	struct subsys_device *dev = find_subsys(name);
+	int ret;
+
+	if (dev == NULL)
+		return -ENODEV;
+
+	/* Drop the reference find_subsys() took once the request is in. */
+	ret = subsystem_restart_dev(dev);
+	put_device(&dev->dev);
+	return ret;
+}
+EXPORT_SYMBOL(subsystem_restart);
+
+/**
+ * subsystem_crashed() - Note that @name crashed; dump on final put
+ * @name: name of the crashed subsystem
+ *
+ * Arranges for a ramdump to be collected when the last reference to the
+ * subsystem is released via subsystem_put().
+ */
+int subsystem_crashed(const char *name)
+{
+	struct subsys_device *dev = find_subsys(name);
+	struct subsys_tracking *track;
+
+	if (!dev)
+		return -ENODEV;
+
+	/*
+	 * Fix: find_subsys() already returns the device with a reference
+	 * held (via bus_find_device()); the additional get_device() here
+	 * was never balanced by a second put_device() and leaked one
+	 * reference per call.
+	 */
+	track = subsys_get_track(dev);
+
+	mutex_lock(&track->lock);
+	dev->do_ramdump_on_put = true;
+	/*
+	 * TODO: Make this work with multiple consumers where one is calling
+	 * subsystem_restart() and another is calling this function. To do
+	 * so would require updating private state, etc.
+	 */
+	mutex_unlock(&track->lock);
+
+	put_device(&dev->dev);
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_crashed);
+
+/* Record whether @dev crashed; read back by notification/ramdump paths. */
+void subsys_set_crash_status(struct subsys_device *dev, bool crashed)
+{
+	dev->crashed = crashed;
+}
+
+/* Return the flag last set by subsys_set_crash_status(). */
+bool subsys_get_crash_status(struct subsys_device *dev)
+{
+	return dev->crashed;
+}
+
+/* Map a descriptor's underlying device back to its subsys_device. */
+static struct subsys_device *desc_to_subsys(struct device *d)
+{
+	struct subsys_device *found = NULL;
+	struct subsys_device *it;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(it, &subsys_list, list) {
+		if (it->desc->dev == d)
+			found = it;
+	}
+	mutex_unlock(&subsys_list_lock);
+	return found;
+}
+
+/* Forward a proxy-vote event; no-op when @device is not a subsystem. */
+void notify_proxy_vote(struct device *device)
+{
+	struct subsys_device *subsys = desc_to_subsys(device);
+
+	if (subsys)
+		notify_each_subsys_device(&subsys, 1, SUBSYS_PROXY_VOTE, NULL);
+}
+
+/* Forward a proxy-unvote event; no-op when @device is not a subsystem. */
+void notify_proxy_unvote(struct device *device)
+{
+	struct subsys_device *subsys = desc_to_subsys(device);
+
+	if (subsys)
+		notify_each_subsys_device(&subsys, 1, SUBSYS_PROXY_UNVOTE,
+									NULL);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* debugfs read: report the subsystem's current reference count. */
+static ssize_t subsys_debugfs_read(struct file *filp, char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	struct subsys_device *subsys = filp->private_data;
+	char buf[40];
+	int len;
+
+	len = snprintf(buf, sizeof(buf), "%d\n", subsys->count);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * debugfs write: accepts "restart", "get" or "put" as commands against
+ * the subsystem owning the file.
+ */
+static ssize_t subsys_debugfs_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct subsys_device *subsys = filp->private_data;
+	char buf[10];
+	char *cmp;
+	void *handle;
+
+	/* Copy at most sizeof(buf) - 1 bytes and NUL-terminate. */
+	cnt = min(cnt, sizeof(buf) - 1);
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+	buf[cnt] = '\0';
+	cmp = strstrip(buf);
+
+	if (!strcmp(cmp, "restart")) {
+		if (subsystem_restart_dev(subsys))
+			return -EIO;
+	} else if (!strcmp(cmp, "get")) {
+		/*
+		 * Fix: subsystem_get() returns a valid handle on success and
+		 * an ERR_PTR()/NULL on failure; the original treated any
+		 * non-NULL return - including success - as an error.
+		 */
+		handle = subsystem_get(subsys->desc->name);
+		if (IS_ERR_OR_NULL(handle))
+			return -EIO;
+	} else if (!strcmp(cmp, "put")) {
+		subsystem_put(subsys);
+	} else {
+		return -EINVAL;
+	}
+
+	return cnt;
+}
+
+/* debugfs file ops: read reports refcount, write accepts commands. */
+static const struct file_operations subsys_debugfs_fops = {
+	.open	= simple_open,
+	.read	= subsys_debugfs_read,
+	.write	= subsys_debugfs_write,
+};
+
+/* Root directory: /sys/kernel/debug/msm_subsys. */
+static struct dentry *subsys_base_dir;
+
+/* Create the debugfs root directory; called once from module init. */
+static int __init subsys_debugfs_init(void)
+{
+	subsys_base_dir = debugfs_create_dir("msm_subsys", NULL);
+	return !subsys_base_dir ? -ENOMEM : 0;
+}
+
+static void subsys_debugfs_exit(void)
+{
+	debugfs_remove_recursive(subsys_base_dir);
+}
+
+/* Add a per-subsystem debugfs file named after the subsystem. */
+static int subsys_debugfs_add(struct subsys_device *subsys)
+{
+	if (!subsys_base_dir)
+		return -ENOMEM;
+
+	subsys->dentry = debugfs_create_file(subsys->desc->name,
+				S_IRUGO | S_IWUSR, subsys_base_dir,
+				subsys, &subsys_debugfs_fops);
+	return !subsys->dentry ? -ENOMEM : 0;
+}
+
+static void subsys_debugfs_remove(struct subsys_device *subsys)
+{
+	debugfs_remove(subsys->dentry);
+}
+#else
+/* No-op stubs when CONFIG_DEBUG_FS is disabled. */
+static int __init subsys_debugfs_init(void) { return 0; };
+static void subsys_debugfs_exit(void) { }
+static int subsys_debugfs_add(struct subsys_device *subsys) { return 0; }
+static void subsys_debugfs_remove(struct subsys_device *subsys) { }
+#endif
+
+static int subsys_device_open(struct inode *inode, struct file *file)
+{
+	struct subsys_device *found = NULL;
+	struct subsys_device *it;
+	void *handle;
+
+	/* Resolve the character device minor back to its subsystem. */
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(it, &subsys_list, list) {
+		if (MINOR(it->dev_no) == iminor(inode))
+			found = it;
+	}
+	mutex_unlock(&subsys_list_lock);
+
+	if (!found)
+		return -EINVAL;
+
+	/* Opening the device boots the subsystem with its current firmware. */
+	handle = subsystem_get_with_fwname(found->desc->name,
+						found->desc->fw_name);
+	return IS_ERR(handle) ? PTR_ERR(handle) : 0;
+}
+
+static int subsys_device_close(struct inode *inode, struct file *file)
+{
+	struct subsys_device *found = NULL;
+	struct subsys_device *it;
+
+	/* Resolve the character device minor back to its subsystem. */
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(it, &subsys_list, list) {
+		if (MINOR(it->dev_no) == iminor(inode))
+			found = it;
+	}
+	mutex_unlock(&subsys_list_lock);
+
+	if (!found)
+		return -EINVAL;
+
+	/* Closing the device drops the reference taken at open time. */
+	subsystem_put(found);
+	return 0;
+}
+
+/* Char-device ops: open boots the subsystem, release shuts it down. */
+static const struct file_operations subsys_device_fops = {
+	.owner = THIS_MODULE,
+	.open = subsys_device_open,
+	.release = subsys_device_close,
+};
+
+/*
+ * Device-model release callback: frees everything allocated by
+ * subsys_register() once the last device reference is dropped.
+ */
+static void subsys_device_release(struct device *dev)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+
+	wakeup_source_trash(&subsys->ssr_wlock);
+	mutex_destroy(&subsys->track.lock);
+	ida_simple_remove(&subsys_ida, subsys->id);
+	kfree(subsys);
+}
+static irqreturn_t subsys_err_ready_intr_handler(int irq, void *subsys)
+{
+	struct subsys_device *sdev = subsys;
+
+	dev_info(sdev->desc->dev,
+		"Subsystem error monitoring/handling services are up\n");
+
+	/* Non-loadable subsystems never wait on the err_ready completion. */
+	if (!sdev->desc->is_not_loadable)
+		complete(&sdev->err_ready);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Create the per-subsystem character device ("subsys_<name>"). The
+ * chrdev region (4 minors) is allocated lazily on the first call.
+ */
+static int subsys_char_device_add(struct subsys_device *subsys_dev)
+{
+	int ret = 0;
+	static int major, minor;
+	dev_t dev_no;
+	struct device *device;
+
+	mutex_lock(&char_device_lock);
+	if (!major) {
+		ret = alloc_chrdev_region(&dev_no, 0, 4, "subsys");
+		if (ret < 0) {
+			pr_err("Failed to alloc subsys_dev region, err %d\n",
+									ret);
+			goto fail;
+		}
+		major = MAJOR(dev_no);
+		minor = MINOR(dev_no);
+	} else
+		dev_no = MKDEV(major, minor);
+
+	/*
+	 * Fix: device_create() returns an ERR_PTR() on failure, never NULL,
+	 * so the original "!device_create(...)" test could never fire; in
+	 * addition "ret" was left at 0 on that error path.
+	 */
+	device = device_create(char_class, subsys_dev->desc->dev, dev_no,
+			NULL, "subsys_%s", subsys_dev->desc->name);
+	if (IS_ERR(device)) {
+		pr_err("Failed to create subsys_%s device\n",
+						subsys_dev->desc->name);
+		ret = PTR_ERR(device);
+		goto fail_unregister_cdev_region;
+	}
+
+	cdev_init(&subsys_dev->char_dev, &subsys_device_fops);
+	subsys_dev->char_dev.owner = THIS_MODULE;
+	ret = cdev_add(&subsys_dev->char_dev, dev_no, 1);
+	if (ret < 0)
+		goto fail_destroy_device;
+
+	subsys_dev->dev_no = dev_no;
+	minor++;
+	mutex_unlock(&char_device_lock);
+
+	return 0;
+
+fail_destroy_device:
+	device_destroy(char_class, dev_no);
+fail_unregister_cdev_region:
+	unregister_chrdev_region(dev_no, 1);
+fail:
+	mutex_unlock(&char_device_lock);
+	return ret;
+}
+
+static void subsys_char_device_remove(struct subsys_device *subsys_dev)
+{
+	dev_t devno = subsys_dev->dev_no;
+
+	/* Tear down in reverse order of subsys_char_device_add(). */
+	cdev_del(&subsys_dev->char_dev);
+	device_destroy(char_class, devno);
+	unregister_chrdev_region(devno, 1);
+}
+
+static void subsys_remove_restart_order(struct device_node *device)
+{
+	struct subsys_soc_restart_order *order;
+	int idx;
+
+	/* Clear every slot that referenced the departing device's DT node. */
+	mutex_lock(&ssr_order_mutex);
+	list_for_each_entry(order, &ssr_order_list, list) {
+		for (idx = 0; idx < order->count; idx++) {
+			if (order->device_ptrs[idx] == device)
+				order->subsys_ptrs[idx] = NULL;
+		}
+	}
+	mutex_unlock(&ssr_order_mutex);
+}
+
+/*
+ * Parse the "qcom,restart-group" property into a restart order and add
+ * it to the global list, sharing an identical existing group when one
+ * is already registered. Returns NULL when the property is absent, the
+ * order on success, or an ERR_PTR() on failure.
+ */
+static struct subsys_soc_restart_order *ssr_parse_restart_orders(struct
+						subsys_desc * desc)
+{
+	int i, j, count, num = 0;
+	struct subsys_soc_restart_order *order, *tmp;
+	struct device *dev = desc->dev;
+	struct device_node *ssr_node;
+	uint32_t len;
+
+	if (!of_get_property(dev->of_node, "qcom,restart-group", &len))
+		return NULL;
+
+	count = len/sizeof(uint32_t);
+
+	order = devm_kzalloc(dev, sizeof(*order), GFP_KERNEL);
+	if (!order)
+		return ERR_PTR(-ENOMEM);
+
+	order->subsys_ptrs = devm_kzalloc(dev,
+				count * sizeof(struct subsys_device *),
+				GFP_KERNEL);
+	if (!order->subsys_ptrs)
+		return ERR_PTR(-ENOMEM);
+
+	order->device_ptrs = devm_kzalloc(dev,
+				count * sizeof(struct device_node *),
+				GFP_KERNEL);
+	if (!order->device_ptrs)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < count; i++) {
+		ssr_node = of_parse_phandle(dev->of_node,
+						"qcom,restart-group", i);
+		if (!ssr_node)
+			return ERR_PTR(-ENXIO);
+		pr_info("%s device has been added to %s's restart group\n",
+						ssr_node->name, desc->name);
+		order->device_ptrs[i] = ssr_node;
+		/*
+		 * Fix: drop the phandle reference only after the node has
+		 * been used - the original called of_node_put() first and
+		 * then dereferenced ssr_node->name. The bare pointer kept
+		 * in device_ptrs[] is only ever compared, never followed.
+		 */
+		of_node_put(ssr_node);
+	}
+
+	/*
+	 * Check for similar restart groups. If found, return
+	 * without adding the new group to the ssr_order_list.
+	 */
+	mutex_lock(&ssr_order_mutex);
+	list_for_each_entry(tmp, &ssr_order_list, list) {
+		for (i = 0; i < count; i++) {
+			for (j = 0; j < count; j++) {
+				if (order->device_ptrs[j] ==
+						tmp->device_ptrs[i])
+					num++;
+			}
+		}
+
+		/* A fully identical group already exists: share it. */
+		if (num == count && tmp->count == count)
+			goto err;
+		else if (num) {
+			/* Partial overlap between groups is invalid. */
+			tmp = ERR_PTR(-EINVAL);
+			goto err;
+		}
+	}
+
+	order->count = count;
+	mutex_init(&order->track.lock);
+	spin_lock_init(&order->track.s_lock);
+
+	INIT_LIST_HEAD(&order->list);
+	list_add_tail(&order->list, &ssr_order_list);
+	mutex_unlock(&ssr_order_mutex);
+
+	return order;
+err:
+	mutex_unlock(&ssr_order_mutex);
+	return tmp;
+}
+
+static int __get_gpio(struct subsys_desc *desc, const char *prop,
+		int *gpio)
+{
+	struct device_node *dnode = desc->dev->of_node;
+
+	/* An absent property is reported as -ENOENT, not a hard error. */
+	if (!of_find_property(dnode, prop, NULL))
+		return -ENOENT;
+
+	*gpio = of_get_named_gpio(dnode, prop, 0);
+	return *gpio < 0 ? *gpio : 0;
+}
+
+static int __get_irq(struct subsys_desc *desc, const char *prop,
+		unsigned int *irq, int *gpio)
+{
+	int gpio_num, irq_num, ret;
+
+	ret = __get_gpio(desc, prop, &gpio_num);
+	if (ret)
+		return ret;
+
+	irq_num = gpio_to_irq(gpio_num);
+
+	/* Normalize "no IRQ mapping" to -ENXIO for callers. */
+	if (irq_num == -ENOENT)
+		irq_num = -ENXIO;
+
+	if (irq_num < 0) {
+		pr_err("[%s]: Error getting IRQ \"%s\"\n", desc->name,
+				prop);
+		return irq_num;
+	}
+
+	/* @gpio is optional; only report the pin when requested. */
+	if (gpio)
+		*gpio = gpio_num;
+	*irq = irq_num;
+
+	return 0;
+}
+
+/*
+ * Parse the SSR devicetree properties: error/stop/ack GPIOs and their
+ * IRQs, the watchdog-bite IRQ (platform IRQ 0) and the optional restart
+ * group. A missing optional property (-ENOENT) is not an error.
+ */
+static int subsys_parse_devicetree(struct subsys_desc *desc)
+{
+	struct subsys_soc_restart_order *order;
+	int ret;
+
+	struct platform_device *pdev = container_of(desc->dev,
+					struct platform_device, dev);
+
+	ret = __get_irq(desc, "qcom,gpio-err-fatal", &desc->err_fatal_irq,
+							&desc->err_fatal_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_irq(desc, "qcom,gpio-err-ready", &desc->err_ready_irq,
+									NULL);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_irq(desc, "qcom,gpio-stop-ack", &desc->stop_ack_irq, NULL);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-force-stop", &desc->force_stop_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-ramdump-disable",
+			&desc->ramdump_disable_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-shutdown-ack",
+			&desc->shutdown_ack_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	/* Optional watchdog-bite IRQ comes from the platform resource. */
+	ret = platform_get_irq(pdev, 0);
+	if (ret > 0)
+		desc->wdog_bite_irq = ret;
+
+	order = ssr_parse_restart_orders(desc);
+	if (IS_ERR(order)) {
+		pr_err("Could not initialize SSR restart order, err = %ld\n",
+							PTR_ERR(order));
+		return PTR_ERR(order);
+	}
+
+	return 0;
+}
+
+/*
+ * Request every IRQ declared by the descriptor. Each IRQ is requested
+ * rising-edge and immediately disabled; enable_all_irqs() arms them
+ * once the subsystem is actually brought online.
+ */
+static int subsys_setup_irqs(struct subsys_device *subsys)
+{
+	struct subsys_desc *desc = subsys->desc;
+	int ret;
+
+	if (desc->err_fatal_irq && desc->err_fatal_handler) {
+		ret = devm_request_irq(desc->dev, desc->err_fatal_irq,
+				desc->err_fatal_handler,
+				IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register error fatal IRQ handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->err_fatal_irq);
+	}
+
+	if (desc->stop_ack_irq && desc->stop_ack_handler) {
+		ret = devm_request_irq(desc->dev, desc->stop_ack_irq,
+			desc->stop_ack_handler,
+			IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register stop ack handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->stop_ack_irq);
+	}
+
+	if (desc->wdog_bite_irq && desc->wdog_bite_handler) {
+		ret = devm_request_irq(desc->dev, desc->wdog_bite_irq,
+			desc->wdog_bite_handler,
+			IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register wdog bite handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->wdog_bite_irq);
+	}
+
+	/* err-ready uses the framework's own handler and @subsys cookie. */
+	if (desc->err_ready_irq) {
+		ret = devm_request_irq(desc->dev,
+					desc->err_ready_irq,
+					subsys_err_ready_intr_handler,
+					IRQF_TRIGGER_RISING,
+					"error_ready_interrupt", subsys);
+		if (ret < 0) {
+			dev_err(desc->dev,
+				"[%s]: Unable to register err ready handler\n",
+				desc->name);
+			return ret;
+		}
+		disable_irq(desc->err_ready_irq);
+	}
+
+	return 0;
+}
+
+/*
+ * Undo subsys_setup_irqs(): release every IRQ that could have been
+ * requested. Note the err-ready IRQ used @subsys as its dev_id cookie,
+ * the others used @desc.
+ */
+static void subsys_free_irqs(struct subsys_device *subsys)
+{
+	struct subsys_desc *desc = subsys->desc;
+
+	if (desc->err_fatal_irq && desc->err_fatal_handler)
+		devm_free_irq(desc->dev, desc->err_fatal_irq, desc);
+	if (desc->stop_ack_irq && desc->stop_ack_handler)
+		devm_free_irq(desc->dev, desc->stop_ack_irq, desc);
+	if (desc->wdog_bite_irq && desc->wdog_bite_handler)
+		devm_free_irq(desc->dev, desc->wdog_bite_irq, desc);
+	if (desc->err_ready_irq)
+		devm_free_irq(desc->dev, desc->err_ready_irq, subsys);
+}
+
+/**
+ * subsys_register() - Register a subsystem with the restart framework
+ * @desc: descriptor filled in by the subsystem driver
+ *
+ * Creates the subsys device, its character device, debugfs entry and
+ * notifier handle, parses the SSR devicetree properties and hooks the
+ * subsystem into sysmon. Returns the new device or an ERR_PTR().
+ */
+struct subsys_device *subsys_register(struct subsys_desc *desc)
+{
+	struct subsys_device *subsys;
+	struct device_node *ofnode = desc->dev->of_node;
+	int ret;
+
+	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+	if (!subsys)
+		return ERR_PTR(-ENOMEM);
+
+	subsys->desc = desc;
+	subsys->owner = desc->owner;
+	subsys->dev.parent = desc->dev;
+	subsys->dev.bus = &subsys_bus_type;
+	subsys->dev.release = subsys_device_release;
+	subsys->notif_state = -1;
+	subsys->desc->sysmon_pid = -1;
+	/* The firmware image name defaults to the subsystem name. */
+	strlcpy(subsys->desc->fw_name, desc->name,
+			sizeof(subsys->desc->fw_name));
+
+	subsys->notify = subsys_notif_add_subsys(desc->name);
+
+	snprintf(subsys->wlname, sizeof(subsys->wlname), "ssr(%s)", desc->name);
+	wakeup_source_init(&subsys->ssr_wlock, subsys->wlname);
+	INIT_WORK(&subsys->work, subsystem_restart_wq_func);
+	INIT_WORK(&subsys->device_restart_work, device_restart_work_hdlr);
+	spin_lock_init(&subsys->track.s_lock);
+
+	subsys->id = ida_simple_get(&subsys_ida, 0, 0, GFP_KERNEL);
+	if (subsys->id < 0) {
+		wakeup_source_trash(&subsys->ssr_wlock);
+		ret = subsys->id;
+		kfree(subsys);
+		return ERR_PTR(ret);
+	}
+
+	dev_set_name(&subsys->dev, "subsys%d", subsys->id);
+
+	mutex_init(&subsys->track.lock);
+
+	ret = subsys_debugfs_add(subsys);
+	if (ret) {
+		ida_simple_remove(&subsys_ida, subsys->id);
+		wakeup_source_trash(&subsys->ssr_wlock);
+		kfree(subsys);
+		return ERR_PTR(ret);
+	}
+
+	ret = device_register(&subsys->dev);
+	if (ret) {
+		/* After device_register(), cleanup flows via put_device(). */
+		subsys_debugfs_remove(subsys);
+		put_device(&subsys->dev);
+		return ERR_PTR(ret);
+	}
+
+	ret = subsys_char_device_add(subsys);
+	if (ret) {
+		goto err_register;
+	}
+
+	if (ofnode) {
+		ret = subsys_parse_devicetree(desc);
+		if (ret)
+			goto err_register;
+
+		subsys->restart_order = update_restart_order(subsys);
+
+		ret = subsys_setup_irqs(subsys);
+		if (ret < 0)
+			goto err_setup_irqs;
+
+		/* The remaining DT properties are all optional. */
+		if (of_property_read_u32(ofnode, "qcom,ssctl-instance-id",
+					&desc->ssctl_instance_id))
+			pr_debug("Reading instance-id for %s failed\n",
+								desc->name);
+
+		if (of_property_read_u32(ofnode, "qcom,sysmon-id",
+					&subsys->desc->sysmon_pid))
+			pr_debug("Reading sysmon-id for %s failed\n",
+								desc->name);
+
+		subsys->desc->edge = of_get_property(ofnode, "qcom,edge",
+								NULL);
+		if (!subsys->desc->edge)
+			pr_debug("Reading qcom,edge for %s failed\n",
+								desc->name);
+	}
+
+	ret = sysmon_notifier_register(desc);
+	if (ret < 0)
+		goto err_sysmon_notifier;
+
+	if (subsys->desc->edge) {
+		ret = sysmon_glink_register(desc);
+		if (ret < 0)
+			goto err_sysmon_glink_register;
+	}
+	mutex_lock(&subsys_list_lock);
+	INIT_LIST_HEAD(&subsys->list);
+	list_add_tail(&subsys->list, &subsys_list);
+	mutex_unlock(&subsys_list_lock);
+
+	return subsys;
+err_sysmon_glink_register:
+	sysmon_notifier_unregister(subsys->desc);
+err_sysmon_notifier:
+	if (ofnode)
+		subsys_free_irqs(subsys);
+err_setup_irqs:
+	if (ofnode)
+		subsys_remove_restart_order(ofnode);
+err_register:
+	/*
+	 * NOTE(review): error paths reached after subsys_char_device_add()
+	 * succeeded never call subsys_char_device_remove(); the char device
+	 * looks leaked on those paths - confirm and fix separately.
+	 */
+	subsys_debugfs_remove(subsys);
+	device_unregister(&subsys->dev);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(subsys_register);
+EXPORT_SYMBOL(subsys_register);
+
+/**
+ * subsys_unregister() - Remove a subsystem from the restart framework
+ * @subsys: device previously returned by subsys_register() (NULL and
+ *          ERR_PTR values are tolerated and ignored)
+ */
+void subsys_unregister(struct subsys_device *subsys)
+{
+	struct subsys_device *subsys_dev, *tmp;
+	struct device_node *device;
+
+	/*
+	 * Fix: the original dereferenced subsys->desc->dev->of_node in its
+	 * declarations, i.e. before the IS_ERR_OR_NULL() validity check,
+	 * which defeated the check entirely for NULL/ERR_PTR input.
+	 */
+	if (IS_ERR_OR_NULL(subsys))
+		return;
+
+	device = subsys->desc->dev->of_node;
+
+	if (get_device(&subsys->dev)) {
+		mutex_lock(&subsys_list_lock);
+		list_for_each_entry_safe(subsys_dev, tmp, &subsys_list, list)
+			if (subsys_dev == subsys)
+				list_del(&subsys->list);
+		mutex_unlock(&subsys_list_lock);
+
+		if (device) {
+			subsys_free_irqs(subsys);
+			subsys_remove_restart_order(device);
+		}
+		mutex_lock(&subsys->track.lock);
+		WARN_ON(subsys->count);
+		device_unregister(&subsys->dev);
+		mutex_unlock(&subsys->track.lock);
+		subsys_debugfs_remove(subsys);
+		subsys_char_device_remove(subsys);
+		sysmon_notifier_unregister(subsys->desc);
+		if (subsys->desc->edge)
+			sysmon_glink_unregister(subsys->desc);
+		put_device(&subsys->dev);
+	}
+}
+EXPORT_SYMBOL(subsys_unregister);
+
+/* Give one subsystem a chance to quiesce state when the SoC panics. */
+static int subsys_panic(struct device *dev, void *data)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+
+	if (subsys->desc->crash_shutdown)
+		subsys->desc->crash_shutdown(subsys->desc);
+	return 0;
+}
+
+/* Panic notifier: run the crash hook of every registered subsystem. */
+static int ssr_panic_handler(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	bus_for_each_dev(&subsys_bus_type, NULL, NULL, subsys_panic);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_nb = {
+	.notifier_call  = ssr_panic_handler,
+};
+
+/*
+ * Framework init: create the SSR workqueue, bus, debugfs root, char
+ * device class and the panic notifier. Runs at arch_initcall time.
+ */
+static int __init subsys_restart_init(void)
+{
+	int ret;
+
+	/* SSR sequences must not be starved by other work. */
+	ssr_wq = alloc_workqueue("ssr_wq", WQ_CPU_INTENSIVE, 0);
+	BUG_ON(!ssr_wq);
+
+	ret = bus_register(&subsys_bus_type);
+	if (ret)
+		goto err_bus;
+	ret = subsys_debugfs_init();
+	if (ret)
+		goto err_debugfs;
+
+	char_class = class_create(THIS_MODULE, "subsys");
+	if (IS_ERR(char_class)) {
+		/* Fix: propagate the real error instead of assuming -ENOMEM. */
+		ret = PTR_ERR(char_class);
+		pr_err("Failed to create subsys_dev class\n");
+		goto err_class;
+	}
+
+	ret = atomic_notifier_chain_register(&panic_notifier_list,
+			&panic_nb);
+	if (ret)
+		goto err_soc;
+
+	return 0;
+
+err_soc:
+	class_destroy(char_class);
+err_class:
+	subsys_debugfs_exit();
+err_debugfs:
+	bus_unregister(&subsys_bus_type);
+err_bus:
+	destroy_workqueue(ssr_wq);
+	return ret;
+}
+arch_initcall(subsys_restart_init);
+
+MODULE_DESCRIPTION("Subsystem Restart Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
new file mode 100644
index 000000000000..7ef69b527ef8
--- /dev/null
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -0,0 +1,732 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "sysmon-qmi: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/sysmon.h>
+
+#define QMI_RESP_BIT_SHIFT(x) (x << 16)
+
+#define QMI_SSCTL_RESTART_REQ_V02 0x0020
+#define QMI_SSCTL_RESTART_RESP_V02 0x0020
+#define QMI_SSCTL_RESTART_READY_IND_V02 0x0020
+#define QMI_SSCTL_SHUTDOWN_REQ_V02 0x0021
+#define QMI_SSCTL_SHUTDOWN_RESP_V02 0x0021
+#define QMI_SSCTL_SHUTDOWN_READY_IND_V02 0x0021
+#define QMI_SSCTL_GET_FAILURE_REASON_REQ_V02 0x0022
+#define QMI_SSCTL_GET_FAILURE_REASON_RESP_V02 0x0022
+#define QMI_SSCTL_SUBSYS_EVENT_REQ_V02 0x0023
+#define QMI_SSCTL_SUBSYS_EVENT_RESP_V02 0x0023
+#define QMI_SSCTL_SUBSYS_EVENT_READY_IND_V02 0x0023
+
+#define QMI_SSCTL_ERROR_MSG_LENGTH 90
+#define QMI_SSCTL_SUBSYS_NAME_LENGTH 15
+#define QMI_SSCTL_SUBSYS_EVENT_REQ_LENGTH 40
+#define QMI_SSCTL_RESP_MSG_LENGTH 7
+#define QMI_SSCTL_EMPTY_MSG_LENGTH 0
+
+#define SSCTL_SERVICE_ID 0x2B
+#define SSCTL_VER_2 2
+#define SERVER_TIMEOUT 500
+#define SHUTDOWN_TIMEOUT 10000
+
+#define QMI_EOTI_DATA_TYPE \
+{ \
+ .data_type = QMI_EOTI, \
+ .elem_len = 0, \
+ .elem_size = 0, \
+ .is_array = NO_ARRAY, \
+ .tlv_type = 0x00, \
+ .offset = 0, \
+ .ei_array = NULL, \
+},
+
+/* Per-subsystem sysmon/QMI client state, one entry on sysmon_list */
+struct sysmon_qmi_data {
+	const char *name;		/* subsystem name, lookup key */
+	int instance_id;		/* SSCTL QMI instance; <= 0 => no SSCTL v2 */
+	struct work_struct svc_arrive;	/* QMI server arrived */
+	struct work_struct svc_exit;	/* QMI server went away */
+	struct work_struct svc_rcv_msg;	/* drain incoming QMI messages */
+	struct qmi_handle *clnt_handle;	/* NULL until connected; NULL => legacy fallback */
+	struct notifier_block notifier;	/* QMI service event notifier */
+	void *notif_handle;
+	bool legacy_version;
+	struct completion server_connect;
+	struct completion ind_recv;	/* completed by sysmon_ind_cb() */
+	struct list_head list;
+};
+
+/* Single shared workqueue, created lazily by sysmon_notifier_register() */
+static struct workqueue_struct *sysmon_wq;
+
+static LIST_HEAD(sysmon_list);
+static DEFINE_MUTEX(sysmon_list_lock);	/* guards sysmon_list */
+static DEFINE_MUTEX(sysmon_lock);	/* serializes QMI request/response exchanges */
+
+static void sysmon_clnt_recv_msg(struct work_struct *work);
+static void sysmon_clnt_svc_arrive(struct work_struct *work);
+static void sysmon_clnt_svc_exit(struct work_struct *work);
+
+/* Map subsystem notifier IDs to the SSCTL wire-protocol event enum */
+static const int notif_map[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[SUBSYS_BEFORE_POWERUP] = SSCTL_SSR_EVENT_BEFORE_POWERUP,
+	[SUBSYS_AFTER_POWERUP] = SSCTL_SSR_EVENT_AFTER_POWERUP,
+	[SUBSYS_BEFORE_SHUTDOWN] = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
+	[SUBSYS_AFTER_SHUTDOWN] = SSCTL_SSR_EVENT_AFTER_SHUTDOWN,
+};
+
+/*
+ * QMI indication callback, registered by sysmon_clnt_svc_arrive() with the
+ * subsystem name as ind_cb_priv.  Finds the matching entry by name and
+ * completes ind_recv, which sysmon_send_shutdown() waits on.
+ */
+static void sysmon_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
+			void *msg, unsigned int msg_len, void *ind_cb_priv)
+{
+	struct sysmon_qmi_data *data = NULL, *temp;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, (char *)ind_cb_priv))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return;
+
+	pr_debug("%s: Indication received from subsystem\n", data->name);
+	complete(&data->ind_recv);
+}
+
+/*
+ * QMI service event notifier: defer server arrive/exit handling to
+ * sysmon_wq so the notifier chain is never blocked.  Always returns 0.
+ */
+static int sysmon_svc_event_notify(struct notifier_block *this,
+				      unsigned long code,
+				      void *_cmd)
+{
+	struct sysmon_qmi_data *data = container_of(this,
+					struct sysmon_qmi_data, notifier);
+
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		queue_work(sysmon_wq, &data->svc_arrive);
+		break;
+	case QMI_SERVER_EXIT:
+		queue_work(sysmon_wq, &data->svc_exit);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/*
+ * QMI handle event callback.  notify_priv is the svc_arrive work_struct
+ * (passed in sysmon_clnt_svc_arrive()), used to recover the parent entry.
+ * Incoming messages are drained from process context via svc_rcv_msg.
+ */
+static void sysmon_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	struct sysmon_qmi_data *data = container_of(notify_priv,
+					struct sysmon_qmi_data, svc_arrive);
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		schedule_work(&data->svc_rcv_msg);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Worker run when the SSCTL QMI service appears: create a client handle,
+ * connect to the service and register the indication callback.  On any
+ * failure clnt_handle is left NULL, which makes the sysmon_send_*() API
+ * fall back to the legacy SSCTL_V0 (SMD/HSIC) path.
+ */
+static void sysmon_clnt_svc_arrive(struct work_struct *work)
+{
+	int rc;
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_arrive);
+
+	/* Create a Local client port for QMI communication */
+	data->clnt_handle = qmi_handle_create(sysmon_clnt_notify, work);
+	if (!data->clnt_handle) {
+		pr_err("QMI client handle alloc failed for %s\n", data->name);
+		return;
+	}
+
+	rc = qmi_connect_to_service(data->clnt_handle, SSCTL_SERVICE_ID,
+					SSCTL_VER_2, data->instance_id);
+	if (rc < 0) {
+		pr_err("%s: Could not connect handle to service\n",
+								data->name);
+		qmi_handle_destroy(data->clnt_handle);
+		data->clnt_handle = NULL;
+		return;
+	}
+	pr_info("Connection established between QMI handle and %s's SSCTL service\n"
+								, data->name);
+
+	/* Indication delivery is best-effort; a failure only costs the
+	 * shutdown-indication fast path in sysmon_send_shutdown(). */
+	rc = qmi_register_ind_cb(data->clnt_handle, sysmon_ind_cb,
+							(void *)data->name);
+	if (rc < 0)
+		pr_warn("%s: Could not register the indication callback\n",
+								data->name);
+}
+
+/*
+ * Worker run when the SSCTL QMI service goes away: tear down the client
+ * handle and reset it to NULL so callers revert to the legacy API.
+ */
+static void sysmon_clnt_svc_exit(struct work_struct *work)
+{
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_exit);
+
+	qmi_handle_destroy(data->clnt_handle);
+	data->clnt_handle = NULL;
+}
+
+/*
+ * Worker that drains all queued QMI messages for this client.
+ * qmi_recv_msg() returns -ENOMSG when the queue is empty, which is the
+ * normal exit; anything else is reported as an error.
+ */
+static void sysmon_clnt_recv_msg(struct work_struct *work)
+{
+	int ret;
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_rcv_msg);
+
+	do {
+		pr_debug("%s: Notified about a Receive event\n", data->name);
+	} while ((ret = qmi_recv_msg(data->clnt_handle)) == 0);
+
+	if (ret != -ENOMSG)
+		pr_err("%s: Error receiving message\n", data->name);
+}
+
+/* Wire format of the SSCTL v2 SUBSYS_EVENT request/response (msg id 0x23) */
+struct qmi_ssctl_subsys_event_req_msg {
+	uint8_t subsys_name_len;
+	char subsys_name[QMI_SSCTL_SUBSYS_NAME_LENGTH];
+	enum ssctl_ssr_event_enum_type event;
+	uint8_t evt_driven_valid;
+	enum ssctl_ssr_event_driven_enum_type evt_driven;
+};
+
+struct qmi_ssctl_subsys_event_resp_msg {
+	struct qmi_response_type_v01 resp;
+};
+
+/* QMI TLV encode/decode table for the request; mirrors the struct above */
+static struct elem_info qmi_ssctl_subsys_event_req_msg_ei[] = {
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array = NO_ARRAY,
+		.tlv_type = 0x01,
+		.offset = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				   subsys_name_len),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = QMI_SSCTL_SUBSYS_NAME_LENGTH,
+		.elem_size = sizeof(char),
+		.is_array = VAR_LEN_ARRAY,
+		.tlv_type = 0x01,
+		.offset = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				   subsys_name),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len = 1,
+		.elem_size = sizeof(uint32_t),
+		.is_array = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				   event),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				   evt_driven_valid),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len = 1,
+		.elem_size = sizeof(uint32_t),
+		.is_array = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				   evt_driven),
+		.ei_array = NULL,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/* QMI TLV table for the response: just the standard result TLV */
+static struct elem_info qmi_ssctl_subsys_event_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct qmi_ssctl_subsys_event_resp_msg,
+				   resp),
+		.ei_array = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_send_event() - Notify a subsystem of another's state change
+ * @dest_desc:	Subsystem descriptor of the subsystem the notification
+ * should be sent to
+ * @event_desc:	Subsystem descriptor of the subsystem that generated the
+ * notification
+ * @notif:	ID of the notification type (ex. SUBSYS_BEFORE_SHUTDOWN)
+ *
+ * Reverts to using legacy sysmon API (sysmon_send_event_no_qmi()) if
+ * client handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for invalid destination or notification IDs,
+ * -ENODEV if the transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds, but with something other than an acknowledgement.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_event(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif)
+{
+	struct qmi_ssctl_subsys_event_req_msg req;
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_ssctl_subsys_event_resp_msg resp = { { 0, 0 } };
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *event_ss = event_desc->name;
+	const char *dest_ss = dest_desc->name;
+	int ret;
+
+	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || event_ss == NULL
+		|| dest_ss == NULL)
+		return -EINVAL;
+
+	/* Look up the destination's sysmon entry by subsystem name */
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	/* No QMI handle: SSCTL v2 service never connected, use legacy path */
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_send_event_no_qmi(dest_desc, event_desc, notif);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	snprintf(req.subsys_name, ARRAY_SIZE(req.subsys_name), "%s", event_ss);
+	req.subsys_name_len = strlen(req.subsys_name);
+	req.event = notif_map[notif];
+	req.evt_driven_valid = 1;
+	req.evt_driven = SSCTL_SSR_EVENT_FORCED;
+
+	req_desc.msg_id = QMI_SSCTL_SUBSYS_EVENT_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_SUBSYS_EVENT_REQ_LENGTH;
+	req_desc.ei_array = qmi_ssctl_subsys_event_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_SUBSYS_EVENT_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_RESP_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_subsys_event_resp_msg_ei;
+
+	/* sysmon_lock serializes QMI request/response transactions */
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_debug("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+	}
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_event);
+EXPORT_SYMBOL(sysmon_send_event);
+
+/* Wire format of the SSCTL v2 SHUTDOWN request/response (msg id 0x21);
+ * the request carries no payload. */
+struct qmi_ssctl_shutdown_req_msg {
+};
+
+struct qmi_ssctl_shutdown_resp_msg {
+	struct qmi_response_type_v01 resp;
+};
+
+static struct elem_info qmi_ssctl_shutdown_req_msg_ei[] = {
+	QMI_EOTI_DATA_TYPE
+};
+
+static struct elem_info qmi_ssctl_shutdown_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct qmi_ssctl_shutdown_resp_msg,
+				   resp),
+		.ei_array = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_send_shutdown() - send shutdown command to a
+ * subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to send to
+ *
+ * Reverts to using legacy sysmon API (sysmon_send_shutdown_no_qmi()) if
+ * client handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_shutdown(struct subsys_desc *dest_desc)
+{
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_ssctl_shutdown_resp_msg resp = { { 0, 0 } };
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *dest_ss = dest_desc->name;
+	char req = 0;
+	int ret, shutdown_ack_ret;
+
+	if (dest_ss == NULL)
+		return -EINVAL;
+
+	/* Look up the destination's sysmon entry by subsystem name */
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	/* No QMI handle: SSCTL v2 service never connected, use legacy path */
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_send_shutdown_no_qmi(dest_desc);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	req_desc.msg_id = QMI_SSCTL_SHUTDOWN_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_EMPTY_MSG_LENGTH;
+	req_desc.ei_array = qmi_ssctl_shutdown_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_SHUTDOWN_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_RESP_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_shutdown_resp_msg_ei;
+
+	/* Re-arm before sending so the indication cannot be missed */
+	reinit_completion(&data->ind_recv);
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+		goto out;
+	}
+
+	shutdown_ack_ret = wait_for_shutdown_ack(dest_desc);
+	if (shutdown_ack_ret < 0) {
+		pr_err("shutdown_ack SMP2P bit for %s not set\n", data->name);
+		/*
+		 * Bug fix: the original tested "!&data->ind_recv.done" - the
+		 * address of a struct member, which is never NULL - so this
+		 * branch was dead and the SMP2P failure was silently turned
+		 * into success.  Report failure when the QMI shutdown
+		 * indication has not arrived either.
+		 */
+		if (!completion_done(&data->ind_recv)) {
+			pr_err("QMI shutdown indication not received\n");
+			ret = shutdown_ack_ret;
+		}
+		goto out;
+	} else if (shutdown_ack_ret > 0)
+		goto out;
+
+	if (!wait_for_completion_timeout(&data->ind_recv,
+					msecs_to_jiffies(SHUTDOWN_TIMEOUT))) {
+		pr_err("Timed out waiting for shutdown indication from %s\n",
+							data->name);
+		ret = -ETIMEDOUT;
+	}
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_shutdown);
+EXPORT_SYMBOL(sysmon_send_shutdown);
+
+/* Wire format of the SSCTL v2 GET_FAILURE_REASON request/response
+ * (msg id 0x22); the request carries no payload. */
+struct qmi_ssctl_get_failure_reason_req_msg {
+};
+
+struct qmi_ssctl_get_failure_reason_resp_msg {
+	struct qmi_response_type_v01 resp;
+	uint8_t error_message_valid;
+	uint32_t error_message_len;
+	char error_message[QMI_SSCTL_ERROR_MSG_LENGTH];
+};
+
+static struct elem_info qmi_ssctl_get_failure_reason_req_msg_ei[] = {
+	QMI_EOTI_DATA_TYPE
+};
+
+/* Response TLVs: mandatory result plus an optional error-message string */
+static struct elem_info qmi_ssctl_get_failure_reason_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+			resp),
+		.ei_array = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+			error_message_valid),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+			error_message_len),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = QMI_SSCTL_ERROR_MSG_LENGTH,
+		.elem_size = sizeof(char),
+		.is_array = VAR_LEN_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+			error_message),
+		.ei_array = NULL,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_get_reason() - Retrieve failure reason from a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to query
+ * @buf:	Caller-allocated buffer for the returned NUL-terminated reason
+ * @len:	Length of @buf
+ *
+ * Reverts to using legacy sysmon API (sysmon_get_reason_no_qmi()) if client
+ * handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf, size_t len)
+{
+	struct msg_desc req_desc, resp_desc;
+	/*
+	 * Bug fix: zero-initialize the response so a short or absent
+	 * error-message TLV cannot leave uninitialized stack bytes to be
+	 * read below (matches the other sysmon_* callers).
+	 */
+	struct qmi_ssctl_get_failure_reason_resp_msg resp = { { 0, 0 } };
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *dest_ss = dest_desc->name;
+	const char expect[] = "ssr:return:";
+	char req = 0;
+	int ret;
+
+	if (dest_ss == NULL || buf == NULL || len == 0)
+		return -EINVAL;
+
+	/* Look up the destination's sysmon entry by subsystem name */
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	/* No QMI handle: SSCTL v2 service never connected, use legacy path */
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_get_reason_no_qmi(dest_desc, buf, len);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	req_desc.msg_id = QMI_SSCTL_GET_FAILURE_REASON_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_EMPTY_MSG_LENGTH;
+	req_desc.ei_array = qmi_ssctl_get_failure_reason_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_GET_FAILURE_REASON_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_ERROR_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_get_failure_reason_resp_msg_ei;
+
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+		goto out;
+	}
+
+	if (!strcmp(resp.error_message, expect)) {
+		pr_err("Unexpected response %s\n", resp.error_message);
+		ret = -ENOSYS;
+		goto out;
+	}
+	/*
+	 * Bug fix: the copy was bounded by the remote-reported
+	 * error_message_len, overflowing the caller's @buf whenever that
+	 * length exceeded @len.  Bound by the destination size as well.
+	 */
+	strlcpy(buf, resp.error_message,
+		min_t(size_t, len, (size_t)resp.error_message_len + 1));
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_get_reason);
+
+/**
+ * sysmon_notifier_register() - Initialize sysmon data for a subsystem.
+ * @desc:	Subsystem descriptor of the subsystem
+ *
+ * Returns 0 for success. If the subsystem does not support SSCTL v2, a
+ * value of 0 is returned after adding the subsystem entry to the sysmon_list.
+ * In addition, if the SSCTL v2 support exists, the notifier block to receive
+ * events from the SSCTL service on the subsystem is registered.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_notifier_register(struct subsys_desc *desc)
+{
+	struct sysmon_qmi_data *data;
+	int rc = 0;
+
+	/* NOTE(review): kmalloc leaves work/notifier/completion fields
+	 * uninitialized; the instance_id <= 0 path below skips their init
+	 * and relies on them never being used for such entries. */
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->name = desc->name;
+	data->instance_id = desc->ssctl_instance_id;
+	data->clnt_handle = NULL;
+	data->legacy_version = false;
+
+	mutex_lock(&sysmon_list_lock);
+	/* No SSCTL v2 instance: track the entry, but skip QMI setup */
+	if (data->instance_id <= 0) {
+		pr_debug("SSCTL instance id not defined\n");
+		goto add_list;
+	}
+
+	/* The shared workqueue is created once, on the first v2 subsystem */
+	if (sysmon_wq)
+		goto notif_register;
+
+	sysmon_wq = create_singlethread_workqueue("sysmon_wq");
+	if (!sysmon_wq) {
+		mutex_unlock(&sysmon_list_lock);
+		pr_err("Could not create workqueue\n");
+		kfree(data);
+		return -ENOMEM;
+	}
+
+notif_register:
+	data->notifier.notifier_call = sysmon_svc_event_notify;
+	init_completion(&data->ind_recv);
+
+	INIT_WORK(&data->svc_arrive, sysmon_clnt_svc_arrive);
+	INIT_WORK(&data->svc_exit, sysmon_clnt_svc_exit);
+	INIT_WORK(&data->svc_rcv_msg, sysmon_clnt_recv_msg);
+
+	rc = qmi_svc_event_notifier_register(SSCTL_SERVICE_ID, SSCTL_VER_2,
+					data->instance_id, &data->notifier);
+	if (rc < 0)
+		pr_err("Notifier register failed for %s\n", data->name);
+add_list:
+	INIT_LIST_HEAD(&data->list);
+	list_add_tail(&data->list, &sysmon_list);
+	mutex_unlock(&sysmon_list_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(sysmon_notifier_register);
+EXPORT_SYMBOL(sysmon_notifier_register);
+
+/**
+ * sysmon_notifier_unregister() - Cleanup the subsystem's sysmon data.
+ * @desc:	Subsystem descriptor of the subsystem
+ *
+ * If the subsystem does not support SSCTL v2, its entry is simply removed from
+ * the sysmon_list. In addition, if the SSCTL v2 support exists, the notifier
+ * block to receive events from the SSCTL service is unregistered.
+ */
+void sysmon_notifier_unregister(struct subsys_desc *desc)
+{
+	struct sysmon_qmi_data *data = NULL, *sysmon_data, *tmp;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry_safe(sysmon_data, tmp, &sysmon_list, list)
+		if (!strcmp(sysmon_data->name, desc->name)) {
+			data = sysmon_data;
+			list_del(&data->list);
+		}
+
+	if (data == NULL)
+		goto exit;
+
+	if (data->instance_id > 0)
+		qmi_svc_event_notifier_unregister(SSCTL_SERVICE_ID,
+			SSCTL_VER_2, data->instance_id, &data->notifier);
+
+	if (sysmon_wq && list_empty(&sysmon_list)) {
+		destroy_workqueue(sysmon_wq);
+		/*
+		 * Bug fix: reset the cached pointer so a subsequent
+		 * sysmon_notifier_register() recreates the workqueue instead
+		 * of queueing work on the freed one (use-after-free).
+		 */
+		sysmon_wq = NULL;
+	}
+exit:
+	mutex_unlock(&sysmon_list_lock);
+	kfree(data);
+}
+EXPORT_SYMBOL(sysmon_notifier_unregister);
diff --git a/drivers/soc/qcom/sysmon.c b/drivers/soc/qcom/sysmon.c
new file mode 100644
index 000000000000..8a12341a6f91
--- /dev/null
+++ b/drivers/soc/qcom/sysmon.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/qcom/hsic_sysmon.h>
+#include <soc/qcom/sysmon.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/smd.h>
+
+#define TX_BUF_SIZE 50
+#define RX_BUF_SIZE 500
+#define TIMEOUT_MS 500
+
+enum transports {
+ TRANSPORT_SMD,
+ TRANSPORT_HSIC,
+};
+
+/* Per-subsystem legacy (SSCTL v0) transport state, one entry on sysmon_list */
+struct sysmon_subsys {
+	struct mutex lock;		/* serializes send/response cycles */
+	struct smd_channel *chan;	/* valid for TRANSPORT_SMD only */
+	bool chan_open;			/* tracks SMD_EVENT_OPEN/CLOSE */
+	struct completion resp_ready;	/* completed when a response lands */
+	char rx_buf[RX_BUF_SIZE];	/* last response from the subsystem */
+	enum transports transport;
+	struct device *dev;
+	u32 pid;			/* platform device id, lookup key */
+	struct list_head list;
+};
+
+/* Human-readable names for the wire protocol's "ssr:<ss>:<event>" strings */
+static const char *notif_name[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[SUBSYS_BEFORE_SHUTDOWN] = "before_shutdown",
+	[SUBSYS_AFTER_SHUTDOWN] = "after_shutdown",
+	[SUBSYS_BEFORE_POWERUP] = "before_powerup",
+	[SUBSYS_AFTER_POWERUP] = "after_powerup",
+};
+
+static LIST_HEAD(sysmon_list);
+static DEFINE_MUTEX(sysmon_list_lock);
+
+/*
+ * Send @tx_buf over the subsystem's SMD channel and block (up to
+ * TIMEOUT_MS) until sysmon_smd_notify() signals that the response has been
+ * read into ss->rx_buf.  Returns 0 on success, -ENODEV if the channel is
+ * closed, -ETIMEDOUT if no response arrives.
+ * NOTE(review): the smd_write() return value is ignored - a short write
+ * would only surface as a timeout here; confirm that is intended.
+ */
+static int sysmon_send_smd(struct sysmon_subsys *ss, const char *tx_buf,
+			   size_t len)
+{
+	int ret;
+
+	if (!ss->chan_open)
+		return -ENODEV;
+
+	init_completion(&ss->resp_ready);
+	pr_debug("Sending SMD message: %s\n", tx_buf);
+	smd_write(ss->chan, tx_buf, len);
+	ret = wait_for_completion_timeout(&ss->resp_ready,
+				  msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/*
+ * Send @tx_buf to the external modem over HSIC and synchronously read the
+ * response into ss->rx_buf.  Both directions use TIMEOUT_MS.  Returns 0 on
+ * success or the first failing transfer's error code.
+ */
+static int sysmon_send_hsic(struct sysmon_subsys *ss, const char *tx_buf,
+			    size_t len)
+{
+	int ret;
+	size_t actual_len;
+
+	pr_debug("Sending HSIC message: %s\n", tx_buf);
+	ret = hsic_sysmon_write(HSIC_SYSMON_DEV_EXT_MODEM,
+				tx_buf, len, TIMEOUT_MS);
+	if (ret)
+		return ret;
+	ret = hsic_sysmon_read(HSIC_SYSMON_DEV_EXT_MODEM, ss->rx_buf,
+			ARRAY_SIZE(ss->rx_buf), &actual_len, TIMEOUT_MS);
+	return ret;
+}
+
+/*
+ * Dispatch @tx_buf over the subsystem's configured transport (SMD or
+ * HSIC).  On success the response is available in ss->rx_buf.  Returns
+ * -EINVAL for an unknown transport, otherwise the transport's result.
+ */
+static int sysmon_send_msg(struct sysmon_subsys *ss, const char *tx_buf,
+			   size_t len)
+{
+	int rc;
+
+	if (ss->transport == TRANSPORT_SMD)
+		rc = sysmon_send_smd(ss, tx_buf, len);
+	else if (ss->transport == TRANSPORT_HSIC)
+		rc = sysmon_send_hsic(ss, tx_buf, len);
+	else
+		rc = -EINVAL;
+
+	if (!rc)
+		pr_debug("Received response: %s\n", ss->rx_buf);
+
+	return rc;
+}
+
+/**
+ * sysmon_send_event_no_qmi() - Notify a subsystem of another's state change
+ * @dest_desc:	Subsystem descriptor of the subsystem the notification
+ * should be sent to
+ * @event_desc:	Subsystem descriptor of the subsystem that generated the
+ * notification
+ * @notif:	ID of the notification type (ex. SUBSYS_BEFORE_SHUTDOWN)
+ *
+ * Returns 0 for success, -EINVAL for invalid destination or notification IDs,
+ * -ENODEV if the transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds, but with something other than an acknowledgement.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+				struct subsys_desc *event_desc,
+				enum subsys_notif_type notif)
+{
+
+	char tx_buf[TX_BUF_SIZE];
+	int ret;
+	struct sysmon_subsys *tmp, *ss = NULL;
+	const char *event_ss = event_desc->name;
+
+	/* Look up the destination's transport entry by platform device id */
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(tmp, &sysmon_list, list)
+		if (tmp->pid == dest_desc->sysmon_pid)
+			ss = tmp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL)
+		return -EINVAL;
+
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || event_ss == NULL ||
+						notif_name[notif] == NULL)
+		return -EINVAL;
+
+	/* Wire format: "ssr:<event subsystem>:<event name>" */
+	snprintf(tx_buf, ARRAY_SIZE(tx_buf), "ssr:%s:%s", event_ss,
+		 notif_name[notif]);
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, strlen(tx_buf));
+	if (ret) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	/* Anything other than "ssr:ack" counts as an unexpected response */
+	if (strcmp(ss->rx_buf, "ssr:ack")) {
+		pr_debug("Unexpected response %s\n", ss->rx_buf);
+		ret = -ENOSYS;
+	}
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_event_no_qmi);
+EXPORT_SYMBOL(sysmon_send_event_no_qmi);
+
+/**
+ * sysmon_send_shutdown_no_qmi() - send shutdown command to a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to send to
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc)
+{
+	struct sysmon_subsys *tmp, *ss = NULL;
+	const char tx_buf[] = "system:shutdown";
+	const char expect[] = "system:ack";
+	int ret;
+
+	/* Look up the destination's transport entry by platform device id */
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(tmp, &sysmon_list, list)
+		if (tmp->pid == dest_desc->sysmon_pid)
+			ss = tmp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL)
+		return -EINVAL;
+
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	mutex_lock(&ss->lock);
+	/* ARRAY_SIZE includes the terminating NUL — presumably the remote
+	 * parser expects it; confirm against the peer implementation. */
+	ret = sysmon_send_msg(ss, tx_buf, ARRAY_SIZE(tx_buf));
+	if (ret) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strcmp(ss->rx_buf, expect)) {
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -ENOSYS;
+	}
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_shutdown_no_qmi);
+
+/**
+ * sysmon_get_reason_no_qmi() - Retrieve failure reason from a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to query
+ * @buf:	Caller-allocated buffer for the returned NUL-terminated reason
+ * @len:	Length of @buf
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+				char *buf, size_t len)
+{
+	struct sysmon_subsys *tmp, *ss = NULL;
+	const char tx_buf[] = "ssr:retrieve:sfr";
+	const char expect[] = "ssr:return:";
+	/* prefix length excluding the terminating NUL */
+	size_t prefix_len = ARRAY_SIZE(expect) - 1;
+	int ret;
+
+	/* Look up the destination's transport entry by platform device id */
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(tmp, &sysmon_list, list)
+		if (tmp->pid == dest_desc->sysmon_pid)
+			ss = tmp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL || buf == NULL || len == 0)
+		return -EINVAL;
+
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, ARRAY_SIZE(tx_buf));
+	if (ret) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	/* The reason string follows the "ssr:return:" prefix */
+	if (strncmp(ss->rx_buf, expect, prefix_len)) {
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -ENOSYS;
+		goto out;
+	}
+	strlcpy(buf, ss->rx_buf + prefix_len, len);
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_get_reason_no_qmi);
+
+/*
+ * SMD channel event callback.  On data, copy the response into rx_buf and
+ * wake the sender waiting on resp_ready; on open/close, track channel
+ * availability for sysmon_send_smd().
+ * NOTE(review): a response of exactly RX_BUF_SIZE bytes would leave rx_buf
+ * without a NUL terminator, yet callers strcmp() it — confirm remote
+ * messages are always short and terminated.
+ */
+static void sysmon_smd_notify(void *priv, unsigned int smd_event)
+{
+	struct sysmon_subsys *ss = priv;
+
+	switch (smd_event) {
+	case SMD_EVENT_DATA: {
+		if (smd_read_avail(ss->chan) > 0) {
+			smd_read_from_cb(ss->chan, ss->rx_buf,
+					 ARRAY_SIZE(ss->rx_buf));
+			complete(&ss->resp_ready);
+		}
+		break;
+	}
+	case SMD_EVENT_OPEN:
+		ss->chan_open = true;
+		break;
+	case SMD_EVENT_CLOSE:
+		ss->chan_open = false;
+		break;
+	}
+}
+
+/*
+ * Probe one sysmon transport instance.  pdev->id doubles as the subsystem
+ * identifier (enum subsys_id): SMD edge ids select the SMD transport,
+ * SYSMON_SS_EXT_MODEM selects HSIC.  The entry is appended to sysmon_list
+ * keyed by that id.
+ */
+static int sysmon_probe(struct platform_device *pdev)
+{
+	struct sysmon_subsys *ss;
+	int ret;
+
+	if (pdev->id < 0 || pdev->id >= SYSMON_NUM_SS)
+		return -ENODEV;
+
+	ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+	if (!ss)
+		return -ENOMEM;
+
+	mutex_init(&ss->lock);
+	if (pdev->id == SYSMON_SS_EXT_MODEM) {
+		ss->transport = TRANSPORT_HSIC;
+		ret = hsic_sysmon_open(HSIC_SYSMON_DEV_EXT_MODEM);
+		if (ret) {
+			pr_err("HSIC open failed\n");
+			return ret;
+		}
+	} else if (pdev->id < SMD_NUM_TYPE) {
+		ss->transport = TRANSPORT_SMD;
+		ret = smd_named_open_on_edge("sys_mon", pdev->id, &ss->chan,
+					     ss, sysmon_smd_notify);
+		if (ret) {
+			pr_err("SMD open failed\n");
+			return ret;
+		}
+		/* Responses are pulled from the notify callback instead */
+		smd_disable_read_intr(ss->chan);
+	} else
+		return -EINVAL;
+
+	ss->dev = &pdev->dev;
+	ss->pid = pdev->id;
+
+	mutex_lock(&sysmon_list_lock);
+	INIT_LIST_HEAD(&ss->list);
+	list_add_tail(&ss->list, &sysmon_list);
+	mutex_unlock(&sysmon_list_lock);
+	return 0;
+}
+
+/*
+ * Remove a sysmon transport instance: unlink its entry from sysmon_list
+ * and close the underlying SMD channel or HSIC device.  The entry itself
+ * is devm-allocated and freed with the platform device.
+ */
+static int sysmon_remove(struct platform_device *pdev)
+{
+	struct sysmon_subsys *sysmon, *tmp, *ss = NULL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry_safe(sysmon, tmp, &sysmon_list, list) {
+		if (sysmon->pid == pdev->id) {
+			ss = sysmon;
+			list_del(&ss->list);
+		}
+	}
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&ss->lock);
+	switch (ss->transport) {
+	case TRANSPORT_SMD:
+		smd_close(ss->chan);
+		break;
+	case TRANSPORT_HSIC:
+		hsic_sysmon_close(HSIC_SYSMON_DEV_EXT_MODEM);
+		break;
+	}
+	mutex_unlock(&ss->lock);
+
+	return 0;
+}
+
+/* Platform driver bound to "sys_mon" devices, one per transport instance */
+static struct platform_driver sysmon_driver = {
+	.probe		= sysmon_probe,
+	.remove		= sysmon_remove,
+	.driver		= {
+		.name		= "sys_mon",
+		.owner		= THIS_MODULE,
+	},
+};
+
+/* Registered at subsys_initcall time so SSR consumers can rely on it */
+static int __init sysmon_init(void)
+{
+	return platform_driver_register(&sysmon_driver);
+}
+subsys_initcall(sysmon_init);
+
+static void __exit sysmon_exit(void)
+{
+	platform_driver_unregister(&sysmon_driver);
+}
+module_exit(sysmon_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("system monitor communication library");
+MODULE_ALIAS("platform:sys_mon");
diff --git a/include/soc/qcom/sysmon.h b/include/soc/qcom/sysmon.h
new file mode 100644
index 000000000000..bda973764d8b
--- /dev/null
+++ b/include/soc/qcom/sysmon.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_SYSMON_H
+#define __MSM_SYSMON_H
+
+#include <soc/qcom/smd.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+/**
+ * enum subsys_id - Destination subsystems for events.
+ *
+ * SMD-backed entries reuse their SMD edge value, so the id can be passed
+ * directly to smd_named_open_on_edge().  Ids at or above SMD_NUM_TYPE
+ * identify subsystems reached over other transports (e.g. HSIC for the
+ * external modem).
+ */
+enum subsys_id {
+	/* SMD subsystems */
+	SYSMON_SS_MODEM = SMD_APPS_MODEM,
+	SYSMON_SS_LPASS = SMD_APPS_QDSP,
+	SYSMON_SS_WCNSS = SMD_APPS_WCNSS,
+	SYSMON_SS_DSPS = SMD_APPS_DSPS,
+	SYSMON_SS_Q6FW = SMD_APPS_Q6FW,
+
+	/* Non-SMD subsystems */
+	SYSMON_SS_EXT_MODEM = SMD_NUM_TYPE,
+	SYSMON_NUM_SS
+};
+
+/**
+ * enum ssctl_ssr_event_enum_type - Subsystem notification type.
+ *
+ * The _MIN/_MAX sentinels span the full signed 32-bit range, forcing the
+ * compiler to give the enum a 32-bit representation (QMI IDL convention,
+ * keeps the on-the-wire encoding stable).
+ */
+enum ssctl_ssr_event_enum_type {
+	SSCTL_SSR_EVENT_ENUM_TYPE_MIN_ENUM_VAL = -2147483647,
+	SSCTL_SSR_EVENT_BEFORE_POWERUP = 0,
+	SSCTL_SSR_EVENT_AFTER_POWERUP = 1,
+	SSCTL_SSR_EVENT_BEFORE_SHUTDOWN = 2,
+	SSCTL_SSR_EVENT_AFTER_SHUTDOWN = 3,
+	SSCTL_SSR_EVENT_ENUM_TYPE_MAX_ENUM_VAL = 2147483647
+};
+
+/**
+ * enum ssctl_ssr_event_driven_enum_type - Subsystem shutdown type.
+ *
+ * As above, the _MIN/_MAX sentinels pin the enum to a 32-bit
+ * representation for the QMI wire format.
+ */
+enum ssctl_ssr_event_driven_enum_type {
+	SSCTL_SSR_EVENT_DRIVEN_ENUM_TYPE_MIN_ENUM_VAL = -2147483647,
+	SSCTL_SSR_EVENT_FORCED = 0,
+	SSCTL_SSR_EVENT_GRACEFUL = 1,
+	SSCTL_SSR_EVENT_DRIVEN_ENUM_TYPE_MAX_ENUM_VAL = 2147483647
+};
+
+/*
+ * Public sysmon API.  Real implementations are provided when
+ * CONFIG_MSM_SYSMON_COMM or CONFIG_MSM_SYSMON_GLINK_COMM is enabled;
+ * otherwise the inline stubs below turn every call into a successful
+ * no-op (return 0 / void), so callers never need #ifdef guards.
+ */
+#if defined(CONFIG_MSM_SYSMON_COMM) || defined(CONFIG_MSM_SYSMON_GLINK_COMM)
+extern int sysmon_send_event(struct subsys_desc *dest_desc,
+			     struct subsys_desc *event_desc,
+			     enum subsys_notif_type notif);
+extern int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+				    struct subsys_desc *event_desc,
+				    enum subsys_notif_type notif);
+extern int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf,
+			     size_t len);
+extern int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+				    char *buf, size_t len);
+extern int sysmon_send_shutdown(struct subsys_desc *dest_desc);
+extern int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc);
+extern int sysmon_notifier_register(struct subsys_desc *desc);
+extern void sysmon_notifier_unregister(struct subsys_desc *desc);
+extern int sysmon_glink_register(struct subsys_desc *desc);
+extern void sysmon_glink_unregister(struct subsys_desc *desc);
+#else
+/* Stubs: sysmon support not compiled in; every operation "succeeds". */
+static inline int sysmon_send_event(struct subsys_desc *dest_desc,
+				    struct subsys_desc *event_desc,
+				    enum subsys_notif_type notif)
+{
+	return 0;
+}
+static inline int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+					   struct subsys_desc *event_desc,
+					   enum subsys_notif_type notif)
+{
+	return 0;
+}
+static inline int sysmon_get_reason(struct subsys_desc *dest_desc,
+				    char *buf, size_t len)
+{
+	return 0;
+}
+static inline int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+					   char *buf, size_t len)
+{
+	return 0;
+}
+static inline int sysmon_send_shutdown(struct subsys_desc *dest_desc)
+{
+	return 0;
+}
+static inline int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc)
+{
+	return 0;
+}
+static inline int sysmon_notifier_register(struct subsys_desc *desc)
+{
+	return 0;
+}
+static inline void sysmon_notifier_unregister(struct subsys_desc *desc)
+{
+}
+static inline int sysmon_glink_register(struct subsys_desc *desc)
+{
+	return 0;
+}
+static inline void sysmon_glink_unregister(struct subsys_desc *desc)
+{
+}
+#endif
+
+#endif