author    Linux Build Service Account <lnxbuild@localhost>  2016-04-18 12:57:33 -0700
committer Linux Build Service Account <lnxbuild@localhost>  2016-04-18 12:57:34 -0700
commit    ff586a964c7d34d12eaeabc7ece026b4b67a05fc (patch)
tree      4c0e70babc54b30fa3e95836730c88d7384b31b6
parent    45155991fba12d4873758efad79d3333322eda69 (diff)
parent    217cb4db86edfff435195d5f536f76888b59249a (diff)
Promotion of kernel.lnx.4.4-160412.
CRs              Change ID                                  Subject
--------------------------------------------------------------------------------------------------------------
1001463          Idc71b0abb24cf8c103dfde893ba8c40d342a7fb8  ARM: dts: msm: Fix USB3_PHY_SW_RESET register's offset o
974792           I9d835cddc85c007bcc6b918b9dc4335b82b97306  ARM: dts: msm: Add RPM handshake with System sleep for M
1001866 450383   I76394b203b4ab2312437c839976f0ecb7b6dde4e  android/lowmemorykiller: Ignore tasks with freed mm
1001866          I88515703d64730e42598ab16136dcce4c18b099c  lowmemorykiller: Do proper NULL checks
1004911          I991dfdc22936dba667110de338d0109c58e68bd5  defconfig: Enable Event timers for msmcortex defconfig
990856           Iae170042b70c6eaf5bc05ea2b4a1ccdb7dd6f946  msm: ipa3: add support for MHI burst mode
1002397          I0f68975dfcad9483182e5af5477153f39a98ac1f  uapi: Add MHI device
1001866 437016   I7f06d53e2d8cfe7439e5561fe6e5209ce73b1c90  android/lowmemorykiller: Selectively count free CMA page
998858           Id6d18a67329abe5a89b4284fc5a3d1cf6a042dc4  defconfig: arm64: set SELINUX as default security for ms
973565           I73b40d264c4054a43c2776337b80af88adff077e  msm: kgsl: Enable GPMU and SPTP/RAC power collapse on A5
1004911          I09313d7809ec939a9d0440d0ab30a5992f512b96  defconfig: Enable MPM for msmcortex defconfig
1001463          Ia826e361d8259126a8168c07539ba4b4f6053f65  ARM: dts: msm: Update QMP PHY supply name on msm8996 and
999530           Idd56140e11f4fdc48fd999a1e808f3263024f34d  soc: qcom: Service notification driver for remote servic
997749           Ie80846978a84a114b38d69b0bca86639bfab7e91  soc: qcom: remoteqdss: Implement the remaining message t
1001866          I3b6876c5ecdf192ecc271aed3f37579f66d47a08  lowmemorykiller: enhance debug information
1001866          I6f1f8660d5da920a0e3af45a160499965032081d  android: lowmemorykiller: add lmk parameters tunning cod
986169           I6dfcab462a933ef31e3bba6bef07f17016ae50b9  msm: kgsl: Return EOPNOTSUPP for A3XX command batch prof
995378           I1a9aeb3f1dd67f014847322e5b14cba8775a82a4  msm: kgsl: Pass correct buffer size for mapping gpuobj u
1002397          I64990a972cbf7c2022d638c35f7517071de67f19  msm: mhi_dev: Add MHI device driver
995821           I39539cf661d9e0e0bb59236c92b169d3054485a9  msm: ipa: add common internal header
1001866 648978   I53f5f064ac16a50ee10c84ff2bb50fdb7e085bd0  mm: Increase number of GFP masks
1001866          I938644584f374763d10d429d835e74daa4854a38  lowmemorykiller: Account for highmem during kswapd recla
988993           I0e097d7e4e4c414c0849e33bcc61a26fb94291ad  msm: kgsl: verify user memory permissions before mapping
1001866          I5eb6e4b463f81142a2a7824db389201357432ec7  lowmemorykiller: use for_each_thread instead of buggy wh
993518           I75033cdf4637881ecd6fa4dd31aea083b134e6d2  msm: kgsl: Zero the adreno ioctl command buffer
1001866 893699   I2d77103d7c8f4d8a66e4652cba78e619a7bcef9a  lowmemorykiller: avoid false adaptive LMK triggers
1001469          Ic62b4dae798726055beb778509e6b65e69f4db34  usb: gadget: f_gsi: Add missing includes to compile f_gs
1001469          Ib7dce6b2ae1670554a29847e4381e71ba7b75edf  USB: QTI: Add missing usb_ctrl_qti.h
1001463          I4cc68a447d0cf3571a50b18d7eec5415430f9423  ARM: dts: msm: Select CML clock with USB QMP PHY on msmc
1001222          I74a00cc76ab86ee96905d270b1f6e09fb3fb9db7  ARM: dts:msm: Disable USB QMP PHY on msmcobalt
993267           I17d8b4ba2c74a787a065dbdb0ac88d065605fcb1  msm: kgsl: Fix gpudev NULL dereference in adreno_remove
1001866          I1a08160c35d3e33bdfd1d2c789c288fc07d0f0d3  lowmemorykiller: adapt to vmpressure
997749           I7738b6369e095868fa6087aac94116f4767dc168  soc: qcom: remoteqdss: Implement the new communication p
1000079          I8563fafc56515fde764046f882814c1c6e4c4299  coresight: replace bitmap_scnprintf with scnprintf
1001866          I38239283e572f814b277c718eaf6be7f92abacbb  android/lowmemorykiller: Account for total_swapcache_pag
990078           I04a1a44f12dd3a09c50b4fe39e14a2bd636b24de  msm: kgsl: Correct the order of preemption packets
1001463          If100d36bade241dedf28e3cea9e07be192bdfdc2  usb: phy: qmp: Add support to use different voltage with
1001866 452508   I6165dda01b705309eebabc6dfa67146b7a95c174  cma: redirect page allocation to CMA
1001469          I91b2531a2ce739613181f2e13c692263d9e2454a  defconfig: Enable USB GSI Function driver for msmcobalt
1004911          I88c579189287d655b10b48496be39ed9f20d9bfc  ARM: dts: msm: Add device node for MPM for MSMCobalt
978318           I7b9faf485dda6f450f6df4410e3ae25efa40aad1  ARM: dts: msm: update the pmcobalt GPIO nodes
1001866          I650bbfbf0fbbabd01d82bdb3502b57ff59c3e14f  lowmemorykiller: Don't count swap cache pages twice
1004911          I59c99348a44d364d74af6b67ccabdd2d8c5008b0  soc: qcom: event_timer: Fix irq_desc structure usage
973565           Ie6480fc3ba0e1b95aab40e31b09ff2bd798ff30f  msm: kgsl: Invoke DCVS callbacks on A540
1004911          I67244ff55690c164634e9233e2d0cec3388c5be8  defconfig: enable MSM_PM for msmcortex
999351           I49ae94cea34dda039d03dbeeab2add2bdd1760bd  msm: ipa: fix to handle deaggr error
1001866          I809589a25c6abca51f1c963f118adfc78e955cf9  mm: add cma pcp list
983144           I4a933c9b3355b0aa1b653719ec1ec7ded1f368dd  msm: ADSPRPC: FastRPC migration to GLINK from SMD
995821           I4a7d8c328af7cd5506b3fbbdc76b1bc5bb0de698  msm: ipa: unify IPA RM
988881           Idc8ac5fc273d91035254c675c7cedefa9a92a5a5  ARM: dts: msm: add IPA smp2p entries for msmcobalt

Change-Id: I855482e011c76b10ad187940905ea508725dc8d8
CRs-Fixed: 999530, 973565, 1001469, 983144, 1001463, 1002397, 1001222, 1001866, 993267, 995378, 998858, 999351, 974792, 1004911, 978318, 997749, 990078, 452508, 990856, 993518, 995821, 986169, 450383, 1000079, 648978, 988881, 893699, 988993, 437016
-rw-r--r--  Documentation/arm/msm/service_notifier.txt | 43
-rw-r--r--  Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt | 34
-rw-r--r--  Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/usb/msm-phy.txt | 5
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi | 257
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-smp2p.dtsi | 25
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt.dtsi | 69
-rw-r--r--  arch/arm/include/asm/thread_info.h | 1
-rw-r--r--  arch/arm64/configs/msmcortex-perf_defconfig | 5
-rw-r--r--  arch/arm64/configs/msmcortex_defconfig | 5
-rw-r--r--  drivers/char/adsprpc.c | 189
-rw-r--r--  drivers/char/adsprpc_shared.h | 3
-rw-r--r--  drivers/gpu/msm/adreno-gpulist.h | 3
-rw-r--r--  drivers/gpu/msm/adreno.c | 7
-rw-r--r--  drivers/gpu/msm/adreno.h | 1
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c | 59
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.h | 5
-rw-r--r--  drivers/gpu/msm/adreno_ioctl.c | 4
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c | 20
-rw-r--r--  drivers/gpu/msm/kgsl.c | 26
-rw-r--r--  drivers/hwtracing/coresight/coresight-stm.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpdm.c | 4
-rw-r--r--  drivers/platform/msm/Kconfig | 10
-rw-r--r--  drivers/platform/msm/Makefile | 1
-rw-r--r--  drivers/platform/msm/ipa/Makefile | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_api.c | 535
-rw-r--r--  drivers/platform/msm/ipa/ipa_api.h | 64
-rw-r--r--  drivers/platform/msm/ipa/ipa_clients/ipa_usb.c | 39
-rw-r--r--  drivers/platform/msm/ipa/ipa_common_i.h | 128
-rw-r--r--  drivers/platform/msm/ipa/ipa_rm.c (renamed from drivers/platform/msm/ipa/ipa_v2/ipa_rm.c) | 74
-rw-r--r--  drivers/platform/msm/ipa/ipa_rm_dependency_graph.c (renamed from drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.c) | 0
-rw-r--r--  drivers/platform/msm/ipa/ipa_rm_dependency_graph.h (renamed from drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.h) | 0
-rw-r--r--  drivers/platform/msm/ipa/ipa_rm_i.h (renamed from drivers/platform/msm/ipa/ipa_v2/ipa_rm_i.h) | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c (renamed from drivers/platform/msm/ipa/ipa_v2/ipa_rm_inactivity_timer.c) | 84
-rw-r--r--  drivers/platform/msm/ipa/ipa_rm_peers_list.c (renamed from drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.c) | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_rm_peers_list.h (renamed from drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.h) | 0
-rw-r--r--  drivers/platform/msm/ipa/ipa_rm_resource.c (renamed from drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c) | 29
-rw-r--r--  drivers/platform/msm/ipa/ipa_rm_resource.h (renamed from drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.h) | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/Makefile | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa.c | 138
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_client.c | 18
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 26
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_dma.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 80
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 166
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c | 42
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc.c | 24
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c | 30
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 30
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 128
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 62
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/teth_bridge.c | 16
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/Makefile | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 116
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 40
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 157
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 226
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm.c | 1039
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.c | 245
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.h | 47
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm_i.h | 129
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c | 268
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.c | 247
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.h | 53
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c | 1176
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.h | 164
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc.c | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 36
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 62
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/teth_bridge.c | 16
-rw-r--r--  drivers/platform/msm/mhi_dev/Makefile | 6
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi.c | 1952
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi.h | 1126
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_hwio.h | 191
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_mmio.c | 999
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_ring.c | 438
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_sm.c | 1319
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_sm.h | 51
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_uci.c | 835
-rw-r--r--  drivers/soc/qcom/Kconfig | 10
-rw-r--r--  drivers/soc/qcom/Makefile | 1
-rw-r--r--  drivers/soc/qcom/event_timer.c | 4
-rw-r--r--  drivers/soc/qcom/remoteqdss.c | 278
-rw-r--r--  drivers/soc/qcom/service-notifier.c | 660
-rw-r--r--  drivers/soc/qcom/service-notifier.h | 303
-rw-r--r--  drivers/staging/android/lowmemorykiller.c | 340
-rw-r--r--  drivers/usb/gadget/function/f_gsi.h | 2
-rw-r--r--  drivers/usb/phy/phy-msm-ssusb-qmp.c | 99
-rw-r--r--  include/linux/gfp.h | 13
-rw-r--r--  include/linux/highmem.h | 15
-rw-r--r--  include/linux/mmzone.h | 18
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  include/trace/events/almk.h | 84
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  include/uapi/linux/mhi.h | 37
-rw-r--r--  include/uapi/linux/usb/Kbuild | 1
-rw-r--r--  include/uapi/linux/usb/usb_ctrl_qti.h | 41
-rw-r--r--  kernel/exit.c | 6
-rw-r--r--  kernel/fork.c | 5
-rw-r--r--  mm/page_alloc.c | 93
-rw-r--r--  mm/swap_state.c | 2
-rw-r--r--  mm/vmstat.c | 3
108 files changed, 10616 insertions, 4921 deletions
diff --git a/Documentation/arm/msm/service_notifier.txt b/Documentation/arm/msm/service_notifier.txt
new file mode 100644
index 000000000000..cfa64256d93a
--- /dev/null
+++ b/Documentation/arm/msm/service_notifier.txt
@@ -0,0 +1,43 @@
+Introduction
+=============
+
+The service notifier driver provides a mechanism for a client
+to register for state notifications regarding a particular remote service.
+A remote service here refers to a process providing a certain service, such
+as audio; its identifier is supplied by the service locator. The process
+domain will typically run on a remote processor within the same SoC.
+
+Software Description
+=====================
+
+The driver provides the following two APIs:
+* service_notif_register_notifier() - Register a notifier for a service.
+ On success, it returns a handle. It takes the following arguments:
+ service_path: Individual service identifier path for which a client
+ registers for notifications.
+ instance_id: Instance id specific to a subsystem.
+ current_state: Current state of service returned by the registration
+ process.
+ notifier block: notifier callback for service events.
+
+* service_notif_unregister_notifier() - Unregister a notifier for a service.
+ This takes the handle returned during registration and the notifier block
+ previously registered as the arguments.
+
+Types of notifications:
+=======================
+
+A client can get either a SERVICE_DOWN notification or a SERVICE_UP
+notification. A SERVICE_UP notification is sent out when the service comes
+up and is functional, while a SERVICE_DOWN notification is sent after a
+service ceases to exist. By the time a SERVICE_DOWN notification is sent out,
+all clients should assume that the service is already dead.
+
+Interaction with SSR
+=====================
+In general, it is recommended that clients register for either service
+notifications using the service notifier or SSR notifications, but not both.
+In case it is necessary to register for both, the client can expect to get
+the SERVICE_DOWN notification before the SUBSYS_AFTER_SHUTDOWN notification.
+However, the client may receive the SUBSYS_BEFORE_SHUTDOWN notification
+either before or after the SERVICE_DOWN notification.
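As an illustration of the API described in this new document, here is a
minimal client sketch in kernel C. The SERVICE_UP/SERVICE_DOWN event names
and the argument order follow the description above; the service_path
string, the instance_id value (74) and the function names are illustrative
placeholders, not values mandated by the driver.

  #include <linux/err.h>
  #include <linux/notifier.h>

  static int audio_svc_cb(struct notifier_block *nb,
                          unsigned long event, void *data)
  {
          switch (event) {
          case SERVICE_UP:
                  /* service is up and functional */
                  break;
          case SERVICE_DOWN:
                  /* assume the service is already dead; drop local state */
                  break;
          }
          return NOTIFY_OK;
  }

  static struct notifier_block audio_svc_nb = {
          .notifier_call = audio_svc_cb,
  };

  static void *svc_handle;
  static int curr_state;

  static int audio_svc_register(void)
  {
          svc_handle = service_notif_register_notifier("msm/adsp/audio_pd",
                          74, &audio_svc_nb, &curr_state);
          return PTR_ERR_OR_ZERO(svc_handle);
  }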
diff --git a/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt b/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt
new file mode 100644
index 000000000000..49d33a3c4440
--- /dev/null
+++ b/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt
@@ -0,0 +1,34 @@
+MSM MHI DEV
+
+MSM MHI DEV enables communication with the host over a PCIe link using the
+Modem Host Interface protocol. The driver interfaces with the IPA to
+enable the HW-accelerated channel path and provides an interface for
+software channels to communicate between the host and the device.
+
+Required properties:
+ - compatible: should be "qcom,msm-mhi-dev" for MHI device driver.
+ - reg: MHI MMIO physical register space.
+ - reg-names: resource names used for the MHI MMIO physical address region,
+ IPA uC command and event ring doorbell mail box address.
+ Should be "mhi_mmio_base" for MHI MMIO physical address,
+ "ipa_uc_mbox_crdb" for IPA uC Command Ring doorbell,
+ "ipa_uc_mbox_erdb" for IPA uC Event Ring doorbell passed to
+ the IPA driver.
+ - qcom,mhi-ifc-id: ID of the HW interface via which the MHI device side
+ communicates with the host side.
+ - qcom,mhi-ep-msi: Endpoint MSI number.
+ - qcom,mhi-version: MHI specification version supported by the device.
+
+Example:
+
+ mhi: qcom,msm-mhi-dev {
+ compatible = "qcom,msm-mhi-dev";
+ reg = <0xfc527000 0x1000>,
+ <0xfd4fa000 0x1>,
+ <0xfd4fa080 0x1>;
+ reg-names = "mhi_mmio_base", "ipa_uc_mbox_crdb",
+ "ipa_uc_mbox_erdb";
+ qcom,mhi-ifc-id = <0x030017cb>;
+ qcom,mhi-ep-msi = <1>;
+ qcom,mhi-version = <0x1000000>;
+ };
diff --git a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
index dfdf1f8fe1b5..f419655722d4 100644
--- a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
+++ b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
@@ -9,18 +9,22 @@ other tasks.
Required properties:
- compatible : Must be "qcom,msm-fastrpc-adsp"
+Optional properties:
+- qcom,fastrpc-glink: Flag to use glink instead of smd for IPC
+
Optional subnodes:
- qcom,msm_fastrpc_compute_cb : Child nodes representing the compute context
banks
-Subnode properties:
-- compatible : Must be "qcom,msm-fastrpc-compute-cb"
-- label: Label describing the channel this context bank belongs to
-- iommus : A list of phandle and IOMMU specifier pairs that describe the
- IOMMU master interfaces of the device
+Subnode Required properties:
+- compatible : Must be "qcom,msm-fastrpc-compute-cb"
+- label: Label describing the channel this context bank belongs to
+- iommus : A list of phandle and IOMMU specifier pairs that describe the
+ IOMMU master interfaces of the device
Example:
qcom,msm_fastrpc {
compatible = "qcom,msm-fastrpc-adsp";
+ qcom,fastrpc-glink;
qcom,msm_fastrpc_compute_cb_1 {
compatible = "qcom,msm-fastrpc-compute-cb";
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index cccdb281a31d..929fdee7157b 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -100,7 +100,7 @@ Required properties:
- <supply-name>-supply: phandle to the regulator device tree node
Required "supply-name" examples are:
"vdd" : vdd supply for SSPHY digital circuit operation
- "vdda18" : 1.8v high-voltage analog supply for SSPHY
+ "core" : high-voltage analog supply for SSPHY
- qcom,vdd-voltage-level: This property must be a list of three integer
values (no, min, max) where each value represents either a voltage in
microvolts or a value corresponding to voltage corner
@@ -121,6 +121,9 @@ Optional properties:
the USB PHY and the controller must rely on external VBUS notification in
order to manually relay the notification to the SSPHY.
- qcom,emulation: Indicates that we are running on emulation platform.
+ - qcom,core-voltage-level: This property must be a list of three integer
+ values (no, min, max) where each value represents either a voltage in
+ microvolts or a value corresponding to voltage corner.
Example:
ssphy0: ssphy@f9b38000 {
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index 3a5b55848d53..e0bd005f8b76 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -2140,7 +2140,8 @@
reg-names = "qmp_phy_base",
"vls_clamp_reg";
vdd-supply = <&pm8994_l28>;
- vdda18-supply = <&pm8994_l12>;
+ core-supply = <&pm8994_l12>;
+ qcom,core-voltage-level = <0 1800000 1800000>;
qcom,vdd-voltage-level = <0 925000 925000>;
qcom,vbus-valid-override;
qcom,qmp-phy-init-seq =
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi
index 06719a0fcd06..e68f746bda38 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi
@@ -280,9 +280,9 @@
};
&pmcobalt_gpios {
gpio@c800 { /* GPIO 9 - CAMERA SENSOR 2 VDIG */
- qcom,mode = <1>; /* Output*/
- qcom,pull = <4>; /* Pulldown 10uA */
- qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <0>; /* VIN1 GPIO_LV */
qcom,src-sel = <0>; /* GPIO */
qcom,invert = <0>; /* Invert */
qcom,master-en = <1>; /* Enable GPIO */
@@ -290,9 +290,9 @@
};
gpio@d300 { /* GPIO 20 - CAMERA SENSOR 0 VDIG */
- qcom,mode = <1>; /* Output*/
- qcom,pull = <4>; /* Pulldown 10uA */
- qcom,vin-sel = <1>; /* VIN1 GPIO_MV*/
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
qcom,src-sel = <0>; /* GPIO */
qcom,invert = <0>; /* Invert */
qcom,master-en = <1>; /* Enable GPIO */
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi
index 06719a0fcd06..e68f746bda38 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi
@@ -280,9 +280,9 @@
};
&pmcobalt_gpios {
gpio@c800 { /* GPIO 9 - CAMERA SENSOR 2 VDIG */
- qcom,mode = <1>; /* Output*/
- qcom,pull = <4>; /* Pulldown 10uA */
- qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <0>; /* VIN1 GPIO_LV */
qcom,src-sel = <0>; /* GPIO */
qcom,invert = <0>; /* Invert */
qcom,master-en = <1>; /* Enable GPIO */
@@ -290,9 +290,9 @@
};
gpio@d300 { /* GPIO 20 - CAMERA SENSOR 0 VDIG */
- qcom,mode = <1>; /* Output*/
- qcom,pull = <4>; /* Pulldown 10uA */
- qcom,vin-sel = <1>; /* VIN1 GPIO_MV*/
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
qcom,src-sel = <0>; /* GPIO */
qcom,invert = <0>; /* Invert */
qcom,master-en = <1>; /* Enable GPIO */
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
index 31b1c9486226..0c3dac376c55 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
@@ -9,6 +9,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <dt-bindings/interrupt-controller/arm-gic.h>
&soc {
qcom,spm@178120000 {
@@ -77,6 +78,7 @@
qcom,time-overhead = <550>;
qcom,min-child-idx = <3>;
qcom,is-reset;
+ qcom,notify-rpm;
};
qcom,pm-cluster@0{
@@ -277,4 +279,259 @@
qcom,sleep-stats-version = <2>;
};
+ qcom,mpm@7781b8 {
+ compatible = "qcom,mpm-v2";
+ reg = <0x7781b8 0x1000>, /* MSM_RPM_MPM_BASE 4K */
+ <0x17911008 0x4>; /* MSM_APCS_GCC_BASE 4K */
+ reg-names = "vmpm", "ipc";
+ interrupts = <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clock_gcc clk_cxo_lpm_clk>;
+ clock-names = "xo";
+ qcom,num-mpm-irqs = <96>;
+
+ qcom,ipc-bit-offset = <1>;
+
+ qcom,gic-parent = <&intc>;
+ qcom,gic-map = <2 216>, /* tsens_upper_lower_int */
+ <79 379>, /* qusb2phy_dmse_hv_prim */
+ <80 384>, /* qusb2phy_dmse_hv_sec */
+ <52 275>, /* qmp_usb3_lfps_rxterm_irq */
+ <87 358>, /* ee0_krait_hlos_spmi_periph_irq */
+ <0xff 16>, /* APCj_qgicdrCpu0HwFaultIrptReq */
+ <0xff 23>, /* APCj_qgicdrCpu0PerfMonIrptReq */
+ <0xff 27>, /* APCj_qgicdrCpu0QTmrVirtIrptReq */
+ <0xff 32>, /* APCj_qgicdrL2PerfMonIrptReq */
+ <0xff 33>, /* APCC_qgicL2PerfMonIrptReq */
+ <0xff 34>, /* APCC_qgicL2ErrorIrptReq */
+ <0xff 35>, /* WDT_barkInt */
+ <0xff 40>, /* qtimer_phy_irq */
+ <0xff 41>, /* APCj_qgicdrL2HwFaultNonFatalIrptReq */
+ <0xff 42>, /* APCj_qgicdrL2HwFaultFatalIrptReq */
+ <0xff 49>, /* L3UX_qgicL3ErrorIrptReq */
+ <0xff 54>, /* M4M_sysErrorInterrupt */
+ <0xff 55>, /* M4M_sysDlmInterrupt */
+ <0xff 57>, /* mss_to_apps_irq(0) */
+ <0xff 58>, /* mss_to_apps_irq(1) */
+ <0xff 59>, /* mss_to_apps_irq(2) */
+ <0xff 60>, /* mss_to_apps_irq(3) */
+ <0xff 61>, /* mss_a2_bam_irq */
+ <0xff 62>, /* QTMR_qgicFrm0VirtIrq */
+ <0xff 63>, /* QTMR_qgicFrm1PhysIrq */
+ <0xff 64>, /* QTMR_qgicFrm2PhysIrq */
+ <0xff 65>, /* QTMR_qgicFrm3PhysIrq */
+ <0xff 66>, /* QTMR_qgicFrm4PhysIrq */
+ <0xff 67>, /* QTMR_qgicFrm5PhysIrq */
+ <0xff 68>, /* QTMR_qgicFrm6PhysIrq */
+ <0xff 69>, /* QTMR_qgicFrm7PhysIrq */
+ <0xff 70>, /* iommu_pmon_nonsecure_irq */
+ <0xff 74>, /* osmmu_CIrpt[1] */
+ <0xff 75>, /* osmmu_CIrpt[0] */
+ <0xff 77>, /* osmmu_CIrpt[0] */
+ <0xff 78>, /* osmmu_CIrpt[0] */
+ <0xff 79>, /* osmmu_CIrpt[0] */
+ <0xff 80>, /* CPR3_irq */
+ <0xff 94>, /* osmmu_CIrpt[0] */
+ <0xff 97>, /* iommu_nonsecure_irq */
+ <0xff 99>, /* msm_iommu_pmon_nonsecure_irq */
+ <0xff 102>, /* osmmu_CIrpt[1] */
+ <0xff 105>, /* iommu_pmon_nonsecure_irq */
+ <0xff 108>, /* osmmu_PMIrpt */
+ <0xff 109>, /* ocmem_dm_nonsec_irq */
+ <0xff 110>, /* csiphy_0_irq */
+ <0xff 111>, /* csiphy_1_irq */
+ <0xff 112>, /* csiphy_2_irq */
+ <0xff 115>, /* mdss_irq */
+ <0xff 126>, /* bam_irq[0] */
+ <0xff 127>, /* blsp1_qup_irq(0) */
+ <0xff 132>, /* blsp1_qup_irq(5) */
+ <0xff 133>, /* blsp2_qup_irq(0) */
+ <0xff 134>, /* blsp2_qup_irq(1) */
+ <0xff 138>, /* blsp2_qup_irq(5) */
+ <0xff 140>, /* blsp1_uart_irq(1) */
+ <0xff 146>, /* blsp2_uart_irq(1) */
+ <0xff 155>, /* sdcc_irq[0] */
+ <0xff 157>, /* sdc2_irq[0] */
+ <0xff 163>, /* usb30_ee1_irq */
+ <0xff 164>, /* usb30_bam_irq(0) */
+ <0xff 165>, /* usb30_hs_phy_irq */
+ <0xff 166>, /* sdc1_pwr_cmd_irq */
+ <0xff 170>, /* sdcc_pwr_cmd_irq */
+ <0xff 173>, /* sdc1_irq[0] */
+ <0xff 174>, /* o_wcss_apss_smd_med */
+ <0xff 175>, /* o_wcss_apss_smd_low */
+ <0xff 176>, /* o_wcss_apss_smsm_irq */
+ <0xff 177>, /* o_wcss_apss_wlan_data_xfer_done */
+ <0xff 178>, /* o_wcss_apss_wlan_rx_data_avail */
+ <0xff 179>, /* o_wcss_apss_asic_intr */
+ <0xff 180>, /* pcie20_2_int_pls_err */
+ <0xff 181>, /* wcnss watchdog */
+ <0xff 188>, /* lpass_irq_out_apcs(0) */
+ <0xff 189>, /* lpass_irq_out_apcs(1) */
+ <0xff 190>, /* lpass_irq_out_apcs(2) */
+ <0xff 191>, /* lpass_irq_out_apcs(3) */
+ <0xff 192>, /* lpass_irq_out_apcs(4) */
+ <0xff 193>, /* lpass_irq_out_apcs(5) */
+ <0xff 194>, /* lpass_irq_out_apcs(6) */
+ <0xff 195>, /* lpass_irq_out_apcs(7) */
+ <0xff 196>, /* lpass_irq_out_apcs(8) */
+ <0xff 197>, /* lpass_irq_out_apcs(9) */
+ <0xff 198>, /* coresight-tmc-etr interrupt */
+ <0xff 200>, /* rpm_ipc(4) */
+ <0xff 201>, /* rpm_ipc(5) */
+ <0xff 202>, /* rpm_ipc(6) */
+ <0xff 203>, /* rpm_ipc(7) */
+ <0xff 204>, /* rpm_ipc(24) */
+ <0xff 205>, /* rpm_ipc(25) */
+ <0xff 206>, /* rpm_ipc(26) */
+ <0xff 207>, /* rpm_ipc(27) */
+ <0xff 208>,
+ <0xff 210>,
+ <0xff 211>, /* usb_dwc3_otg */
+ <0xff 215>, /* o_bimc_intr(0) */
+ <0xff 224>, /* spdm_realtime_irq[1] */
+ <0xff 238>, /* crypto_bam_irq[0] */
+ <0xff 240>, /* summary_irq_kpss */
+ <0xff 253>, /* sdc2_pwr_cmd_irq */
+ <0xff 258>, /* lpass_irq_out_apcs[21] */
+ <0xff 268>, /* bam_irq[1] */
+ <0xff 270>, /* bam_irq[0] */
+ <0xff 271>, /* bam_irq[0] */
+ <0xff 276>, /* wlan_pci */
+ <0xff 283>, /* pcie20_0_int_pls_err */
+ <0xff 284>, /* pcie20_0_int_aer_legacy */
+ <0xff 286>, /* pcie20_0_int_pls_link_down */
+ <0xff 290>, /* ufs_ice_nonsec_level_irq */
+ <0xff 293>, /* pcie20_2_int_pls_link_down */
+ <0xff 295>, /* camss_cpp_mmu_cirpt[0] */
+ <0xff 296>, /* camss_cpp_mmu_pmirpt */
+ <0xff 297>, /* ufs_intrq */
+ <0xff 302>, /* qdss_etrbytecnt_irq */
+ <0xff 310>, /* pcie20_1_int_pls_err */
+ <0xff 311>, /* pcie20_1_int_aer_legacy */
+ <0xff 313>, /* pcie20_1_int_pls_link_down */
+ <0xff 318>, /* venus0_mmu_pmirpt */
+ <0xff 319>, /* venus0_irq */
+ <0xff 325>, /* camss_irq18 */
+ <0xff 326>, /* camss_irq0 */
+ <0xff 327>, /* camss_irq1 */
+ <0xff 328>, /* camss_irq2 */
+ <0xff 329>, /* camss_irq3 */
+ <0xff 330>, /* camss_irq4 */
+ <0xff 331>, /* camss_irq5 */
+ <0xff 332>, /* sps */
+ <0xff 346>, /* camss_irq8 */
+ <0xff 347>, /* camss_irq9 */
+ <0xff 352>, /* mdss_mmu_cirpt[0] */
+ <0xff 353>, /* mdss_mmu_cirpt[1] */
+ <0xff 361>, /* ogpu_mmu_cirpt[0] */
+ <0xff 362>, /* ogpu_mmu_cirpt[1] */
+ <0xff 365>, /* ipa_irq[0] */
+ <0xff 366>, /* ogpu_mmu_pmirpt */
+ <0xff 367>, /* venus0_mmu_cirpt[0] */
+ <0xff 368>, /* venus0_mmu_cirpt[1] */
+ <0xff 369>, /* venus0_mmu_cirpt[2] */
+ <0xff 370>, /* venus0_mmu_cirpt[3] */
+ <0xff 375>, /* camss_vfe_mmu_cirpt[0] */
+ <0xff 376>, /* camss_vfe_mmu_cirpt[1] */
+ <0xff 380>, /* mdss_dma_mmu_cirpt[0] */
+ <0xff 381>, /* mdss_dma_mmu_cirpt[1] */
+ <0xff 385>, /* mdss_dma_mmu_pmirpt */
+ <0xff 387>, /* osmmu_CIrpt[0] */
+ <0xff 394>, /* osmmu_PMIrpt */
+ <0xff 403>, /* osmmu_PMIrpt */
+ <0xff 405>, /* osmmu_CIrpt[0] */
+ <0xff 413>, /* osmmu_PMIrpt */
+ <0xff 422>, /* ssc_irq_out_apcs[5] */
+ <0xff 424>, /* ipa_irq[2] */
+ <0xff 425>, /* lpass_irq_out_apcs[22] */
+ <0xff 426>, /* lpass_irq_out_apcs[23] */
+ <0xff 427>, /* lpass_irq_out_apcs[24] */
+ <0xff 428>, /* lpass_irq_out_apcs[25] */
+ <0xff 429>, /* lpass_irq_out_apcs[26] */
+ <0xff 430>, /* lpass_irq_out_apcs[27] */
+ <0xff 431>, /* lpass_irq_out_apcs[28] */
+ <0xff 432>, /* lpass_irq_out_apcs[29] */
+ <0xff 436>, /* lpass_irq_out_apcs[37] */
+ <0xff 437>, /* pcie20_0_int_msi_dev0 */
+ <0xff 445>, /* pcie20_1_int_msi_dev0 */
+ <0xff 453>, /* pcie20_2_int_msi_dev0 */
+ <0xff 461>, /* o_vmem_nonsec_irq */
+ <0xff 462>, /* tsens1_tsens_critical_int */
+ <0xff 464>, /* ipa_bam_irq[0] */
+ <0xff 465>, /* ipa_bam_irq[2] */
+ <0xff 477>, /* tsens0_tsens_critical_int */
+ <0xff 480>, /* q6_wdog_expired_irq */
+ <0xff 481>, /* mss_ipc_out_irq(4) */
+ <0xff 483>, /* mss_ipc_out_irq(6) */
+ <0xff 484>, /* mss_ipc_out_irq(7) */
+ <0xff 487>, /* mss_ipc_out_irq(30) */
+ <0xff 490>, /* tsens0_tsens_upper_lower_int */
+ <0xff 493>; /* sdc1_ice_nonsec_level_irq */
+
+ qcom,gpio-parent = <&tlmm>;
+ qcom,gpio-map = <3 1>,
+ <4 5>,
+ <5 9>,
+ <6 11>,
+ <7 66>,
+ <8 22>,
+ <9 24>,
+ <10 26>,
+ <11 34>,
+ <12 36>,
+ <13 37>, /* PCIe0 */
+ <14 38>,
+ <15 40>,
+ <16 42>,
+ <17 46>,
+ <18 50>,
+ <19 53>,
+ <20 54>,
+ <21 56>,
+ <22 57>,
+ <23 58>,
+ <24 59>,
+ <25 60>,
+ <26 61>,
+ <27 62>,
+ <28 63>,
+ <29 64>,
+ <30 71>,
+ <31 73>,
+ <32 77>,
+ <33 78>,
+ <34 79>,
+ <35 80>,
+ <36 82>,
+ <37 86>,
+ <38 91>,
+ <39 92>,
+ <40 95>,
+ <41 97>,
+ <42 101>,
+ <43 104>,
+ <44 106>,
+ <45 108>,
+ <46 112>,
+ <47 113>,
+ <48 110>,
+ <50 127>,
+ <51 115>,
+ <54 116>, /* PCIe2 */
+ <55 117>,
+ <56 118>,
+ <57 119>,
+ <58 120>,
+ <59 121>,
+ <60 122>,
+ <61 123>,
+ <62 124>,
+ <63 125>,
+ <64 126>,
+ <65 129>,
+ <66 131>,
+ <67 132>, /* PCIe1 */
+ <68 133>,
+ <69 145>;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-smp2p.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-smp2p.dtsi
index d20f3ba3ffe6..2926a6889395 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-smp2p.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-smp2p.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -241,4 +241,27 @@
compatible = "qcom,smp2pgpio_sleepstate_3_out";
gpios = <&smp2pgpio_sleepstate_3_out 0 0>;
};
+
+ /* ipa - outbound entry to mss */
+ smp2pgpio_ipa_1_out: qcom,smp2pgpio-ipa-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "ipa";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ipa - inbound entry from mss */
+ smp2pgpio_ipa_1_in: qcom,smp2pgpio-ipa-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "ipa";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index f04291d41c95..01f0e6a4fd2a 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -780,6 +780,17 @@
<90 512 206000 960000>, <90 585 206000 960000>, /* NOMINAL */
<90 512 206000 3600000>, <90 585 206000 3600000>; /* TURBO */
qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
+
+ /* smp2p gpio information */
+ qcom,smp2pgpio_map_ipa_1_out {
+ compatible = "qcom,smp2pgpio-map-ipa-1-out";
+ gpios = <&smp2pgpio_ipa_1_out 0 0>;
+ };
+
+ qcom,smp2pgpio_map_ipa_1_in {
+ compatible = "qcom,smp2pgpio-map-ipa-1-in";
+ gpios = <&smp2pgpio_ipa_1_in 0 0>;
+ };
};
qcom,ipa_fws@1e08000 {
@@ -851,6 +862,52 @@
};
};
+ qcom,msm_fastrpc {
+ compatible = "qcom,msm-fastrpc-adsp";
+ qcom,fastrpc-glink;
+
+ qcom,msm_fastrpc_compute_cb1 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 8>;
+ };
+ qcom,msm_fastrpc_compute_cb2 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 9>;
+ };
+ qcom,msm_fastrpc_compute_cb3 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 10>;
+ };
+ qcom,msm_fastrpc_compute_cb4 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 11>;
+ };
+ qcom,msm_fastrpc_compute_cb5 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 12>;
+ };
+ qcom,msm_fastrpc_compute_cb6 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 5>;
+ };
+ qcom,msm_fastrpc_compute_cb7 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 6>;
+ };
+ qcom,msm_fastrpc_compute_cb8 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 7>;
+ };
+ };
+
rpm_bus: qcom,rpm-smd {
compatible = "qcom,rpm-glink";
qcom,glink-edge = "rpm";
@@ -1221,10 +1278,11 @@
reg = <0x0a800000 0xcd00>;
interrupt-parent = <&intc>;
interrupts = <0 131 0>;
- usb-phy = <&qusb_phy0>, <&ssphy>;
+ usb-phy = <&qusb_phy0>, <&usb_nop_phy>;
tx-fifo-resize;
snps,nominal-elastic-buffer;
snps,hird_thresh = <0x10>;
+ maximum-speed = "high-speed";
};
qcom,usbbam@a904000 {
@@ -1295,13 +1353,14 @@
reg-names = "qmp_phy_base",
"vls_clamp_reg";
vdd-supply = <&pmcobalt_l1>;
- vdda18-supply = <&pmcobalt_l12>;
+ core-supply = <&pmcobalt_l2>;
qcom,vdd-voltage-level = <0 880000 880000>;
qcom,vbus-valid-override;
qcom,qmp-phy-init-seq =
/* <reg_offset, value, delay> */
<0x138 0x30 0x00 /* Common block */
0x3c 0x06 0x00
+ 0x80 0x14 0x00 /* SYSCLK_EN_SEL */
0x8c 0x08 0x00
0x15c 0x06 0x00
0x164 0x01 0x00
@@ -1417,7 +1476,7 @@
0xcd8 /* USB3_PHY_AUTONOMOUS_MODE_CTRL */
0xcdc /* USB3_PHY_LFPS_RXTERM_IRQ_CLEAR */
0xc04 /* USB3_PHY_POWER_DOWN_CONTROL */
- 0x000 /* USB3_PHY_SW_RESET */
+ 0xc00 /* USB3_PHY_SW_RESET */
0xc08>; /* USB3_PHY_START */
clocks = <&clock_gcc clk_gcc_usb3_phy_aux_clk>,
@@ -1438,6 +1497,10 @@
qcom,reset-ep-after-lpm-resume;
};
+ usb_nop_phy: usb_nop_phy {
+ compatible = "usb-nop-xceiv";
+ };
+
qcom,lpass@17300000 {
compatible = "qcom,pil-tz-generic";
reg = <0x17300000 0x00100>;
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 776757d1604a..f23454db246f 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,6 +148,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
+#define TIF_MM_RELEASED 21 /* task MM has been released */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
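TIF_MM_RELEASED is consumed by the lowmemorykiller changes elsewhere in this
series. A minimal sketch of setting and testing such a thread_info flag with
the standard helpers; the wrapper names below are illustrative, not the
actual call sites (which live in kernel/exit.c and the lowmemorykiller):

  #include <linux/sched.h>

  /* mark a task whose mm has been released, e.g. during exit */
  static inline void mark_mm_released(struct task_struct *tsk)
  {
          set_tsk_thread_flag(tsk, TIF_MM_RELEASED);
  }

  /* lets victim selection skip tasks whose memory is already gone */
  static inline bool task_mm_released(struct task_struct *tsk)
  {
          return test_tsk_thread_flag(tsk, TIF_MM_RELEASED);
  }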
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 5913b9491e57..0b075acbc8e4 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -293,6 +293,7 @@ CONFIG_POWER_RESET_QCOM=y
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_MSM_PM=y
CONFIG_MSM_APM=y
# CONFIG_HWMON is not set
CONFIG_THERMAL_TSENS8974=y
@@ -358,6 +359,7 @@ CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_GSI=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
@@ -430,6 +432,8 @@ CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_TRACER_PKT=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_EXTCON=y
@@ -480,7 +484,6 @@ CONFIG_DEBUG_RODATA=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
-CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 3bde1a0038c4..9c571aab8fc8 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -306,6 +306,7 @@ CONFIG_POWER_RESET_QCOM=y
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_MSM_PM=y
CONFIG_MSM_APM=y
# CONFIG_HWMON is not set
CONFIG_THERMAL_TSENS8974=y
@@ -372,6 +373,7 @@ CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_GSI=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
@@ -455,6 +457,8 @@ CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_TRACER_PKT=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_EXTCON=y
@@ -533,7 +537,6 @@ CONFIG_FREE_PAGES_RDONLY=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
-CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 0a70bc44dcdb..73e615dabe41 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -26,6 +26,7 @@
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/smd.h>
+#include <soc/qcom/glink.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/scatterlist.h>
@@ -137,6 +138,7 @@ struct smq_invoke_ctx {
uint32_t sc;
struct overlap *overs;
struct overlap **overps;
+ struct smq_msg msg;
};
struct fastrpc_ctx_lst {
@@ -159,7 +161,7 @@ struct fastrpc_session_ctx {
struct fastrpc_channel_ctx {
char *name;
char *subsys;
- smd_channel_t *chan;
+ void *chan;
struct device *dev;
struct fastrpc_session_ctx session[NUM_SESSIONS];
struct completion work;
@@ -174,6 +176,10 @@ struct fastrpc_channel_ctx {
int vmid;
int ramdumpenabled;
void *remoteheap_ramdump_dev;
+ struct glink_link_info link_info;
+ void *link_notify_handle;
+ struct glink_open_config cfg;
+ char *edge;
};
struct fastrpc_apps {
@@ -189,6 +195,7 @@ struct fastrpc_apps {
spinlock_t hlock;
struct ion_client *client;
struct device *dev;
+ bool glink;
};
struct fastrpc_mmap {
@@ -231,14 +238,15 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
.name = "adsprpc-smd",
.subsys = "adsp",
.channel = SMD_APPS_QDSP,
+ .edge = "lpass",
},
{
.name = "sdsprpc-smd",
.subsys = "dsps",
.channel = SMD_APPS_DSPS,
+ .edge = "dsps",
.vmid = VMID_SSC_Q6,
},
-
};
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
@@ -1164,31 +1172,40 @@ static void inv_args(struct smq_invoke_ctx *ctx)
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
uint32_t kernel, uint32_t handle)
{
- struct smq_msg msg = {0};
+ struct smq_msg *msg = &ctx->msg;
struct fastrpc_file *fl = ctx->fl;
int err = 0, len;
VERIFY(err, 0 != fl->apps->channel[fl->cid].chan);
if (err)
goto bail;
- msg.pid = current->tgid;
- msg.tid = current->pid;
+ msg->pid = current->tgid;
+ msg->tid = current->pid;
if (kernel)
- msg.pid = 0;
- msg.invoke.header.ctx = ptr_to_uint64(ctx);
- msg.invoke.header.handle = handle;
- msg.invoke.header.sc = ctx->sc;
- msg.invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
- msg.invoke.page.size = buf_page_size(ctx->used);
- spin_lock(&fl->apps->hlock);
- len = smd_write(fl->apps->channel[fl->cid].chan, &msg, sizeof(msg));
- spin_unlock(&fl->apps->hlock);
- VERIFY(err, len == sizeof(msg));
+ msg->pid = 0;
+ msg->invoke.header.ctx = ptr_to_uint64(ctx);
+ msg->invoke.header.handle = handle;
+ msg->invoke.header.sc = ctx->sc;
+ msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
+ msg->invoke.page.size = buf_page_size(ctx->used);
+
+ if (fl->apps->glink) {
+ err = glink_tx(fl->apps->channel[fl->cid].chan,
+ (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
+ GLINK_TX_REQ_INTENT);
+ } else {
+ spin_lock(&fl->apps->hlock);
+ len = smd_write((smd_channel_t *)
+ fl->apps->channel[fl->cid].chan,
+ msg, sizeof(*msg));
+ spin_unlock(&fl->apps->hlock);
+ VERIFY(err, len == sizeof(*msg));
+ }
bail:
return err;
}
-static void fastrpc_read_handler(int cid)
+static void fastrpc_smd_read_handler(int cid)
{
struct fastrpc_apps *me = &gfa;
struct smq_invoke_rsp rsp = {0};
@@ -1216,7 +1233,7 @@ static void smd_event_handler(void *priv, unsigned event)
fastrpc_notify_drivers(me, cid);
break;
case SMD_EVENT_DATA:
- fastrpc_read_handler(cid);
+ fastrpc_smd_read_handler(cid);
break;
}
}
@@ -1334,7 +1351,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
int pageslen;
} inbuf;
inbuf.pgid = current->tgid;
- inbuf.namelen = strlen(current->comm);
+ inbuf.namelen = strlen(current->comm) + 1;
inbuf.filelen = init->filelen;
VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, init->file,
init->filelen, mflags, &file));
@@ -1632,7 +1649,12 @@ static void fastrpc_channel_close(struct kref *kref)
int cid;
ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
- smd_close(ctx->chan);
+ if (!me->glink) {
+ smd_close(ctx->chan);
+ } else {
+ glink_unregister_link_state_cb(ctx->link_notify_handle);
+ glink_close(ctx->chan);
+ }
ctx->chan = 0;
mutex_unlock(&me->smd_mutex);
cid = ctx - &gcinfo[0];
@@ -1707,6 +1729,49 @@ static int fastrpc_session_free(struct fastrpc_channel_ctx *chan, int session)
return err;
}
+bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
+{
+ if (glink_queue_rx_intent(h, NULL, size))
+ return false;
+ return true;
+}
+
+void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr)
+{
+}
+
+void fastrpc_glink_notify_rx(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr, size_t size)
+{
+ struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
+ int len = size;
+
+ while (len >= sizeof(*rsp) && rsp) {
+ context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
+ rsp++;
+ len = len - sizeof(*rsp);
+ }
+ glink_rx_done(handle, ptr, true);
+}
+
+void fastrpc_glink_notify_state(void *handle, const void *priv, unsigned event)
+{
+ struct fastrpc_apps *me = &gfa;
+ int cid = (int)(uintptr_t)priv;
+
+ switch (event) {
+ case GLINK_CONNECTED:
+ complete(&me->channel[cid].work);
+ break;
+ case GLINK_LOCAL_DISCONNECTED:
+ break;
+ case GLINK_REMOTE_DISCONNECTED:
+ fastrpc_notify_drivers(me, cid);
+ break;
+ }
+}
+
static int fastrpc_device_release(struct inode *inode, struct file *file)
{
struct fastrpc_apps *me = &gfa;
@@ -1725,6 +1790,55 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
return 0;
}
+static void fastrpc_glink_register_cb(struct glink_link_state_cb_info *cb_info,
+ void *priv)
+{
+ switch (cb_info->link_state) {
+ case GLINK_LINK_STATE_UP:
+ if (priv)
+ complete(priv);
+ break;
+ case GLINK_LINK_STATE_DOWN:
+ break;
+ default:
+ pr_err("adsprpc: unknown glnk state %d\n", cb_info->link_state);
+ break;
+ }
+}
+
+static int fastrpc_glink_open(int cid, struct fastrpc_apps *me)
+{
+ int err = 0;
+ struct glink_open_config *cfg = &me->channel[cid].cfg;
+ struct glink_link_info *link_info = &me->channel[cid].link_info;
+
+ link_info->edge = gcinfo[cid].edge;
+ link_info->transport = "smem";
+ link_info->glink_link_state_notif_cb = fastrpc_glink_register_cb;
+ me->channel[cid].link_notify_handle = glink_register_link_state_cb(
+ &me->channel[cid].link_info,
+ (void *)(&me->channel[cid].work));
+ VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link_notify_handle));
+ if (err)
+ goto bail;
+
+ VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
+ RPC_TIMEOUT));
+ if (err)
+ goto bail;
+
+ cfg->priv = (void *)(uintptr_t)cid;
+ cfg->edge = gcinfo[cid].edge;
+ cfg->name = FASTRPC_GLINK_GUID;
+ cfg->notify_rx = fastrpc_glink_notify_rx;
+ cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
+ cfg->notify_state = fastrpc_glink_notify_state;
+ cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
+ VERIFY(err, 0 != (me->channel[cid].chan = glink_open(cfg)));
+bail:
+ return err;
+}
+
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
int cid = MINOR(inode->i_rdev);
@@ -1756,17 +1870,24 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
fl->ssrcount = me->channel[cid].ssrcount;
if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
(me->channel[cid].chan == 0)) {
- VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID,
- gcinfo[cid].channel,
- &me->channel[cid].chan,
- (void *)(uintptr_t)cid,
- smd_event_handler));
+ if (me->glink) {
+ VERIFY(err, 0 == fastrpc_glink_open(cid, me));
+ } else {
+ VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID,
+ gcinfo[cid].channel,
+ (smd_channel_t **)&me->channel[cid].chan,
+ (void *)(uintptr_t)cid,
+ smd_event_handler));
+ }
if (err)
goto bail;
+
VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
- RPC_TIMEOUT));
- if (err)
+ RPC_TIMEOUT));
+ if (err) {
+ me->channel[cid].chan = 0;
goto bail;
+ }
kref_init(&me->channel[cid].kref);
pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
MAJOR(me->dev_no), cid);
@@ -1884,10 +2005,16 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
mutex_lock(&me->smd_mutex);
ctx->ssrcount++;
if (ctx->chan) {
- smd_close(ctx->chan);
+ if (me->glink) {
+ glink_unregister_link_state_cb(
+ ctx->link_notify_handle);
+ glink_close(ctx->chan);
+ } else {
+ smd_close(ctx->chan);
+ }
ctx->chan = 0;
- pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
- MAJOR(me->dev_no), cid);
+ pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
+ gcinfo[cid].name, MAJOR(me->dev_no), cid);
}
mutex_unlock(&me->smd_mutex);
fastrpc_notify_drivers(me, cid);
@@ -1956,6 +2083,7 @@ static int fastrpc_cb_probe(struct device *dev)
VERIFY(err, chan->sesscount < NUM_SESSIONS);
if (err)
goto bail;
+
VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
"#iommu-cells", 0, &iommuspec));
if (err)
@@ -2093,6 +2221,9 @@ static int fastrpc_probe(struct platform_device *pdev)
return 0;
}
+ me->glink = of_property_read_bool(dev->of_node, "qcom,fastrpc-glink");
+ pr_debug("adsprpc: channel link type: %d\n", me->glink);
+
VERIFY(err, !of_platform_populate(pdev->dev.of_node,
fastrpc_match_table,
NULL, &pdev->dev));
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index fdd479df0d8d..d0a1e11871f3 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,7 @@
#define FASTRPC_IOCTL_INVOKE_FD _IOWR('R', 4, struct fastrpc_ioctl_invoke_fd)
#define FASTRPC_IOCTL_SETMODE _IOWR('R', 5, uint32_t)
#define FASTRPC_IOCTL_INIT _IOWR('R', 6, struct fastrpc_ioctl_init)
+#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
#define DEVICE_NAME "adsprpc-smd"
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 65beccc5d7fe..778c76f52d0b 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -246,7 +246,8 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.minor = 0,
.patchid = ANY_ID,
.features = ADRENO_PREEMPTION | ADRENO_64BIT |
- ADRENO_CONTENT_PROTECTION,
+ ADRENO_CONTENT_PROTECTION |
+ ADRENO_GPMU | ADRENO_SPTP_PC,
.pm4fw_name = "a530_pm4.fw",
.pfpfw_name = "a530_pfp.fw",
.zap_name = "a530_zap",
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 79e95ebb8363..24c5186340e5 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1012,13 +1012,14 @@ static void _adreno_free_memories(struct adreno_device *adreno_dev)
static int adreno_remove(struct platform_device *pdev)
{
struct adreno_device *adreno_dev = adreno_get_dev(pdev);
- struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+ struct adreno_gpudev *gpudev;
struct kgsl_device *device;
if (adreno_dev == NULL)
return 0;
device = KGSL_DEVICE(adreno_dev);
+ gpudev = ADRENO_GPU_DEVICE(adreno_dev);
if (gpudev->remove != NULL)
gpudev->remove(adreno_dev);
@@ -1221,7 +1222,7 @@ static void _setup_throttling_counters(struct adreno_device *adreno_dev)
if (!adreno_is_a540(adreno_dev))
return;
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
return;
for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
@@ -1259,7 +1260,7 @@ static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev)
if (!adreno_is_a540(adreno_dev))
return 0;
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
return 0;
for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index f739783ebd84..816185e9aad4 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -721,6 +721,7 @@ struct adreno_gpudev {
struct adreno_ringbuffer *, unsigned int *,
struct kgsl_context *, uint64_t cond_addr,
struct kgsl_memobj_node *);
+ int (*preemption_yield_enable)(unsigned int *);
int (*preemption_post_ibsubmit)(struct adreno_device *,
struct adreno_ringbuffer *, unsigned int *,
struct kgsl_context *);
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 9d37c86aee0f..ef3d5d8fc552 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -317,10 +317,6 @@ static int a5xx_preemption_token(struct adreno_device *adreno_dev,
{
unsigned int *cmds_orig = cmds;
- /* Enable yield in RB only */
- *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
- *cmds++ = 1;
-
*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr);
*cmds++ = 1;
@@ -411,18 +407,11 @@ static int a5xx_preemption_pre_ibsubmit(
}
/*
- * a5xx_preemption_post_ibsubmit() - Below PM4 commands are
+ * a5xx_preemption_yield_enable() - Below PM4 commands are
* added after every cmdbatch submission.
*/
-static int a5xx_preemption_post_ibsubmit(
- struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb, unsigned int *cmds,
- struct kgsl_context *context)
+static int a5xx_preemption_yield_enable(unsigned int *cmds)
{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int *cmds_orig = cmds;
- unsigned int ctx_id = context ? context->id : 0;
-
/*
* SRM -- set render mode (ex binning, direct render etc)
* SRM is set by UMD usually at start of IB to tell CP the type of
@@ -437,11 +426,27 @@ static int a5xx_preemption_post_ibsubmit(
*cmds++ = 0;
*cmds++ = 0;
- cmds += a5xx_preemption_token(adreno_dev, rb, cmds,
+ *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
+ *cmds++ = 1;
+
+ return 8;
+}
+
+/*
+ * a5xx_preemption_post_ibsubmit() - Below PM4 commands are
+ * added after every cmdbatch submission.
+ */
+static int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb, unsigned int *cmds,
+ struct kgsl_context *context)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int ctx_id = context ? context->id : 0;
+
+ return a5xx_preemption_token(adreno_dev, rb, cmds,
device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(ctx_id, preempted));
- return cmds - cmds_orig;
}
static void a5xx_platform_setup(struct adreno_device *adreno_dev)
@@ -1840,7 +1845,7 @@ static void a540_lm_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
uint32_t agc_lm_config =
- ((ADRENO_CHIPID_PATCH(adreno_dev->chipid) | 0x3)
+ ((ADRENO_CHIPID_PATCH(adreno_dev->chipid) & 0x3)
<< AGC_GPU_VERSION_SHIFT);
unsigned int r, i;
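The '| 0x3' to '& 0x3' fix above matters because AGC_GPU_VERSION_MASK is the
two-bit field GENMASK(18, 17): AND extracts the low two bits of the patch id,
whereas OR forced them on for every chip. A quick worked example, assuming
AGC_GPU_VERSION_SHIFT == 17 as defined in adreno_a5xx.h:

  unsigned int patch = 0x1;                 /* example chipid patch level */
  unsigned int good = (patch & 0x3) << 17;  /* 0x00020000: version 1 */
  unsigned int bad  = (patch | 0x3) << 17;  /* 0x00060000: version 3 */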
@@ -1850,8 +1855,8 @@ static void a540_lm_init(struct adreno_device *adreno_dev)
AGC_THROTTLE_SEL_DCS;
kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r);
- if (r & GPMU_BCL_ENABLED)
- agc_lm_config |= AGC_BCL_ENABLED;
+ if (!(r & GPMU_BCL_ENABLED))
+ agc_lm_config |= AGC_BCL_DISABLED;
if (r & GPMU_LLM_ENABLED)
agc_lm_config |= AGC_LLM_ENABLED;
@@ -1900,6 +1905,9 @@ start_agc:
kgsl_regwrite(device, A5XX_GPMU_GPMU_PWR_THRESHOLD,
PWR_THRESHOLD_VALID | lm_limit(adreno_dev));
+ kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK,
+ VOLTAGE_INTR_EN);
+
if (lm_on(adreno_dev))
wake_llm(adreno_dev);
}
@@ -1948,7 +1956,10 @@ static void a5xx_pwrlevel_change_settings(struct adreno_device *adreno_dev,
{
int on = 0;
- /* Only call through if PPD or LM is supported and enabled */
+ /*
+ * On pre A540 HW only call through if PPD or LM
+ * is supported and enabled
+ */
if (ADRENO_FEATURE(adreno_dev, ADRENO_PPD) &&
test_bit(ADRENO_PPD_CTRL, &adreno_dev->pwrctrl_flag))
on = ADRENO_PPD;
@@ -1957,6 +1968,12 @@ static void a5xx_pwrlevel_change_settings(struct adreno_device *adreno_dev,
test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
on = ADRENO_LM;
+ /* On 540+ HW call through unconditionally as long as GPMU is enabled */
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
+ if (adreno_is_a540(adreno_dev))
+ on = ADRENO_GPMU;
+ }
+
if (!on)
return;
@@ -4182,8 +4199,10 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
.regulator_disable = a5xx_regulator_disable,
.pwrlevel_change_settings = a5xx_pwrlevel_change_settings,
.preemption_pre_ibsubmit = a5xx_preemption_pre_ibsubmit,
+ .preemption_yield_enable =
+ a5xx_preemption_yield_enable,
.preemption_post_ibsubmit =
- a5xx_preemption_post_ibsubmit,
+ a5xx_preemption_post_ibsubmit,
.preemption_token = a5xx_preemption_token,
.preemption_init = a5xx_preemption_init,
.preemption_schedule = a5xx_preemption_schedule,
diff --git a/drivers/gpu/msm/adreno_a5xx.h b/drivers/gpu/msm/adreno_a5xx.h
index 6c1b8d141671..e41b5b9cce0c 100644
--- a/drivers/gpu/msm/adreno_a5xx.h
+++ b/drivers/gpu/msm/adreno_a5xx.h
@@ -132,6 +132,9 @@ void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on);
#define AMP_CALIBRATION_RETRY_CNT 3
#define AMP_CALIBRATION_TIMEOUT 6
+/* A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK */
+#define VOLTAGE_INTR_EN BIT(0)
+
/* A5XX_GPMU_GPMU_PWR_THRESHOLD */
#define PWR_THRESHOLD_VALID 0x80000000
/* AGC */
@@ -170,7 +173,7 @@ void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on);
#define AGC_LLM_ENABLED (1 << 16)
#define AGC_GPU_VERSION_MASK GENMASK(18, 17)
#define AGC_GPU_VERSION_SHIFT 17
-#define AGC_BCL_ENABLED (1 << 24)
+#define AGC_BCL_DISABLED (1 << 24)
#define AGC_LEVEL_CONFIG (140/4)
diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c
index 13d3353946ca..519087a77b83 100644
--- a/drivers/gpu/msm/adreno_ioctl.c
+++ b/drivers/gpu/msm/adreno_ioctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -117,7 +117,7 @@ long adreno_ioctl_helper(struct kgsl_device_private *dev_priv,
unsigned int cmd, unsigned long arg,
const struct kgsl_ioctl *cmds, int len)
{
- unsigned char data[128];
+ unsigned char data[128] = { 0 };
long ret;
int i;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index a397a3e83cf4..dceb8fb93461 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -520,7 +520,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
if (gpudev->preemption_post_ibsubmit &&
adreno_is_preemption_enabled(adreno_dev))
- total_sizedwords += 13;
+ total_sizedwords += 5;
/*
* a5xx uses 64 bit memory address. pm4 commands that involve read/write
@@ -707,8 +707,8 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
if (gpudev->preemption_post_ibsubmit &&
adreno_is_preemption_enabled(adreno_dev))
- ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev,
- rb, ringcmds, &drawctxt->base);
+ ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev, rb,
+ ringcmds, &drawctxt->base);
/*
* If we have more ringbuffer commands than space reserved
@@ -801,6 +801,11 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
&& !(cmdbatch->flags & KGSL_CMDBATCH_SYNC))
device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
+ /* A3XX does not have support for command batch profiling */
+ if (adreno_is_a3xx(adreno_dev) &&
+ (cmdbatch->flags & KGSL_CMDBATCH_PROFILING))
+ return -EOPNOTSUPP;
+
/* Queue the command in the ringbuffer */
ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
timestamp);
@@ -860,6 +865,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct kgsl_memobj_node *ib;
unsigned int numibs = 0;
unsigned int *link;
@@ -978,6 +984,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
dwords += 2;
}
+ if (gpudev->preemption_yield_enable &&
+ adreno_is_preemption_enabled(adreno_dev))
+ dwords += 8;
+
link = kzalloc(sizeof(unsigned int) * dwords, GFP_KERNEL);
if (!link) {
ret = -ENOMEM;
@@ -1028,6 +1038,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
}
}
+ if (gpudev->preemption_yield_enable &&
+ adreno_is_preemption_enabled(adreno_dev))
+ cmds += gpudev->preemption_yield_enable(cmds);
+
if (cmdbatch_kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
adreno_dev->cmdbatch_profile_buffer.gpuaddr +
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index e3e0b0973410..77494ebb2c92 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1962,6 +1962,20 @@ static inline int _check_region(unsigned long start, unsigned long size,
return (end > len);
}
+static int check_vma_flags(struct vm_area_struct *vma,
+ unsigned int flags)
+{
+ unsigned long flags_requested = (VM_READ | VM_WRITE);
+
+ if (flags & KGSL_MEMFLAGS_GPUREADONLY)
+ flags_requested &= ~VM_WRITE;
+
+ if ((vma->vm_flags & flags_requested) == flags_requested)
+ return 0;
+
+ return -EFAULT;
+}
+
static int check_vma(struct vm_area_struct *vma, struct file *vmfile,
struct kgsl_memdesc *memdesc)
{
@@ -1975,7 +1989,7 @@ static int check_vma(struct vm_area_struct *vma, struct file *vmfile,
if (vma->vm_start != memdesc->useraddr ||
(memdesc->useraddr + memdesc->size) != vma->vm_end)
return -EINVAL;
- return 0;
+ return check_vma_flags(vma, memdesc->flags);
}
static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile)
@@ -1984,7 +1998,7 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile)
long npages = 0, i;
size_t sglen = (size_t) (memdesc->size / PAGE_SIZE);
struct page **pages = NULL;
- int write = (memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY) != 0;
+ int write = ((memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY) ? 0 : 1);
if (sglen == 0 || sglen >= LONG_MAX)
return -EINVAL;
@@ -2103,6 +2117,12 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
if (vma && vma->vm_file) {
int fd;
+ ret = check_vma_flags(vma, entry->memdesc.flags);
+ if (ret) {
+ up_read(&current->mm->mmap_sem);
+ return ret;
+ }
+
/*
* Check to see that this isn't our own memory that we have
* already mapped
@@ -2191,7 +2211,7 @@ static long _gpuobj_map_useraddr(struct kgsl_device *device,
return -EINVAL;
return kgsl_setup_useraddr(device, pagetable, entry,
- (unsigned long) useraddr.virtaddr, 0, 0);
+ (unsigned long) useraddr.virtaddr, 0, param->priv_len);
}
#ifdef CONFIG_DMA_SHARED_BUFFER
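check_vma_flags() above requires the user VMA to be at least as permissive as the requested GPU mapping: VM_READ is always needed, and VM_WRITE is additionally needed unless the entry was created with KGSL_MEMFLAGS_GPUREADONLY. The same decision in table form (a restatement of the code above, not new behavior):

        /*
         * KGSL_MEMFLAGS_GPUREADONLY set   -> VMA must have VM_READ            -> else -EFAULT
         * KGSL_MEMFLAGS_GPUREADONLY clear -> VMA must have VM_READ | VM_WRITE -> else -EFAULT
         */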
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index b911be00b338..e71b8718037c 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -706,8 +706,8 @@ static ssize_t stm_show_entities(struct device *dev,
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
ssize_t len;
- len = bitmap_scnprintf(buf, PAGE_SIZE, drvdata->entities,
- OST_ENTITY_MAX);
+ len = scnprintf(buf, PAGE_SIZE, "%*pb\n",
+ OST_ENTITY_MAX, drvdata->entities);
if (PAGE_SIZE - len < 2)
len = -EINVAL;
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index bc4246e6724a..8870fceb0350 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -727,8 +727,8 @@ static ssize_t tpdm_show_enable_datasets(struct device *dev,
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
ssize_t size;
- size = bitmap_scnprintf(buf, PAGE_SIZE, drvdata->enable_ds,
- TPDM_DATASETS);
+ size = scnprintf(buf, PAGE_SIZE, "%*pb\n", TPDM_DATASETS,
+ drvdata->enable_ds);
if (PAGE_SIZE - size < 2)
size = -EINVAL;
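Both hunks above replace the removed bitmap_scnprintf() helper with vsnprintf()'s bitmap extension: "%*pb" prints a bitmap in hex, with the field width giving the number of valid bits and the argument being a pointer to the bitmap. A minimal sketch of the idiom, using a hypothetical bitmap rather than the driver state:

        #include <linux/bitmap.h>

        DECLARE_BITMAP(ents, 64);       /* hypothetical 64-bit bitmap */
        char buf[32];

        bitmap_zero(ents, 64);
        set_bit(3, ents);
        scnprintf(buf, sizeof(buf), "%*pb\n", 64, ents);        /* buf now holds "8\n" */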
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 19510e5c2279..18ae7fa5454b 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -171,6 +171,16 @@ config MSM_MHI_DEBUG
throughput as individual MHI packets and state transitions
will be logged.
+config MSM_MHI_DEV
+ tristate "Modem Device Interface Driver"
+ depends on EP_PCIE && IPA
+ help
+ This kernel module is used to interact with a PCIe Root Complex
+ that supports the MHI protocol. MHI is a data transmission
+ protocol involving communication between a host and a device
+ over shared memory. MHI interacts with the IPA to support
+ transfers on the HW-accelerated channels between host and device.
+
config MSM_11AD
tristate "Platform driver for 11ad chip"
depends on PCI
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index c33f5e53c1b3..d5e87c209c21 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_MSM_11AD) += msm_11ad/
obj-$(CONFIG_SEEMP_CORE) += seemp_core/
obj-$(CONFIG_SSM) += ssm.o
obj-$(CONFIG_USB_BAM) += usb_bam.o
+obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
index 8bc7d8a498f6..704dd0abfefa 100644
--- a/drivers/platform/msm/ipa/Makefile
+++ b/drivers/platform/msm/ipa/Makefile
@@ -1,5 +1,4 @@
-obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/
-obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/
+obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ ipa_common
+obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common
-obj-$(CONFIG_IPA) += ipa_api.o
-obj-$(CONFIG_IPA3) += ipa_api.o
+ipa_common += ipa_api.o ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 04054fe1211f..13ea3b2fb920 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -76,6 +76,78 @@
static enum ipa_hw_type ipa_api_hw_type;
static struct ipa_api_controller *ipa_api_ctrl;
+const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
+ __stringify(IPA_CLIENT_HSIC1_PROD),
+ __stringify(IPA_CLIENT_WLAN1_PROD),
+ __stringify(IPA_CLIENT_HSIC2_PROD),
+ __stringify(IPA_CLIENT_USB2_PROD),
+ __stringify(IPA_CLIENT_HSIC3_PROD),
+ __stringify(IPA_CLIENT_USB3_PROD),
+ __stringify(IPA_CLIENT_HSIC4_PROD),
+ __stringify(IPA_CLIENT_USB4_PROD),
+ __stringify(IPA_CLIENT_HSIC5_PROD),
+ __stringify(IPA_CLIENT_USB_PROD),
+ __stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD),
+ __stringify(IPA_CLIENT_A2_EMBEDDED_PROD),
+ __stringify(IPA_CLIENT_A2_TETHERED_PROD),
+ __stringify(IPA_CLIENT_APPS_LAN_WAN_PROD),
+ __stringify(IPA_CLIENT_APPS_CMD_PROD),
+ __stringify(IPA_CLIENT_ODU_PROD),
+ __stringify(IPA_CLIENT_MHI_PROD),
+ __stringify(IPA_CLIENT_Q6_LAN_PROD),
+ __stringify(IPA_CLIENT_Q6_WAN_PROD),
+ __stringify(IPA_CLIENT_Q6_CMD_PROD),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD),
+ __stringify(IPA_CLIENT_Q6_DECOMP_PROD),
+ __stringify(IPA_CLIENT_Q6_DECOMP2_PROD),
+ __stringify(IPA_CLIENT_UC_USB_PROD),
+
+ /* Below PROD client type is only for test purpose */
+ __stringify(IPA_CLIENT_TEST_PROD),
+ __stringify(IPA_CLIENT_TEST1_PROD),
+ __stringify(IPA_CLIENT_TEST2_PROD),
+ __stringify(IPA_CLIENT_TEST3_PROD),
+ __stringify(IPA_CLIENT_TEST4_PROD),
+
+ __stringify(IPA_CLIENT_HSIC1_CONS),
+ __stringify(IPA_CLIENT_WLAN1_CONS),
+ __stringify(IPA_CLIENT_HSIC2_CONS),
+ __stringify(IPA_CLIENT_USB2_CONS),
+ __stringify(IPA_CLIENT_WLAN2_CONS),
+ __stringify(IPA_CLIENT_HSIC3_CONS),
+ __stringify(IPA_CLIENT_USB3_CONS),
+ __stringify(IPA_CLIENT_WLAN3_CONS),
+ __stringify(IPA_CLIENT_HSIC4_CONS),
+ __stringify(IPA_CLIENT_USB4_CONS),
+ __stringify(IPA_CLIENT_WLAN4_CONS),
+ __stringify(IPA_CLIENT_HSIC5_CONS),
+ __stringify(IPA_CLIENT_USB_CONS),
+ __stringify(IPA_CLIENT_USB_DPL_CONS),
+ __stringify(IPA_CLIENT_A2_EMBEDDED_CONS),
+ __stringify(IPA_CLIENT_A2_TETHERED_CONS),
+ __stringify(IPA_CLIENT_A5_LAN_WAN_CONS),
+ __stringify(IPA_CLIENT_APPS_LAN_CONS),
+ __stringify(IPA_CLIENT_APPS_WAN_CONS),
+ __stringify(IPA_CLIENT_ODU_EMB_CONS),
+ __stringify(IPA_CLIENT_ODU_TETH_CONS),
+ __stringify(IPA_CLIENT_MHI_CONS),
+ __stringify(IPA_CLIENT_Q6_LAN_CONS),
+ __stringify(IPA_CLIENT_Q6_WAN_CONS),
+ __stringify(IPA_CLIENT_Q6_DUN_CONS),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS),
+ __stringify(IPA_CLIENT_Q6_DECOMP_CONS),
+ __stringify(IPA_CLIENT_Q6_DECOMP2_CONS),
+ __stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS),
+ /* Below CONS client type is only for test purpose */
+ __stringify(IPA_CLIENT_TEST_CONS),
+ __stringify(IPA_CLIENT_TEST1_CONS),
+ __stringify(IPA_CLIENT_TEST2_CONS),
+ __stringify(IPA_CLIENT_TEST3_CONS),
+ __stringify(IPA_CLIENT_TEST4_CONS),
+};
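Each entry above is generated with __stringify(), so the array index must stay in lock-step with the ipa_client_type enum order in the UAPI header; inserting an enum value without updating this table would shift every later name. A sketch of the intended lookup; the helper itself is hypothetical, only ipa_clients_strings and IPA_CLIENT_MAX come from this code:

        static const char *ipa_client_str(enum ipa_client_type client)
        {
                if (client < 0 || client >= IPA_CLIENT_MAX)
                        return "INVALID";       /* avoid walking off the table */
                return ipa_clients_strings[client];
        }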
+
/**
* ipa_connect() - low-level IPA client connect
@@ -1459,314 +1531,6 @@ int ipa_uc_dereg_rdyCB(void)
EXPORT_SYMBOL(ipa_uc_dereg_rdyCB);
/**
- * ipa_rm_create_resource() - create resource
- * @create_params: [in] parameters needed
- * for resource initialization
- *
- * Returns: 0 on success, negative on failure
- *
- * This function is called by IPA RM client to initialize client's resources.
- * This API should be called before any other IPA RM API on a given resource
- * name.
- *
- */
-int ipa_rm_create_resource(struct ipa_rm_create_params *create_params)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_create_resource, create_params);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_create_resource);
-
-/**
- * ipa_rm_delete_resource() - delete resource
- * @resource_name: name of resource to be deleted
- *
- * Returns: 0 on success, negative on failure
- *
- * This function is called by IPA RM client to delete client's resources.
- *
- */
-int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_delete_resource, resource_name);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_delete_resource);
-
-/**
- * ipa_rm_add_dependency() - create dependency
- * between 2 resources
- * @resource_name: name of dependent resource
- * @depends_on_name: name of its dependency
- *
- * Returns: 0 on success, negative on failure
- *
- * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
- * in case client registered with IPA RM
- */
-int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_add_dependency, resource_name,
- depends_on_name);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_add_dependency);
-
-/**
- * ipa_rm_delete_dependency() - create dependency
- * between 2 resources
- * @resource_name: name of dependent resource
- * @depends_on_name: name of its dependency
- *
- * Returns: 0 on success, negative on failure
- *
- * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
- * in case client registered with IPA RM
- */
-int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_delete_dependency, resource_name,
- depends_on_name);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_delete_dependency);
-
-/**
- * ipa_rm_request_resource() - request resource
- * @resource_name: [in] name of the requested resource
- *
- * Returns: 0 on success, negative on failure
- *
- * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED
- * on successful completion of this operation.
- */
-int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_request_resource, resource_name);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_request_resource);
-
-/**
- * ipa_rm_release_resource() - release resource
- * @resource_name: [in] name of the requested resource
- *
- * Returns: 0 on success, negative on failure
- *
- * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED
- * on successful completion of this operation.
- */
-int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_release_resource, resource_name);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_release_resource);
-
-/**
- * ipa_rm_register() - register for event
- * @resource_name: resource name
- * @reg_params: [in] registration parameters
- *
- * Returns: 0 on success, negative on failure
- *
- * Registration parameters provided here should be the same
- * as provided later in ipa_rm_deregister() call.
- */
-int ipa_rm_register(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_register_params *reg_params)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_register, resource_name, reg_params);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_register);
-
-/**
- * ipa_rm_deregister() - cancel the registration
- * @resource_name: resource name
- * @reg_params: [in] registration parameters
- *
- * Returns: 0 on success, negative on failure
- *
- * Registration parameters provided here should be the same
- * as provided in ipa_rm_register() call.
- */
-int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_register_params *reg_params)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_deregister, resource_name, reg_params);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_deregister);
-
-/**
- * ipa_rm_set_perf_profile() - set performance profile
- * @resource_name: resource name
- * @profile: [in] profile information.
- *
- * Returns: 0 on success, negative on failure
- *
- * Set resource performance profile.
- * Updates IPA driver if performance level changed.
- */
-int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_perf_profile *profile)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(
- ipa_rm_set_perf_profile,
- resource_name,
- profile);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_set_perf_profile);
-
-/**
- * ipa_rm_notify_completion() -
- * consumer driver notification for
- * request_resource / release_resource operations
- * completion
- * @event: notified event
- * @resource_name: resource name
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa_rm_notify_completion(enum ipa_rm_event event,
- enum ipa_rm_resource_name resource_name)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_notify_completion, event, resource_name);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_notify_completion);
-
-/**
-* ipa_rm_inactivity_timer_init() - Init function for IPA RM
-* inactivity timer. This function shall be called prior calling
-* any other API of IPA RM inactivity timer.
-*
-* @resource_name: Resource name. @see ipa_rm.h
-* @msecs: time in milliseconds that IPA RM inactivity timer
-* shall wait prior calling to ipa_rm_release_resource().
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
- unsigned long msecs)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_inactivity_timer_init, resource_name,
- msecs);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_inactivity_timer_init);
-
-/**
-* ipa_rm_inactivity_timer_destroy() - De-Init function for IPA
-* RM inactivity timer.
-*
-* @resource_name: Resource name. @see ipa_rm.h
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_inactivity_timer_destroy, resource_name);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_inactivity_timer_destroy);
-
-/**
-* ipa_rm_inactivity_timer_request_resource() - Same as
-* ipa_rm_request_resource(), with a difference that calling to
-* this function will also cancel the inactivity timer, if
-* ipa_rm_inactivity_timer_release_resource() was called earlier.
-*
-* @resource_name: Resource name. @see ipa_rm.h
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int ipa_rm_inactivity_timer_request_resource(
- enum ipa_rm_resource_name resource_name)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_inactivity_timer_request_resource,
- resource_name);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_inactivity_timer_request_resource);
-
-/**
-* ipa_rm_inactivity_timer_release_resource() - Sets the
-* inactivity timer to the timeout set by
-* ipa_rm_inactivity_timer_init(). When the timeout expires, IPA
-* RM inactivity timer will call to ipa_rm_release_resource().
-* If a call to ipa_rm_inactivity_timer_request_resource() was
-* made BEFORE the timeout has expired, the timer will be
-* cancelled.
-*
-* @resource_name: Resource name. @see ipa_rm.h
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int ipa_rm_inactivity_timer_release_resource(
- enum ipa_rm_resource_name resource_name)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_inactivity_timer_release_resource,
- resource_name);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_inactivity_timer_release_resource);
-
-/**
* teth_bridge_init() - Initialize the Tethering bridge driver
* @params - in/out params for USB initialization API (please look at struct
* definition for more info)
@@ -2431,31 +2195,6 @@ int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
EXPORT_SYMBOL(ipa_disable_apps_wan_cons_deaggr);
/**
- * ipa_rm_add_dependency_sync() - Create a dependency between 2 resources
- * in a synchronized fashion. In case a producer resource is in GRANTED state
- * and the newly added consumer resource is in RELEASED state, the consumer
- * entity will be requested and the function will block until the consumer
- * is granted.
- * @resource_name: name of dependent resource
- * @depends_on_name: name of its dependency
- *
- * Returns: 0 on success, negative on failure
- *
- * Side effects: May block. See documentation above.
- */
-int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_rm_add_dependency_sync, resource_name,
- depends_on_name);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_rm_add_dependency_sync);
-
-/**
* ipa_get_dma_dev()- Returns ipa_ctx dma dev pointer
*
* Return value: pointer to ipa_ctx dma dev pointer
@@ -2634,6 +2373,136 @@ int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data),
}
EXPORT_SYMBOL(ipa_register_ipa_ready_cb);
+/**
+ * ipa_inc_client_enable_clks() - Increase active clients counter, and
+ * enable ipa clocks if necessary
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA_ACTIVE_CLIENTS_INC_XXX();
+ *
+ * Return codes:
+ * None
+*/
+void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+ IPA_API_DISPATCH(ipa_inc_client_enable_clks, id);
+}
+EXPORT_SYMBOL(ipa_inc_client_enable_clks);
+
+/**
+ * ipa_dec_client_disable_clks() - Decrease active clients counter, and
+ * disable ipa clocks if necessary
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA_ACTIVE_CLIENTS_DEC_XXX();
+ *
+ * Return codes:
+ * None
+*/
+void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
+{
+ IPA_API_DISPATCH(ipa_dec_client_disable_clks, id);
+}
+EXPORT_SYMBOL(ipa_dec_client_disable_clks);
+
+/**
+ * ipa_inc_client_enable_clks_no_block() - Only increment the number of active
+ * clients if no asynchronous actions should be done. Asynchronous actions are
+ * locking a mutex and waking up IPA HW.
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ *
+ * Return codes: 0 for success
+ * -EPERM if an asynchronous action should have been done
+ */
+int ipa_inc_client_enable_clks_no_block(
+ struct ipa_active_client_logging_info *id)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_inc_client_enable_clks_no_block, id);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_inc_client_enable_clks_no_block);
+
+/**
+* ipa_suspend_resource_no_block() - suspend client endpoints related to the
+* IPA_RM resource and decrement active clients counter. This function is
+* guaranteed to avoid sleeping.
+*
+* @resource: [IN] IPA Resource Manager resource
+*
+* Return codes: 0 on success, negative on failure.
+*/
+int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_suspend_resource_no_block, resource);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_resource_no_block);
+
+/**
+ * ipa_resume_resource() - resume client endpoints related to the IPA_RM
+ * resource.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_resume_resource(enum ipa_rm_resource_name resource)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_resume_resource, resource);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_resume_resource);
+
+/**
+ * ipa_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * resource and decrement active clients counter, which may result in clock
+ * gating of IPA clocks.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_suspend_resource_sync, resource);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_resource_sync);
+
+/**
+ * ipa_set_required_perf_profile() - set IPA to the specified performance
+ * profile based on the bandwidth, unless minimum voltage required is
+ * higher. In this case the floor_voltage specified will be used.
+ * @floor_voltage: minimum voltage to operate
+ * @bandwidth_mbps: needed bandwidth from IPA
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+ u32 bandwidth_mbps)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_set_required_perf_profile, floor_voltage,
+ bandwidth_mbps);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_set_required_perf_profile);
+
static const struct dev_pm_ops ipa_pm_ops = {
.suspend_noirq = ipa_ap_suspend,
.resume_noirq = ipa_ap_resume,
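ipa_set_required_perf_profile(), exported above, lets a client vote on two axes at once: bandwidth_mbps selects a performance profile, and floor_voltage keeps the rail from dropping below a minimum corner even when the bandwidth alone would allow it. A hedged usage sketch; the bandwidth figure is made up, and IPA_VOLTAGE_NOMINAL is assumed to be one of this tree's enum ipa_voltage_level values:

        int ret;

        /* vote for ~700 Mbps, but never below the nominal voltage corner */
        ret = ipa_set_required_perf_profile(IPA_VOLTAGE_NOMINAL, 700);
        if (ret)
                pr_err("IPA perf profile vote failed: %d\n", ret);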
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 04b7ba64a6aa..7edbf4e5b1d9 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -10,6 +10,8 @@
* GNU General Public License for more details.
*/
+#include "ipa_common_i.h"
+
#ifndef _IPA_API_H_
#define _IPA_API_H_
@@ -183,45 +185,6 @@ struct ipa_api_controller {
int (*ipa_uc_dereg_rdyCB)(void);
- int (*ipa_rm_create_resource)(
- struct ipa_rm_create_params *create_params);
-
- int (*ipa_rm_delete_resource)(enum ipa_rm_resource_name resource_name);
-
- int (*ipa_rm_register)(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_register_params *reg_params);
-
- int (*ipa_rm_deregister)(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_register_params *reg_params);
-
- int (*ipa_rm_set_perf_profile)(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_perf_profile *profile);
-
- int (*ipa_rm_add_dependency)(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name);
-
- int (*ipa_rm_delete_dependency)(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name);
-
- int (*ipa_rm_request_resource)(enum ipa_rm_resource_name resource_name);
-
- int (*ipa_rm_release_resource)(enum ipa_rm_resource_name resource_name);
-
- int (*ipa_rm_notify_completion)(enum ipa_rm_event event,
- enum ipa_rm_resource_name resource_name);
-
- int (*ipa_rm_inactivity_timer_init)(enum ipa_rm_resource_name
- resource_name, unsigned long msecs);
-
- int (*ipa_rm_inactivity_timer_destroy)(
- enum ipa_rm_resource_name resource_name);
-
- int (*ipa_rm_inactivity_timer_request_resource)(
- enum ipa_rm_resource_name resource_name);
-
- int (*ipa_rm_inactivity_timer_release_resource)(
- enum ipa_rm_resource_name resource_name);
-
int (*teth_bridge_init)(struct teth_bridge_init_params *params);
int (*teth_bridge_disconnect)(enum ipa_client_type client);
@@ -308,10 +271,6 @@ struct ipa_api_controller {
int (*ipa_disable_apps_wan_cons_deaggr)(uint32_t agg_size,
uint32_t agg_count);
- int (*ipa_rm_add_dependency_sync)(
- enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name);
-
struct device *(*ipa_get_dma_dev)(void);
int (*ipa_release_wdi_mapping)(u32 num_buffers,
@@ -325,6 +284,25 @@ struct ipa_api_controller {
int (*ipa_register_ipa_ready_cb)(void (*ipa_ready_cb)(void *user_data),
void *user_data);
+ void (*ipa_inc_client_enable_clks)(
+ struct ipa_active_client_logging_info *id);
+
+ void (*ipa_dec_client_disable_clks)(
+ struct ipa_active_client_logging_info *id);
+
+ int (*ipa_inc_client_enable_clks_no_block)(
+ struct ipa_active_client_logging_info *id);
+
+ int (*ipa_suspend_resource_no_block)(
+ enum ipa_rm_resource_name resource);
+
+ int (*ipa_resume_resource)(enum ipa_rm_resource_name name);
+
+ int (*ipa_suspend_resource_sync)(enum ipa_rm_resource_name resource);
+
+ int (*ipa_set_required_perf_profile)(
+ enum ipa_voltage_level floor_voltage, u32 bandwidth_mbps);
+
};
#ifdef CONFIG_IPA
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index a0c94c8e37ec..8004fa9e42ae 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -19,6 +19,7 @@
#include <linux/rndis_ipa.h>
#include <linux/ecm_ipa.h>
#include "../ipa_v3/ipa_i.h"
+#include "../ipa_rm_i.h"
#define IPA_USB_RM_TIMEOUT_MSEC 10000
#define IPA_USB_DEV_READY_TIMEOUT_MSEC 10000
@@ -394,7 +395,7 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
}
/* Notify RM that consumer is granted */
if (rm_ctx->cons_requested) {
- ipa3_rm_notify_completion(
+ ipa_rm_notify_completion(
IPA_RM_RESOURCE_GRANTED,
rm_ctx->cons_params.name);
rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
@@ -529,12 +530,12 @@ static void ipa3_usb_prod_notify_cb_do(enum ipa_rm_event event,
switch (event) {
case IPA_RM_RESOURCE_GRANTED:
IPA_USB_DBG(":%s granted\n",
- ipa3_rm_resource_str(rm_ctx->prod_params.name));
+ ipa_rm_resource_str(rm_ctx->prod_params.name));
complete_all(&rm_ctx->prod_comp);
break;
case IPA_RM_RESOURCE_RELEASED:
IPA_USB_DBG(":%s released\n",
- ipa3_rm_resource_str(rm_ctx->prod_params.name));
+ ipa_rm_resource_str(rm_ctx->prod_params.name));
complete_all(&rm_ctx->prod_comp);
break;
}
@@ -824,13 +825,13 @@ static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype)
result = ipa_rm_create_resource(&rm_ctx->prod_params);
if (result) {
IPA_USB_ERR("Failed to create %s RM resource\n",
- ipa3_rm_resource_str(rm_ctx->prod_params.name));
+ ipa_rm_resource_str(rm_ctx->prod_params.name));
return result;
}
rm_ctx->prod_valid = true;
created = true;
IPA_USB_DBG("Created %s RM resource\n",
- ipa3_rm_resource_str(rm_ctx->prod_params.name));
+ ipa_rm_resource_str(rm_ctx->prod_params.name));
}
/* Create CONS */
@@ -852,12 +853,12 @@ static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype)
result = ipa_rm_create_resource(&rm_ctx->cons_params);
if (result) {
IPA_USB_ERR("Failed to create %s RM resource\n",
- ipa3_rm_resource_str(rm_ctx->cons_params.name));
+ ipa_rm_resource_str(rm_ctx->cons_params.name));
goto create_cons_rsc_fail;
}
rm_ctx->cons_valid = true;
IPA_USB_DBG("Created %s RM resource\n",
- ipa3_rm_resource_str(rm_ctx->cons_params.name));
+ ipa_rm_resource_str(rm_ctx->cons_params.name));
}
return 0;
@@ -1298,11 +1299,11 @@ static int ipa3_usb_request_prod(enum ipa3_usb_transport_type ttype)
const char *rsrc_str;
rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
- rsrc_str = ipa3_rm_resource_str(rm_ctx->prod_params.name);
+ rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name);
IPA_USB_DBG_LOW("requesting %s\n", rsrc_str);
init_completion(&rm_ctx->prod_comp);
- result = ipa3_rm_request_resource(rm_ctx->prod_params.name);
+ result = ipa_rm_request_resource(rm_ctx->prod_params.name);
if (result) {
if (result != -EINPROGRESS) {
IPA_USB_ERR("failed to request %s: %d\n",
@@ -1328,7 +1329,7 @@ static int ipa3_usb_release_prod(enum ipa3_usb_transport_type ttype)
const char *rsrc_str;
rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
- rsrc_str = ipa3_rm_resource_str(rm_ctx->prod_params.name);
+ rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name);
IPA_USB_DBG_LOW("releasing %s\n", rsrc_str);
@@ -1408,10 +1409,10 @@ static int ipa3_usb_connect_dpl(void)
* is sync in order to make sure the IPA clocks are up before we
* continue and notify the USB driver it may continue.
*/
- res = ipa3_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+ res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res < 0) {
- IPA_USB_ERR("ipa3_rm_add_dependency_sync() failed.\n");
+ IPA_USB_ERR("ipa_rm_add_dependency_sync() failed.\n");
return res;
}
@@ -1420,11 +1421,11 @@ static int ipa3_usb_connect_dpl(void)
* status is connected (which can happen only later in the flow),
* the clocks are already up so the call doesn't need to block.
*/
- res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_DPL_CONS);
if (res < 0 && res != -EINPROGRESS) {
- IPA_USB_ERR("ipa3_rm_add_dependency() failed.\n");
- ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+ IPA_USB_ERR("ipa_rm_add_dependency() failed.\n");
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
IPA_RM_RESOURCE_Q6_CONS);
return res;
}
@@ -1590,12 +1591,12 @@ static int ipa3_usb_disconnect_dpl(void)
int res;
/* Remove DPL RM dependency */
- res = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res)
IPA_USB_ERR("deleting DPL_DUMMY_PROD rsrc dependency fail\n");
- res = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_DPL_CONS);
if (res)
IPA_USB_ERR("deleting DPL_CONS rsrc dependencty fail\n");
@@ -1716,7 +1717,7 @@ static int ipa3_usb_xdci_connect_internal(
&profile);
if (result) {
IPA_USB_ERR("failed to set %s perf profile\n",
- ipa3_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
+ ipa_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
rm_ctx.prod_params.name));
return result;
}
@@ -1725,7 +1726,7 @@ static int ipa3_usb_xdci_connect_internal(
&profile);
if (result) {
IPA_USB_ERR("failed to set %s perf profile\n",
- ipa3_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
+ ipa_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
rm_ctx.cons_params.name));
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
new file mode 100644
index 000000000000..8149837b2de0
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_COMMON_I_H_
+#define _IPA_COMMON_I_H_
+
+#define __FILENAME__ \
+ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
+
+#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = EP; \
+ log_info.id_string = ipa_clients_strings[client]
+
+#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = SIMPLE; \
+ log_info.id_string = __func__
+
+#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = RESOURCE; \
+ log_info.id_string = resource_name
+
+#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = SPECIAL; \
+ log_info.id_string = id_str
+
+#define IPA_ACTIVE_CLIENTS_INC_EP(client) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
+ ipa_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
+ ipa_dec_client_disable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
+ ipa_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
+ ipa_dec_client_disable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
+ ipa_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
+ ipa_dec_client_disable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
+ ipa_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
+ ipa_dec_client_disable_clks(&log_info); \
+ } while (0)
+
+
+enum ipa_active_client_log_type {
+ EP,
+ SIMPLE,
+ RESOURCE,
+ SPECIAL,
+ INVALID
+};
+
+struct ipa_active_client_logging_info {
+ const char *id_string;
+ char *file;
+ int line;
+ enum ipa_active_client_log_type type;
+};
+
+extern const char *ipa_clients_strings[];
+
+void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+int ipa_inc_client_enable_clks_no_block(
+ struct ipa_active_client_logging_info *id);
+int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource);
+int ipa_resume_resource(enum ipa_rm_resource_name name);
+int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource);
+int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+ u32 bandwidth_mbps);
+
+
+#endif /* _IPA_COMMON_I_H_ */
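The INC/DEC macro pairs above stamp every clock vote with file, line, and an identifying string before calling ipa_inc_client_enable_clks()/ipa_dec_client_disable_clks(), which is what feeds the active-clients log. A minimal sketch of how a driver brackets work that needs the IPA clocks held on (macro names are from this header; the guarded work is hypothetical):

        IPA_ACTIVE_CLIENTS_INC_SIMPLE();        /* id_string becomes __func__ */
        /* ... touch IPA HW: clocks are guaranteed on in this window ... */
        IPA_ACTIVE_CLIENTS_DEC_SIMPLE();        /* may gate clocks when count hits 0 */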
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c
index ff93394a9363..53c72b154096 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c
+++ b/drivers/platform/msm/ipa/ipa_rm.c
@@ -13,9 +13,9 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/ipa.h>
-#include "ipa_i.h"
#include "ipa_rm_dependency_graph.h"
#include "ipa_rm_i.h"
+#include "ipa_common_i.h"
static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
__stringify(IPA_RM_RESOURCE_Q6_PROD),
@@ -61,7 +61,7 @@ struct ipa_rm_notify_ipa_work_type {
};
/**
- * ipa2_rm_create_resource() - create resource
+ * ipa_rm_create_resource() - create resource
* @create_params: [in] parameters needed
* for resource initialization
*
@@ -72,7 +72,7 @@ struct ipa_rm_notify_ipa_work_type {
* name.
*
*/
-int ipa2_rm_create_resource(struct ipa_rm_create_params *create_params)
+int ipa_rm_create_resource(struct ipa_rm_create_params *create_params)
{
struct ipa_rm_resource *resource;
unsigned long flags;
@@ -122,9 +122,10 @@ bail:
return result;
}
+EXPORT_SYMBOL(ipa_rm_create_resource);
/**
- * ipa2_rm_delete_resource() - delete resource
+ * ipa_rm_delete_resource() - delete resource
* @resource_name: name of resource to be deleted
*
* Returns: 0 on success, negative on failure
@@ -132,7 +133,7 @@ bail:
* This function is called by IPA RM client to delete client's resources.
*
*/
-int ipa2_rm_delete_resource(enum ipa_rm_resource_name resource_name)
+int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name)
{
struct ipa_rm_resource *resource;
unsigned long flags;
@@ -169,9 +170,10 @@ bail:
return result;
}
+EXPORT_SYMBOL(ipa_rm_delete_resource);
/**
- * ipa2_rm_add_dependency() - create dependency
+ * ipa_rm_add_dependency() - create dependency
* between 2 resources
* @resource_name: name of dependent resource
* @depends_on_name: name of its dependency
@@ -181,7 +183,7 @@ bail:
* Side effects: IPA_RM_RESOURCE_GRANTED could be generated
* in case client registered with IPA RM
*/
-int ipa2_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_name depends_on_name)
{
unsigned long flags;
@@ -204,9 +206,10 @@ int ipa2_rm_add_dependency(enum ipa_rm_resource_name resource_name,
return result;
}
+EXPORT_SYMBOL(ipa_rm_add_dependency);
/**
- * ipa2_rm_add_dependency_sync() - Create a dependency between 2 resources
+ * ipa_rm_add_dependency_sync() - Create a dependency between 2 resources
* in a synchronized fashion. In case a producer resource is in GRANTED state
* and the newly added consumer resource is in RELEASED state, the consumer
* entity will be requested and the function will block until the consumer
@@ -218,7 +221,7 @@ int ipa2_rm_add_dependency(enum ipa_rm_resource_name resource_name,
*
* Side effects: May block. See documentation above.
*/
-int ipa2_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
+int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_name depends_on_name)
{
int result;
@@ -265,9 +268,10 @@ int ipa2_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
return result;
}
+EXPORT_SYMBOL(ipa_rm_add_dependency_sync);
/**
- * ipa2_rm_delete_dependency() - create dependency
+ * ipa_rm_delete_dependency() - create dependency
* between 2 resources
* @resource_name: name of dependent resource
* @depends_on_name: name of its dependency
@@ -277,7 +281,7 @@ int ipa2_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
* Side effects: IPA_RM_RESOURCE_GRANTED could be generated
* in case client registered with IPA RM
*/
-int ipa2_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_name depends_on_name)
{
unsigned long flags;
@@ -300,9 +304,10 @@ int ipa2_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
return result;
}
+EXPORT_SYMBOL(ipa_rm_delete_dependency);
/**
- * ipa2_rm_request_resource() - request resource
+ * ipa_rm_request_resource() - request resource
* @resource_name: [in] name of the requested resource
*
* Returns: 0 on success, negative on failure
@@ -310,7 +315,7 @@ int ipa2_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
* All registered callbacks are called with IPA_RM_RESOURCE_GRANTED
* on successful completion of this operation.
*/
-int ipa2_rm_request_resource(enum ipa_rm_resource_name resource_name)
+int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name)
{
struct ipa_rm_resource *resource;
unsigned long flags;
@@ -341,6 +346,7 @@ bail:
return result;
}
+EXPORT_SYMBOL(ipa_rm_request_resource);
void delayed_release_work_func(struct work_struct *work)
{
@@ -402,7 +408,7 @@ int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name)
goto bail;
}
result = ipa_rm_resource_consumer_request(
- (struct ipa_rm_resource_cons *)resource, 0, false);
+ (struct ipa_rm_resource_cons *)resource, 0, false, true);
if (result != 0 && result != -EINPROGRESS) {
IPA_RM_ERR("consumer request returned error %d\n", result);
result = -EPERM;
@@ -426,8 +432,9 @@ bail:
return result;
}
+
/**
- * ipa2_rm_release_resource() - release resource
+ * ipa_rm_release_resource() - release resource
* @resource_name: [in] name of the requested resource
*
* Returns: 0 on success, negative on failure
@@ -435,7 +442,7 @@ bail:
* All registered callbacks are called with IPA_RM_RESOURCE_RELEASED
* on successful completion of this operation.
*/
-int ipa2_rm_release_resource(enum ipa_rm_resource_name resource_name)
+int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name)
{
unsigned long flags;
struct ipa_rm_resource *resource;
@@ -466,18 +473,19 @@ bail:
return result;
}
+EXPORT_SYMBOL(ipa_rm_release_resource);
/**
- * ipa2_rm_register() - register for event
+ * ipa_rm_register() - register for event
* @resource_name: resource name
* @reg_params: [in] registration parameters
*
* Returns: 0 on success, negative on failure
*
* Registration parameters provided here should be the same
- * as provided later in ipa2_rm_deregister() call.
+ * as provided later in ipa_rm_deregister() call.
*/
-int ipa2_rm_register(enum ipa_rm_resource_name resource_name,
+int ipa_rm_register(enum ipa_rm_resource_name resource_name,
struct ipa_rm_register_params *reg_params)
{
int result;
@@ -508,18 +516,19 @@ bail:
return result;
}
+EXPORT_SYMBOL(ipa_rm_register);
/**
- * ipa2_rm_deregister() - cancel the registration
+ * ipa_rm_deregister() - cancel the registration
* @resource_name: resource name
* @reg_params: [in] registration parameters
*
* Returns: 0 on success, negative on failure
*
* Registration parameters provided here should be the same
- * as provided in ipa2_rm_register() call.
+ * as provided in ipa_rm_register() call.
*/
-int ipa2_rm_deregister(enum ipa_rm_resource_name resource_name,
+int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
struct ipa_rm_register_params *reg_params)
{
int result;
@@ -549,9 +558,10 @@ bail:
return result;
}
+EXPORT_SYMBOL(ipa_rm_deregister);
/**
- * ipa2_rm_set_perf_profile() - set performance profile
+ * ipa_rm_set_perf_profile() - set performance profile
* @resource_name: resource name
* @profile: [in] profile information.
*
@@ -560,7 +570,7 @@ bail:
* Set resource performance profile.
* Updates IPA driver if performance level changed.
*/
-int ipa2_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
+int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
struct ipa_rm_perf_profile *profile)
{
int result;
@@ -573,6 +583,8 @@ int ipa2_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
}
IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+ if (profile)
+ IPA_RM_DBG("BW: %d\n", profile->max_supported_bandwidth_mbps);
spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
@@ -596,9 +608,10 @@ bail:
return result;
}
+EXPORT_SYMBOL(ipa_rm_set_perf_profile);
/**
- * ipa2_rm_notify_completion() -
+ * ipa_rm_notify_completion() -
* consumer driver notification for
* request_resource / release_resource operations
* completion
@@ -607,7 +620,7 @@ bail:
*
* Returns: 0 on success, negative on failure
*/
-int ipa2_rm_notify_completion(enum ipa_rm_event event,
+int ipa_rm_notify_completion(enum ipa_rm_event event,
enum ipa_rm_resource_name resource_name)
{
int result;
@@ -634,6 +647,7 @@ bail:
return result;
}
+EXPORT_SYMBOL(ipa_rm_notify_completion);
static void ipa_rm_wq_handler(struct work_struct *work)
{
@@ -707,7 +721,7 @@ static void ipa_rm_wq_resume_handler(struct work_struct *work)
IPA_RM_ERR("resource is not CONS\n");
return;
}
- IPA2_ACTIVE_CLIENTS_INC_RESOURCE(ipa_rm_resource_str(
+ IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa_rm_resource_str(
ipa_rm_work->resource_name));
spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
@@ -715,7 +729,7 @@ static void ipa_rm_wq_resume_handler(struct work_struct *work)
&resource) != 0){
IPA_RM_ERR("resource does not exists\n");
spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
- IPA2_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(
+ IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(
ipa_rm_work->resource_name));
goto bail;
}
@@ -766,7 +780,7 @@ static void ipa_rm_wq_suspend_handler(struct work_struct *work)
* @wq_cmd: command that should be executed
* @resource_name: resource on which command should be executed
* @notify_registered_only: notify only clients registered by
- * ipa2_rm_register()
+ * ipa_rm_register()
*
* Returns: 0 on success, negative otherwise
*/
@@ -1000,7 +1014,7 @@ void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name)
bw_ptr = &ipa_rm_ctx->prof_vote.bw_cons[
resource_name - IPA_RM_RESOURCE_PROD_MAX];
} else {
- IPAERR("Invalid resource_name\n");
+ IPA_RM_ERR("Invalid resource_name\n");
return;
}
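With the ipa2_ prefixes dropped and EXPORT_SYMBOL() added throughout, RM clients built as modules can now drive the resource state machine directly. A hedged end-to-end sketch of a client lifecycle; the callback, the resource choice, and the error handling are illustrative, while the API calls and the -EINPROGRESS convention come from the code above:

        static void example_rm_notify(void *user_data, enum ipa_rm_event event,
                                      unsigned long data)
        {
                /* IPA_RM_RESOURCE_GRANTED / IPA_RM_RESOURCE_RELEASED arrive here */
        }

        static int example_rm_setup(void)
        {
                struct ipa_rm_create_params params = { 0 };
                int ret;

                params.name = IPA_RM_RESOURCE_USB_PROD; /* hypothetical choice */
                params.reg_params.notify_cb = example_rm_notify;

                ret = ipa_rm_create_resource(&params);
                if (ret)
                        return ret;

                ret = ipa_rm_request_resource(IPA_RM_RESOURCE_USB_PROD);
                if (ret == -EINPROGRESS) {
                        /* grant is asynchronous: wait for example_rm_notify() */
                }
                return 0;
        }

        static void example_rm_teardown(void)
        {
                ipa_rm_release_resource(IPA_RM_RESOURCE_USB_PROD);
                ipa_rm_delete_resource(IPA_RM_RESOURCE_USB_PROD);
        }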
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
index fd437b0c8775..fd437b0c8775 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.c
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.h b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h
index b76c6636f873..b76c6636f873 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.h
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_rm_i.h
index b286c198160c..65dbff66a6dd 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_i.h
+++ b/drivers/platform/msm/ipa/ipa_rm_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,8 @@
#define IPA_RM_DRV_NAME "ipa_rm"
+#define IPA_RM_DBG_LOW(fmt, args...) \
+ pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
#define IPA_RM_DBG(fmt, args...) \
pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
#define IPA_RM_ERR(fmt, args...) \
@@ -71,7 +73,7 @@ enum ipa_rm_wq_cmd {
* @dep_graph: data structure to search for resource if exists
* @event: event to notify
* @notify_registered_only: notify only clients registered by
- * ipa2_rm_register()
+ * ipa_rm_register()
*/
struct ipa_rm_wq_work_type {
struct work_struct work;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
index ee14e722b885..2f2cef05441d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_inactivity_timer.c
+++ b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,7 +18,7 @@
#include <linux/unistd.h>
#include <linux/workqueue.h>
#include <linux/ipa.h>
-#include "ipa_i.h"
+#include "ipa_rm_i.h"
/**
* struct ipa_rm_it_private - IPA RM Inactivity Timer private
@@ -53,9 +53,9 @@ static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX];
* ipa_rm_inactivity_timer_func() - called when timer expired in
* the context of the shared workqueue. Checks internally if
* reschedule_work flag is set. In case it is not set this function calls to
- * ipa2_rm_release_resource(). In case reschedule_work is set this function
+ * ipa_rm_release_resource(). In case reschedule_work is set this function
* reschedules the work. This flag is cleared when
- * calling to ipa2_rm_inactivity_timer_release_resource().
+ * calling to ipa_rm_inactivity_timer_release_resource().
*
* @work: work object provided by the work queue
*
@@ -70,24 +70,24 @@ static void ipa_rm_inactivity_timer_func(struct work_struct *work)
work);
unsigned long flags;
- IPADBG("%s: timer expired for resource %d!\n", __func__,
+ IPA_RM_DBG_LOW("%s: timer expired for resource %d!\n", __func__,
me->resource_name);
spin_lock_irqsave(
&ipa_rm_it_handles[me->resource_name].lock, flags);
if (ipa_rm_it_handles[me->resource_name].reschedule_work) {
- IPADBG("%s: setting delayed work\n", __func__);
+ IPA_RM_DBG_LOW("%s: setting delayed work\n", __func__);
ipa_rm_it_handles[me->resource_name].reschedule_work = false;
schedule_delayed_work(
&ipa_rm_it_handles[me->resource_name].work,
ipa_rm_it_handles[me->resource_name].jiffies);
} else if (ipa_rm_it_handles[me->resource_name].resource_requested) {
- IPADBG("%s: not calling release\n", __func__);
+ IPA_RM_DBG_LOW("%s: not calling release\n", __func__);
ipa_rm_it_handles[me->resource_name].work_in_progress = false;
} else {
- IPADBG("%s: calling release_resource on resource %d!\n",
+ IPA_RM_DBG_LOW("%s: calling release_resource on resource %d!\n",
__func__, me->resource_name);
- ipa2_rm_release_resource(me->resource_name);
+ ipa_rm_release_resource(me->resource_name);
ipa_rm_it_handles[me->resource_name].work_in_progress = false;
}
spin_unlock_irqrestore(
@@ -95,31 +95,31 @@ static void ipa_rm_inactivity_timer_func(struct work_struct *work)
}
/**
-* ipa2_rm_inactivity_timer_init() - Init function for IPA RM
+* ipa_rm_inactivity_timer_init() - Init function for IPA RM
* inactivity timer. This function shall be called prior calling
* any other API of IPA RM inactivity timer.
*
* @resource_name: Resource name. @see ipa_rm.h
* @msecs: time in milliseconds that IPA RM inactivity timer
-* shall wait prior calling to ipa2_rm_release_resource().
+* shall wait prior calling to ipa_rm_release_resource().
*
* Return codes:
* 0: success
* -EINVAL: invalid parameters
*/
-int ipa2_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
+int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
unsigned long msecs)
{
- IPADBG("%s: resource %d\n", __func__, resource_name);
+ IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
- IPAERR("%s: Invalid parameter\n", __func__);
+ IPA_RM_ERR("%s: Invalid parameter\n", __func__);
return -EINVAL;
}
if (ipa_rm_it_handles[resource_name].initied) {
- IPAERR("%s: resource %d already inited\n",
+ IPA_RM_ERR("%s: resource %d already inited\n",
__func__, resource_name);
return -EINVAL;
}
@@ -137,9 +137,10 @@ int ipa2_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
return 0;
}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_init);
/**
-* ipa2_rm_inactivity_timer_destroy() - De-Init function for IPA
+* ipa_rm_inactivity_timer_destroy() - De-Init function for IPA
* RM inactivity timer.
*
* @resource_name: Resource name. @see ipa_rm.h
@@ -148,18 +149,18 @@ int ipa2_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
* 0: success
* -EINVAL: invalid parameters
*/
-int ipa2_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
+int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
{
- IPADBG("%s: resource %d\n", __func__, resource_name);
+ IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
- IPAERR("%s: Invalid parameter\n", __func__);
+ IPA_RM_ERR("%s: Invalid parameter\n", __func__);
return -EINVAL;
}
if (!ipa_rm_it_handles[resource_name].initied) {
- IPAERR("%s: resource %d already inited\n",
+ IPA_RM_ERR("%s: resource %d already inited\n",
__func__, resource_name);
return -EINVAL;
}
@@ -171,13 +172,13 @@ int ipa2_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
return 0;
}
-
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_destroy);
/**
-* ipa2_rm_inactivity_timer_request_resource() - Same as
-* ipa2_rm_request_resource(), with a difference that calling to
+* ipa_rm_inactivity_timer_request_resource() - Same as
+* ipa_rm_request_resource(), with a difference that calling to
* this function will also cancel the inactivity timer, if
-* ipa2_rm_inactivity_timer_release_resource() was called earlier.
+* ipa_rm_inactivity_timer_release_resource() was called earlier.
*
* @resource_name: Resource name. @see ipa_rm.h
*
@@ -185,40 +186,42 @@ int ipa2_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
* 0: success
* -EINVAL: invalid parameters
*/
-int ipa2_rm_inactivity_timer_request_resource(
+int ipa_rm_inactivity_timer_request_resource(
enum ipa_rm_resource_name resource_name)
{
int ret;
unsigned long flags;
- IPADBG("%s: resource %d\n", __func__, resource_name);
+ IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
- IPAERR("%s: Invalid parameter\n", __func__);
+ IPA_RM_ERR("%s: Invalid parameter\n", __func__);
return -EINVAL;
}
if (!ipa_rm_it_handles[resource_name].initied) {
- IPAERR("%s: Not initialized\n", __func__);
+ IPA_RM_ERR("%s: Not initialized\n", __func__);
return -EINVAL;
}
spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
ipa_rm_it_handles[resource_name].resource_requested = true;
spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
- ret = ipa2_rm_request_resource(resource_name);
- IPADBG("%s: resource %d: returning %d\n", __func__, resource_name, ret);
+ ret = ipa_rm_request_resource(resource_name);
+ IPA_RM_DBG_LOW("%s: resource %d: returning %d\n", __func__,
+ resource_name, ret);
return ret;
}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_request_resource);
/**
-* ipa2_rm_inactivity_timer_release_resource() - Sets the
+* ipa_rm_inactivity_timer_release_resource() - Sets the
* inactivity timer to the timeout set by
-* ipa2_rm_inactivity_timer_init(). When the timeout expires, IPA
-* RM inactivity timer will call to ipa2_rm_release_resource().
-* If a call to ipa2_rm_inactivity_timer_request_resource() was
+* ipa_rm_inactivity_timer_init(). When the timeout expires, IPA
+* RM inactivity timer will call to ipa_rm_release_resource().
+* If a call to ipa_rm_inactivity_timer_request_resource() was
* made BEFORE the timeout has expired, the timer will be
* cancelled.
*
@@ -228,28 +231,28 @@ int ipa2_rm_inactivity_timer_request_resource(
* 0: success
* -EINVAL: invalid parameters
*/
-int ipa2_rm_inactivity_timer_release_resource(
+int ipa_rm_inactivity_timer_release_resource(
enum ipa_rm_resource_name resource_name)
{
unsigned long flags;
- IPADBG("%s: resource %d\n", __func__, resource_name);
+ IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
- IPAERR("%s: Invalid parameter\n", __func__);
+ IPA_RM_ERR("%s: Invalid parameter\n", __func__);
return -EINVAL;
}
if (!ipa_rm_it_handles[resource_name].initied) {
- IPAERR("%s: Not initialized\n", __func__);
+ IPA_RM_ERR("%s: Not initialized\n", __func__);
return -EINVAL;
}
spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
ipa_rm_it_handles[resource_name].resource_requested = false;
if (ipa_rm_it_handles[resource_name].work_in_progress) {
- IPADBG("%s: Timer already set, not scheduling again %d\n",
+ IPA_RM_DBG_LOW("%s: Timer already set, no sched again %d\n",
__func__, resource_name);
ipa_rm_it_handles[resource_name].reschedule_work = true;
spin_unlock_irqrestore(
@@ -258,11 +261,12 @@ int ipa2_rm_inactivity_timer_release_resource(
}
ipa_rm_it_handles[resource_name].work_in_progress = true;
ipa_rm_it_handles[resource_name].reschedule_work = false;
- IPADBG("%s: setting delayed work\n", __func__);
+ IPA_RM_DBG_LOW("%s: setting delayed work\n", __func__);
schedule_delayed_work(&ipa_rm_it_handles[resource_name].work,
ipa_rm_it_handles[resource_name].jiffies);
spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
return 0;
}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_release_resource);
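The inactivity timer wraps ipa_rm_release_resource() with a grace period so bursty clients do not bounce the resource on every lull; a request issued before the timeout fires simply cancels the pending release. A hedged usage sketch; the resource and the 1000 ms timeout are hypothetical:

        /* once, at probe time */
        ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_USB_PROD, 1000);

        /* per traffic burst */
        ipa_rm_inactivity_timer_request_resource(IPA_RM_RESOURCE_USB_PROD);
        /* ... move data ... */
        ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_USB_PROD);
        /* actual release happens 1000 ms later, unless re-requested */

        /* once, at remove time */
        ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_USB_PROD);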
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
index 6f6f2a64b1fc..51ad9530f6ee 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.c
+++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,7 +11,6 @@
*/
#include <linux/slab.h>
-#include "ipa_i.h"
#include "ipa_rm_i.h"
/**
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.h b/drivers/platform/msm/ipa/ipa_rm_peers_list.h
index b41de0aa3167..b41de0aa3167 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.h
+++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.h
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
index c22bd3b670bd..75424eb768f4 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,10 +11,9 @@
*/
#include <linux/slab.h>
-#include "ipa_i.h"
#include "ipa_rm_resource.h"
#include "ipa_rm_i.h"
-
+#include "ipa_common_i.h"
/**
* ipa_rm_dep_prod_index() - producer name to producer index mapping
* @resource_name: [in] resource name (should be of producer)
@@ -31,6 +30,7 @@ int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name)
switch (resource_name) {
case IPA_RM_RESOURCE_Q6_PROD:
case IPA_RM_RESOURCE_USB_PROD:
+ case IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD:
case IPA_RM_RESOURCE_HSIC_PROD:
case IPA_RM_RESOURCE_STD_ECM_PROD:
case IPA_RM_RESOURCE_RNDIS_PROD:
@@ -68,6 +68,7 @@ int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name)
case IPA_RM_RESOURCE_APPS_CONS:
case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
case IPA_RM_RESOURCE_MHI_CONS:
+ case IPA_RM_RESOURCE_USB_DPL_CONS:
break;
default:
result = IPA_RM_INDEX_INVALID;
@@ -143,11 +144,12 @@ int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
int ipa_rm_resource_consumer_request(
struct ipa_rm_resource_cons *consumer,
u32 prod_needed_bw,
- bool inc_usage_count)
+ bool inc_usage_count,
+ bool wake_client)
{
int result = 0;
enum ipa_rm_resource_state prev_state;
- struct ipa2_active_client_logging_info log_info;
+ struct ipa_active_client_logging_info log_info;
IPA_RM_DBG("%s state: %d\n",
ipa_rm_resource_str(consumer->resource.name),
@@ -160,10 +162,10 @@ int ipa_rm_resource_consumer_request(
case IPA_RM_RELEASE_IN_PROGRESS:
reinit_completion(&consumer->request_consumer_in_progress);
consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
- IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
ipa_rm_resource_str(consumer->resource.name));
if (prev_state == IPA_RM_RELEASE_IN_PROGRESS ||
- ipa2_inc_client_enable_clks_no_block(&log_info) != 0) {
+ ipa_inc_client_enable_clks_no_block(&log_info) != 0) {
IPA_RM_DBG("async resume work for %s\n",
ipa_rm_resource_str(consumer->resource.name));
ipa_rm_wq_send_resume_cmd(consumer->resource.name,
@@ -178,6 +180,11 @@ int ipa_rm_resource_consumer_request(
false);
break;
case IPA_RM_GRANTED:
+ if (wake_client) {
+ result = ipa_rm_resource_consumer_request_work(
+ consumer, prev_state, prod_needed_bw, false);
+ break;
+ }
ipa_rm_perf_profile_change(consumer->resource.name);
break;
case IPA_RM_REQUEST_IN_PROGRESS:
@@ -262,7 +269,7 @@ bail:
* @producer: producer
* @event: event to notify
* @notify_registered_only: notify only clients registered by
- * ipa2_rm_register()
+ * ipa_rm_register()
*/
void ipa_rm_resource_producer_notify_clients(
struct ipa_rm_resource_prod *producer,
@@ -492,7 +499,7 @@ int ipa_rm_resource_delete(struct ipa_rm_resource *resource)
* ipa_rm_resource_register() - register resource
* @resource: [in] resource
* @reg_params: [in] registration parameters
- * @explicit: [in] registered explicitly by ipa2_rm_register()
+ * @explicit: [in] registered explicitly by ipa_rm_register()
*
* Returns: 0 on success, negative on failure
*
@@ -630,7 +637,7 @@ int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
consumer_result = ipa_rm_resource_consumer_request(
(struct ipa_rm_resource_cons *)depends_on,
resource->max_bw,
- true);
+ true, false);
if (consumer_result != -EINPROGRESS) {
resource->state = prev_state;
((struct ipa_rm_resource_prod *)
@@ -792,7 +799,7 @@ int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer)
consumer_result = ipa_rm_resource_consumer_request(
(struct ipa_rm_resource_cons *)consumer,
producer->resource.max_bw,
- true);
+ true, false);
if (consumer_result == -EINPROGRESS) {
result = -EINPROGRESS;
} else {
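
The new wake_client flag threaded through ipa_rm_resource_consumer_request() changes only the IPA_RM_GRANTED case: instead of merely refreshing the perf profile, a wake_client request re-runs the request work so the consumer's request_resource callback fires even though the resource is formally granted. Both dependency-driven call sites above keep the old behaviour by passing false. A sketch of a caller that wants the wake-up semantics; the wrapper name and the inc_usage_count choice are illustrative:

	/* Sketch: force a granted consumer's request_resource callback to
	 * run again, e.g. to wake a suspended client. inc_usage_count is
	 * false here because this is a re-request, not a new reference.
	 */
	static int resource_consumer_wake(struct ipa_rm_resource_cons *cons,
					  u32 needed_bw)
	{
		return ipa_rm_resource_consumer_request(cons, needed_bw,
					false, /* inc_usage_count */
					true); /* wake_client */
	}
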
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_rm_resource.h
index 5b07cf9c837d..26573e243b4e 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.h
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,7 +39,7 @@ enum ipa_rm_resource_type {
* struct ipa_rm_notification_info - notification information
* of IPA RM client
* @reg_params: registration parameters
- * @explicit: registered explicitly by ipa2_rm_register()
+ * @explicit: registered explicitly by ipa_rm_register()
* @link: link to the list of all registered clients information
*/
struct ipa_rm_notification_info {
@@ -127,7 +127,8 @@ int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer);
int ipa_rm_resource_consumer_request(struct ipa_rm_resource_cons *consumer,
u32 needed_bw,
- bool inc_usage_count);
+ bool inc_usage_count,
+ bool wake_client);
int ipa_rm_resource_consumer_release(struct ipa_rm_resource_cons *consumer,
u32 needed_bw,
diff --git a/drivers/platform/msm/ipa/ipa_v2/Makefile b/drivers/platform/msm/ipa/ipa_v2/Makefile
index 1bb9c91d3bd4..435acbf1cab8 100644
--- a/drivers/platform/msm/ipa/ipa_v2/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v2/Makefile
@@ -1,7 +1,6 @@
obj-$(CONFIG_IPA) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
- ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o ipa_rm.o \
- ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o \
+ ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o
obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 8c825d1f4749..ac6d729db595 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -35,7 +35,7 @@
#include <linux/hashtable.h>
#include <linux/hash.h>
#include "ipa_i.h"
-#include "ipa_rm_i.h"
+#include "../ipa_rm_i.h"
#define CREATE_TRACE_POINTS
#include "ipa_trace.h"
@@ -206,78 +206,6 @@ static bool smmu_disable_htw;
static char *active_clients_table_buf;
-const char *ipa2_clients_strings[IPA_CLIENT_MAX] = {
- __stringify(IPA_CLIENT_HSIC1_PROD),
- __stringify(IPA_CLIENT_WLAN1_PROD),
- __stringify(IPA_CLIENT_USB2_PROD),
- __stringify(IPA_CLIENT_HSIC3_PROD),
- __stringify(IPA_CLIENT_HSIC2_PROD),
- __stringify(IPA_CLIENT_USB3_PROD),
- __stringify(IPA_CLIENT_HSIC4_PROD),
- __stringify(IPA_CLIENT_USB4_PROD),
- __stringify(IPA_CLIENT_HSIC5_PROD),
- __stringify(IPA_CLIENT_USB_PROD),
- __stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD),
- __stringify(IPA_CLIENT_A2_EMBEDDED_PROD),
- __stringify(IPA_CLIENT_A2_TETHERED_PROD),
- __stringify(IPA_CLIENT_APPS_LAN_WAN_PROD),
- __stringify(IPA_CLIENT_APPS_CMD_PROD),
- __stringify(IPA_CLIENT_ODU_PROD),
- __stringify(IPA_CLIENT_MHI_PROD),
- __stringify(IPA_CLIENT_Q6_LAN_PROD),
- __stringify(IPA_CLIENT_Q6_WAN_PROD),
- __stringify(IPA_CLIENT_Q6_CMD_PROD),
- __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD),
- __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD),
- __stringify(IPA_CLIENT_Q6_DECOMP_PROD),
- __stringify(IPA_CLIENT_Q6_DECOMP2_PROD),
- __stringify(IPA_CLIENT_UC_USB_PROD),
-
- /* Below PROD client type is only for test purpose */
- __stringify(IPA_CLIENT_TEST_PROD),
- __stringify(IPA_CLIENT_TEST1_PROD),
- __stringify(IPA_CLIENT_TEST2_PROD),
- __stringify(IPA_CLIENT_TEST3_PROD),
- __stringify(IPA_CLIENT_TEST4_PROD),
-
- __stringify(IPA_CLIENT_HSIC1_CONS),
- __stringify(IPA_CLIENT_WLAN1_CONS),
- __stringify(IPA_CLIENT_HSIC2_CONS),
- __stringify(IPA_CLIENT_USB2_CONS),
- __stringify(IPA_CLIENT_WLAN2_CONS),
- __stringify(IPA_CLIENT_HSIC3_CONS),
- __stringify(IPA_CLIENT_USB3_CONS),
- __stringify(IPA_CLIENT_WLAN3_CONS),
- __stringify(IPA_CLIENT_HSIC4_CONS),
- __stringify(IPA_CLIENT_USB4_CONS),
- __stringify(IPA_CLIENT_WLAN4_CONS),
- __stringify(IPA_CLIENT_HSIC5_CONS),
- __stringify(IPA_CLIENT_USB_CONS),
- __stringify(IPA_CLIENT_USB_DPL_CONS),
- __stringify(IPA_CLIENT_A2_EMBEDDED_CONS),
- __stringify(IPA_CLIENT_A2_TETHERED_CONS),
- __stringify(IPA_CLIENT_A5_LAN_WAN_CONS),
- __stringify(IPA_CLIENT_APPS_LAN_CONS),
- __stringify(IPA_CLIENT_APPS_WAN_CONS),
- __stringify(IPA_CLIENT_ODU_EMB_CONS),
- __stringify(IPA_CLIENT_ODU_TETH_CONS),
- __stringify(IPA_CLIENT_MHI_CONS),
- __stringify(IPA_CLIENT_Q6_LAN_CONS),
- __stringify(IPA_CLIENT_Q6_WAN_CONS),
- __stringify(IPA_CLIENT_Q6_DUN_CONS),
- __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS),
- __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS),
- __stringify(IPA_CLIENT_Q6_DECOMP_CONS),
- __stringify(IPA_CLIENT_Q6_DECOMP2_CONS),
- __stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS),
- /* Below CONS client type is only for test purpose */
- __stringify(IPA_CLIENT_TEST_CONS),
- __stringify(IPA_CLIENT_TEST1_CONS),
- __stringify(IPA_CLIENT_TEST2_CONS),
- __stringify(IPA_CLIENT_TEST3_CONS),
- __stringify(IPA_CLIENT_TEST4_CONS),
-};
-
int ipa2_active_clients_log_print_buffer(char *buf, int size)
{
int i;
@@ -626,7 +554,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
return -ENOTTY;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
switch (cmd) {
case IPA_IOC_ALLOC_NAT_MEM:
@@ -1163,7 +1091,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- retval = ipa2_rm_add_dependency(rm_depend.resource_name,
+ retval = ipa_rm_add_dependency(rm_depend.resource_name,
rm_depend.depends_on_name);
break;
case IPA_IOC_RM_DEL_DEPENDENCY:
@@ -1172,7 +1100,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- retval = ipa2_rm_delete_dependency(rm_depend.resource_name,
+ retval = ipa_rm_delete_dependency(rm_depend.resource_name,
rm_depend.depends_on_name);
break;
case IPA_IOC_GENERATE_FLT_EQ:
@@ -1329,12 +1257,12 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
default: /* redundant, as cmd was checked against MAXNR */
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -ENOTTY;
}
kfree(param);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return retval;
}
@@ -1521,7 +1449,7 @@ int ipa_init_q6_smem(void)
{
int rc;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
if (ipa_ctx->ipa_hw_type == IPA_HW_v2_0)
rc = ipa_init_smem_region(IPA_MEM_PART(modem_size) -
@@ -1533,7 +1461,7 @@ int ipa_init_q6_smem(void)
if (rc) {
IPAERR("failed to initialize Modem RAM memory\n");
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return rc;
}
@@ -1541,7 +1469,7 @@ int ipa_init_q6_smem(void)
IPA_MEM_PART(modem_hdr_ofst));
if (rc) {
IPAERR("failed to initialize Modem HDRs RAM memory\n");
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return rc;
}
@@ -1549,7 +1477,7 @@ int ipa_init_q6_smem(void)
IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
if (rc) {
IPAERR("failed to initialize Modem proc ctx RAM memory\n");
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return rc;
}
@@ -1557,11 +1485,11 @@ int ipa_init_q6_smem(void)
IPA_MEM_PART(modem_comp_decomp_ofst));
if (rc) {
IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return rc;
}
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return rc;
}
@@ -1609,7 +1537,7 @@ int ipa_q6_monitor_holb_mitigation(bool enable)
int ep_idx;
int client_idx;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx)) {
ep_idx = ipa2_get_ep_mapping(client_idx);
@@ -1621,7 +1549,7 @@ int ipa_q6_monitor_holb_mitigation(bool enable)
ipa_uc_monitor_holb(client_idx, enable);
}
}
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
@@ -1957,7 +1885,7 @@ int ipa_q6_pre_shutdown_cleanup(void)
if (ipa_ctx->uc_ctx.uc_zip_error)
BUG();
- IPA2_ACTIVE_CLIENTS_INC_SPECIAL("Q6");
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("Q6");
/*
* pipe delay and holb discard for ZIP pipes are handled
* in post shutdown callback.
@@ -3010,7 +2938,7 @@ static void ipa_start_tag_process(struct work_struct *work)
if (res)
IPAERR("ipa_tag_aggr_force_close failed %d\n", res);
- IPA2_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
IPADBG("TAG process done\n");
}
@@ -3038,7 +2966,7 @@ static void ipa_start_tag_process(struct work_struct *work)
* - Remove and deallocate unneeded data structure
* - Log the call in the circular history buffer (unless it is a simple call)
*/
-void ipa2_active_clients_log_mod(struct ipa2_active_client_logging_info *id,
+void ipa2_active_clients_log_mod(struct ipa_active_client_logging_info *id,
bool inc, bool int_ctx)
{
char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN];
@@ -3094,13 +3022,13 @@ void ipa2_active_clients_log_mod(struct ipa2_active_client_logging_info *id,
}
}
-void ipa2_active_clients_log_dec(struct ipa2_active_client_logging_info *id,
+void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id,
bool int_ctx)
{
ipa2_active_clients_log_mod(id, false, int_ctx);
}
-void ipa2_active_clients_log_inc(struct ipa2_active_client_logging_info *id,
+void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id,
bool int_ctx)
{
ipa2_active_clients_log_mod(id, true, int_ctx);
@@ -3116,7 +3044,7 @@ void ipa2_active_clients_log_inc(struct ipa2_active_client_logging_info *id,
* Return codes:
* None
*/
-void ipa2_inc_client_enable_clks(struct ipa2_active_client_logging_info *id)
+void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
{
ipa_active_clients_lock();
ipa2_active_clients_log_inc(id, false);
@@ -3138,7 +3066,7 @@ void ipa2_inc_client_enable_clks(struct ipa2_active_client_logging_info *id)
* Return codes: 0 for success
* -EPERM if an asynchronous action should have been done
*/
-int ipa2_inc_client_enable_clks_no_block(struct ipa2_active_client_logging_info
+int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
*id)
{
int res = 0;
@@ -3176,9 +3104,9 @@ bail:
* Return codes:
* None
*/
-void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id)
+void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
{
- struct ipa2_active_client_logging_info log_info;
+ struct ipa_active_client_logging_info log_info;
ipa_active_clients_lock();
ipa2_active_clients_log_dec(id, false);
@@ -3186,7 +3114,7 @@ void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id)
IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
if (ipa_ctx->ipa_active_clients.cnt == 0) {
if (ipa_ctx->tag_process_before_gating) {
- IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
"TAG_PROCESS");
ipa2_active_clients_log_inc(&log_info, false);
ipa_ctx->tag_process_before_gating = false;
@@ -3282,7 +3210,7 @@ fail:
return retval;
}
-int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
u32 bandwidth_mbps)
{
enum ipa_voltage_level needed_voltage;
@@ -3474,7 +3402,7 @@ void ipa_suspend_handler(enum ipa_irq_type interrupt,
if (!atomic_read(
&ipa_ctx->sps_pm.dec_clients)
) {
- IPA2_ACTIVE_CLIENTS_INC_EP(
+ IPA_ACTIVE_CLIENTS_INC_EP(
ipa_ctx->ep[i].client);
IPADBG("Pipes un-suspended.\n");
IPADBG("Enter poll mode.\n");
@@ -3551,7 +3479,7 @@ static void ipa_sps_release_resource(struct work_struct *work)
ipa_sps_process_irq_schedule_rel();
} else {
atomic_set(&ipa_ctx->sps_pm.dec_clients, 0);
- IPA2_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
}
}
atomic_set(&ipa_ctx->sps_pm.eot_activity, 0);
@@ -3568,14 +3496,14 @@ int ipa_create_apps_resource(void)
apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
apps_cons_create_params.request_resource = apps_cons_request_resource;
apps_cons_create_params.release_resource = apps_cons_release_resource;
- result = ipa2_rm_create_resource(&apps_cons_create_params);
+ result = ipa_rm_create_resource(&apps_cons_create_params);
if (result) {
- IPAERR("ipa2_rm_create_resource failed\n");
+ IPAERR("ipa_rm_create_resource failed\n");
return result;
}
profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
- ipa2_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
+ ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
return result;
}
@@ -3622,7 +3550,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
struct sps_bam_props bam_props = { 0 };
struct ipa_flt_tbl *flt_tbl;
struct ipa_rt_tbl_set *rset;
- struct ipa2_active_client_logging_info log_info;
+ struct ipa_active_client_logging_info log_info;
IPADBG("IPA Driver initialization started\n");
@@ -3746,7 +3674,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
mutex_init(&ipa_ctx->ipa_active_clients.mutex);
spin_lock_init(&ipa_ctx->ipa_active_clients.spinlock);
- IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
ipa2_active_clients_log_inc(&log_info, false);
ipa_ctx->ipa_active_clients.cnt = 1;
@@ -4083,7 +4011,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
fail_add_interrupt_handler:
free_irq(resource_p->ipa_irq, master_dev);
fail_ipa_interrupts_init:
- ipa2_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
fail_create_apps_resource:
ipa_rm_exit();
fail_ipa_rm_init:
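
Most of the churn in this file is mechanical: the v2-private IPA2_ACTIVE_CLIENTS_* wrappers give way to the shared IPA_ACTIVE_CLIENTS_* ones, and the logging-info struct loses its "2". Going by the IPA2_ definitions this patch deletes from ipa_i.h further down, each INC/DEC pair expands to roughly the following; the common variants in ipa_common_i.h are assumed equivalent apart from naming:

	/* Approximate expansion of IPA_ACTIVE_CLIENTS_INC_SIMPLE() /
	 * IPA_ACTIVE_CLIENTS_DEC_SIMPLE(), reconstructed from the removed
	 * IPA2_ macros; not the literal common-header text.
	 */
	{
		struct ipa_active_client_logging_info log_info;

		log_info.file = __FILENAME__;	/* basename of __FILE__ */
		log_info.line = __LINE__;
		log_info.type = SIMPLE;
		log_info.id_string = __func__;
		ipa2_inc_client_enable_clks(&log_info);	/* vote clocks + log */

		/* ... section that must run with IPA clocks on ... */

		/* (the real DEC_SIMPLE() preps a fresh log_info with its
		 * own __LINE__ before dropping the vote) */
		ipa2_dec_client_disable_clks(&log_info);
	}
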
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
index b3f50dd52528..f1742e05c598 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
@@ -289,7 +289,7 @@ int ipa2_connect(const struct ipa_connect_params *in,
}
memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
- IPA2_ACTIVE_CLIENTS_INC_EP(in->client);
+ IPA_ACTIVE_CLIENTS_INC_EP(in->client);
ep->skip_ep_cfg = in->skip_ep_cfg;
@@ -432,7 +432,7 @@ int ipa2_connect(const struct ipa_connect_params *in,
ipa_install_dflt_flt_rules(ipa_ep_idx);
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_DEC_EP(in->client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
@@ -486,7 +486,7 @@ desc_mem_alloc_fail:
sps_free_endpoint(ep->ep_hdl);
ipa_cfg_ep_fail:
memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
- IPA2_ACTIVE_CLIENTS_DEC_EP(in->client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
fail:
return result;
}
@@ -556,7 +556,7 @@ int ipa2_disconnect(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
client_type = ipa2_get_client_mapping(clnt_hdl);
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_INC_EP(client_type);
+ IPA_ACTIVE_CLIENTS_INC_EP(client_type);
/* Set Disconnect in Progress flag. */
spin_lock(&ipa_ctx->disconnect_lock);
@@ -663,7 +663,7 @@ int ipa2_disconnect(u32 clnt_hdl)
memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
spin_unlock(&ipa_ctx->disconnect_lock);
- IPA2_ACTIVE_CLIENTS_DEC_EP(client_type);
+ IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -694,7 +694,7 @@ int ipa2_reset_endpoint(u32 clnt_hdl)
}
ep = &ipa_ctx->ep[clnt_hdl];
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
res = sps_disconnect(ep->ep_hdl);
if (res) {
IPAERR("sps_disconnect() failed, res=%d.\n", res);
@@ -709,7 +709,7 @@ int ipa2_reset_endpoint(u32 clnt_hdl)
}
bail:
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return res;
}
@@ -761,7 +761,7 @@ int ipa2_clear_endpoint_delay(u32 clnt_hdl)
ep->qmi_request_sent = true;
}
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
/* Set disconnect in progress flag so further flow control events are
* not honored.
*/
@@ -774,7 +774,7 @@ int ipa2_clear_endpoint_delay(u32 clnt_hdl)
ep_ctrl.ipa_ep_suspend = false;
ipa2_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index daf6091aad67..566cb4d03c51 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,7 @@
#include <linux/kernel.h>
#include <linux/stringify.h>
#include "ipa_i.h"
-#include "ipa_rm_i.h"
+#include "../ipa_rm_i.h"
#define IPA_MAX_MSG_LEN 4096
#define IPA_DBG_CNTR_ON 127265
@@ -158,9 +158,9 @@ static ssize_t ipa_read_gen_reg(struct file *file, char __user *ubuf,
{
int nbytes;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
nbytes = ipa_ctx->ctrl->ipa_read_gen_reg(dbg_buff, IPA_MAX_MSG_LEN);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
@@ -328,7 +328,7 @@ static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf,
end_idx = start_idx + 1;
}
pos = *ppos;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = start_idx; i < end_idx; i++) {
nbytes = ipa_ctx->ctrl->ipa_read_ep_reg(dbg_buff,
@@ -338,7 +338,7 @@ static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf,
ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
nbytes);
if (ret < 0) {
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return ret;
}
@@ -346,7 +346,7 @@ static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf,
ubuf += nbytes;
count -= nbytes;
}
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
*ppos = pos + size;
return size;
@@ -370,9 +370,9 @@ static ssize_t ipa_write_keep_awake(struct file *file, const char __user *buf,
return -EFAULT;
if (option == 1)
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
else if (option == 0)
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
else
return -EFAULT;
@@ -1230,9 +1230,9 @@ static ssize_t ipa_write_dbg_cnt(struct file *file, const char __user *buf,
if (kstrtou32(dbg_buff, 0, &option))
return -EFAULT;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipa_ctx->ctrl->ipa_write_dbg_cnt(option);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return count;
}
@@ -1264,9 +1264,9 @@ static ssize_t ipa_read_dbg_cnt(struct file *file, char __user *ubuf,
{
int nbytes;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
nbytes = ipa_ctx->ctrl->ipa_read_dbg_cnt(dbg_buff, IPA_MAX_MSG_LEN);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c
index 90d3bb4c5e95..e08f281b1864 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c
@@ -274,7 +274,7 @@ int ipa2_dma_enable(void)
mutex_unlock(&ipa_dma_ctx->enable_lock);
return -EPERM;
}
- IPA2_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
ipa_dma_ctx->is_enabled = true;
mutex_unlock(&ipa_dma_ctx->enable_lock);
@@ -337,7 +337,7 @@ int ipa2_dma_disable(void)
}
ipa_dma_ctx->is_enabled = false;
spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
- IPA2_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
mutex_unlock(&ipa_dma_ctx->enable_lock);
IPADMA_FUNC_EXIT();
return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 5929bfc8f96e..0a3f6795e92d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -254,7 +254,7 @@ static void ipa_handle_tx(struct ipa_sys_context *sys)
int inactive_cycles = 0;
int cnt;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
do {
cnt = ipa_handle_tx_core(sys, true, true);
if (cnt == 0) {
@@ -267,7 +267,7 @@ static void ipa_handle_tx(struct ipa_sys_context *sys)
} while (inactive_cycles <= POLLING_INACTIVITY_TX);
ipa_tx_switch_to_intr_mode(sys);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
static void ipa_wq_handle_tx(struct work_struct *work)
@@ -653,7 +653,7 @@ int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
}
sys = ipa_ctx->ep[ep_idx].sys;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
if (num_desc == 1) {
init_completion(&descr->xfer_done);
@@ -687,7 +687,7 @@ int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
}
bail:
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return result;
}
@@ -1002,7 +1002,7 @@ static void ipa_handle_rx(struct ipa_sys_context *sys)
int inactive_cycles = 0;
int cnt;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
do {
cnt = ipa_handle_rx_core(sys, true, true);
if (cnt == 0) {
@@ -1026,7 +1026,7 @@ static void ipa_handle_rx(struct ipa_sys_context *sys)
trace_poll_to_intr(sys->ep->client);
ipa_rx_switch_to_intr_mode(sys);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
static void switch_to_intr_rx_work_func(struct work_struct *work)
@@ -1118,7 +1118,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
ep = &ipa_ctx->ep[ipa_ep_idx];
- IPA2_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+ IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
if (ep->valid == 1) {
if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
@@ -1143,7 +1143,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
ep->priv = sys_in->priv;
*clnt_hdl = ipa_ep_idx;
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
return 0;
}
@@ -1357,7 +1357,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
}
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
ipa_ep_idx, ep->sys);
@@ -1380,7 +1380,7 @@ fail_wq:
kfree(ep->sys);
memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
fail_and_disable_clocks:
- IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
fail_gen:
return result;
}
@@ -1410,7 +1410,7 @@ int ipa2_teardown_sys_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_disable_data_path(clnt_hdl);
ep->valid = 0;
@@ -1456,7 +1456,7 @@ int ipa2_teardown_sys_pipe(u32 clnt_hdl)
if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt))
ipa_cleanup_wlan_rx_common_cache();
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -2065,9 +2065,9 @@ static void replenish_rx_work_func(struct work_struct *work)
dwork = container_of(work, struct delayed_work, work);
sys = container_of(dwork, struct ipa_sys_context, replenish_rx_work);
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
sys->repl_hdlr(sys);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
/**
@@ -2115,7 +2115,6 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
int pad_len_byte;
int len;
unsigned char *buf;
- bool drop_packet;
int src_pipe;
unsigned int used = *(unsigned int *)skb->cb;
unsigned int used_align = ALIGN(used, 32);
@@ -2135,6 +2134,7 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
memcpy(buf, sys->prev_skb->data, sys->len_partial);
sys->len_partial = 0;
sys->free_skb(sys->prev_skb);
+ sys->prev_skb = NULL;
goto begin;
}
@@ -2154,9 +2154,13 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
skb2->len - sys->len_pad);
skb2->truesize = skb2->len +
sizeof(struct sk_buff);
- sys->ep->client_notify(sys->ep->priv,
- IPA_RECEIVE,
- (unsigned long)(skb2));
+ if (sys->drop_packet)
+ dev_kfree_skb_any(skb2);
+ else
+ sys->ep->client_notify(
+ sys->ep->priv,
+ IPA_RECEIVE,
+ (unsigned long)(skb2));
} else {
IPAERR("copy expand failed\n");
}
@@ -2187,7 +2191,7 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
begin:
while (skb->len) {
- drop_packet = false;
+ sys->drop_packet = false;
IPADBG("LEN_REM %d\n", skb->len);
if (skb->len < IPA_PKT_STATUS_SIZE) {
@@ -2226,9 +2230,11 @@ begin:
IPA_STATS_EXCP_CNT(status->exception,
ipa_ctx->stats.rx_excp_pkts);
if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
- status->endp_src_idx >= ipa_ctx->ipa_num_pipes ||
- status->pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
+ status->endp_src_idx >= ipa_ctx->ipa_num_pipes) {
IPAERR("status fields invalid\n");
+ IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
+ status->status_opcode, status->endp_src_idx,
+ status->endp_dest_idx, status->pkt_len);
WARN_ON(1);
BUG();
}
@@ -2270,7 +2276,7 @@ begin:
* there was no route match.
*/
if (!status->exception && !status->route_match)
- drop_packet = true;
+ sys->drop_packet = true;
if (skb->len == IPA_PKT_STATUS_SIZE &&
!status->exception) {
@@ -2292,8 +2298,7 @@ begin:
if (status->exception ==
IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR) {
IPADBG("Dropping packet on DeAggr Exception\n");
- skb_pull(skb, len + IPA_PKT_STATUS_SIZE);
- continue;
+ sys->drop_packet = true;
}
skb2 = skb_clone(skb, GFP_KERNEL);
@@ -2311,9 +2316,20 @@ begin:
IPA_PKT_STATUS_SIZE);
IPADBG("rx avail for %d\n",
status->endp_dest_idx);
- if (drop_packet)
+ if (sys->drop_packet) {
dev_kfree_skb_any(skb2);
- else {
+ } else if (status->pkt_len >
+ IPA_GENERIC_AGGR_BYTE_LIMIT *
+ 1024) {
+ IPAERR("packet size invalid\n");
+ IPAERR("STATUS opcode=%d\n",
+ status->status_opcode);
+ IPAERR("src=%d dst=%d len=%d\n",
+ status->endp_src_idx,
+ status->endp_dest_idx,
+ status->pkt_len);
+ BUG();
+ } else {
skb2->truesize = skb2->len +
sizeof(struct sk_buff) +
(ALIGN(len +
@@ -3295,7 +3311,7 @@ int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
ep = &ipa_ctx->ep[ipa_ep_idx];
- IPA2_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+ IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
if (ep->valid == 1) {
if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
@@ -3322,7 +3338,7 @@ int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
ep->priv = sys_in->priv;
*clnt_hdl = ipa_ep_idx;
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
return 0;
}
@@ -3363,7 +3379,7 @@ int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
*ipa_bam_hdl = ipa_ctx->bam_handle;
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
@@ -3373,7 +3389,7 @@ int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
fail_gen2:
fail_and_disable_clocks:
- IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
fail_gen:
return result;
}
@@ -3391,12 +3407,12 @@ int ipa2_sys_teardown(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_disable_data_path(clnt_hdl);
ep->valid = 0;
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 3f538a3ed8cf..50e30291bb0f 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -31,6 +31,7 @@
#include "ipa_reg.h"
#include "ipa_qmi_service.h"
#include "../ipa_api.h"
+#include "../ipa_common_i.h"
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
@@ -150,117 +151,16 @@
#define IPA_SMMU_UC_VA_SIZE 0x20000000
#define IPA_SMMU_UC_VA_END (IPA_SMMU_UC_VA_START + IPA_SMMU_UC_VA_SIZE)
-#define __FILENAME__ \
- (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
-
-
-#define IPA2_ACTIVE_CLIENTS_PREP_EP(log_info, client) \
- log_info.file = __FILENAME__; \
- log_info.line = __LINE__; \
- log_info.type = EP; \
- log_info.id_string = ipa2_clients_strings[client]
-
-#define IPA2_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \
- log_info.file = __FILENAME__; \
- log_info.line = __LINE__; \
- log_info.type = SIMPLE; \
- log_info.id_string = __func__
-
-#define IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \
- log_info.file = __FILENAME__; \
- log_info.line = __LINE__; \
- log_info.type = RESOURCE; \
- log_info.id_string = resource_name
-
-#define IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \
- log_info.file = __FILENAME__; \
- log_info.line = __LINE__; \
- log_info.type = SPECIAL; \
- log_info.id_string = id_str
-
-#define IPA2_ACTIVE_CLIENTS_INC_EP(client) \
- do { \
- struct ipa2_active_client_logging_info log_info; \
- IPA2_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
- ipa2_inc_client_enable_clks(&log_info); \
- } while (0)
-
-#define IPA2_ACTIVE_CLIENTS_DEC_EP(client) \
- do { \
- struct ipa2_active_client_logging_info log_info; \
- IPA2_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
- ipa2_dec_client_disable_clks(&log_info); \
- } while (0)
-
-#define IPA2_ACTIVE_CLIENTS_INC_SIMPLE() \
- do { \
- struct ipa2_active_client_logging_info log_info; \
- IPA2_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
- ipa2_inc_client_enable_clks(&log_info); \
- } while (0)
-
-#define IPA2_ACTIVE_CLIENTS_DEC_SIMPLE() \
- do { \
- struct ipa2_active_client_logging_info log_info; \
- IPA2_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
- ipa2_dec_client_disable_clks(&log_info); \
- } while (0)
-
-#define IPA2_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \
- do { \
- struct ipa2_active_client_logging_info log_info; \
- IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
- ipa2_inc_client_enable_clks(&log_info); \
- } while (0)
-
-#define IPA2_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \
- do { \
- struct ipa2_active_client_logging_info log_info; \
- IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
- ipa2_dec_client_disable_clks(&log_info); \
- } while (0)
-
-#define IPA2_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \
- do { \
- struct ipa2_active_client_logging_info log_info; \
- IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
- ipa2_inc_client_enable_clks(&log_info); \
- } while (0)
-
-#define IPA2_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \
- do { \
- struct ipa2_active_client_logging_info log_info; \
- IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
- ipa2_dec_client_disable_clks(&log_info); \
- } while (0)
-
#define IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96
#define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
#define IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN 40
-extern const char *ipa2_clients_strings[];
-
-enum ipa2_active_client_log_type {
- EP,
- SIMPLE,
- RESOURCE,
- SPECIAL,
- INVALID
-};
-
-struct ipa2_active_client_logging_info {
- const char *id_string;
- char *file;
- int line;
- enum ipa2_active_client_log_type type;
-};
-
struct ipa2_active_client_htable_entry {
struct hlist_node list;
char id_string[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN];
int count;
- enum ipa2_active_client_log_type type;
+ enum ipa_active_client_log_type type;
};
struct ipa2_active_clients_log_ctx {
@@ -719,6 +619,7 @@ struct ipa_sys_context {
unsigned int len_rem;
unsigned int len_pad;
unsigned int len_partial;
+ bool drop_packet;
struct work_struct work;
void (*sps_callback)(struct sps_event_notify *notify);
enum sps_option sps_option;
@@ -1780,46 +1681,6 @@ int ipa2_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
int ipa2_uc_dereg_rdyCB(void);
/*
- * Resource manager
- */
-int ipa2_rm_create_resource(struct ipa_rm_create_params *create_params);
-
-int ipa2_rm_delete_resource(enum ipa_rm_resource_name resource_name);
-
-int ipa2_rm_register(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_register_params *reg_params);
-
-int ipa2_rm_deregister(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_register_params *reg_params);
-
-int ipa2_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_perf_profile *profile);
-
-int ipa2_rm_add_dependency(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name);
-
-int ipa2_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name);
-
-int ipa2_rm_request_resource(enum ipa_rm_resource_name resource_name);
-
-int ipa2_rm_release_resource(enum ipa_rm_resource_name resource_name);
-
-int ipa2_rm_notify_completion(enum ipa_rm_event event,
- enum ipa_rm_resource_name resource_name);
-
-int ipa2_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
- unsigned long msecs);
-
-int ipa2_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name);
-
-int ipa2_rm_inactivity_timer_request_resource(
- enum ipa_rm_resource_name resource_name);
-
-int ipa2_rm_inactivity_timer_release_resource(
- enum ipa_rm_resource_name resource_name);
-
-/*
* Tethering bridge (Rmnet / MBIM)
*/
int ipa2_teth_bridge_init(struct teth_bridge_init_params *params);
@@ -1952,13 +1813,13 @@ int ipa_straddle_boundary(u32 start, u32 end, u32 boundary);
struct ipa_context *ipa_get_ctx(void);
void ipa_enable_clks(void);
void ipa_disable_clks(void);
-void ipa2_inc_client_enable_clks(struct ipa2_active_client_logging_info *id);
-int ipa2_inc_client_enable_clks_no_block(struct ipa2_active_client_logging_info
+void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
*id);
-void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id);
-void ipa2_active_clients_log_dec(struct ipa2_active_client_logging_info *id,
+void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id,
bool int_ctx);
-void ipa2_active_clients_log_inc(struct ipa2_active_client_logging_info *id,
+void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id,
bool int_ctx);
int ipa2_active_clients_log_print_buffer(char *buf, int size);
int ipa2_active_clients_log_print_table(char *buf, int size);
@@ -2052,7 +1913,7 @@ int ipa_id_alloc(void *ptr);
void *ipa_id_find(u32 id);
void ipa_id_remove(u32 id);
-int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
u32 bandwidth_mbps);
int ipa2_cfg_ep_status(u32 clnt_hdl,
@@ -2060,9 +1921,9 @@ int ipa2_cfg_ep_status(u32 clnt_hdl,
int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity);
int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity);
-int ipa_suspend_resource_no_block(enum ipa_rm_resource_name name);
-int ipa_suspend_resource_sync(enum ipa_rm_resource_name name);
-int ipa_resume_resource(enum ipa_rm_resource_name name);
+int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name name);
+int ipa2_suspend_resource_sync(enum ipa_rm_resource_name name);
+int ipa2_resume_resource(enum ipa_rm_resource_name name);
bool ipa_should_pipe_be_suspended(enum ipa_client_type client);
int ipa_tag_aggr_force_close(int pipe_num);
@@ -2123,8 +1984,6 @@ struct iommu_domain *ipa_get_uc_smmu_domain(void);
int ipa2_ap_suspend(struct device *dev);
int ipa2_ap_resume(struct device *dev);
struct iommu_domain *ipa2_get_smmu_domain(void);
-int ipa2_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name);
struct device *ipa2_get_dma_dev(void);
int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
@@ -2136,5 +1995,4 @@ int ipa2_restore_suspend_handler(void);
void ipa_sps_irq_control_all(bool enable);
void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client);
void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client);
-const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name);
#endif /* _IPA_I_H_ */
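
With the logging types and the active-clients macros lifted into ipa_common_i.h, and the ipa2_rm_* declarations dropped from this header, v2 code now programs against the shared RM surface directly. The create/register shape is the one ipa_create_apps_resource() and ipa2_mhi_init() use above; a condensed sketch, with callback names that are illustrative and int (*)(void) callback signatures assumed rather than shown in this patch:

	/* Sketch: creating a consumer resource against the shared RM API
	 * (mirrors ipa_create_apps_resource()).
	 */
	static int my_cons_request_resource(void)
	{
		/* bring the unit out of suspend; 0 = granted synchronously */
		return 0;
	}

	static int my_cons_release_resource(void)
	{
		return 0;
	}

	static int create_my_cons(void)
	{
		struct ipa_rm_create_params create_params;

		memset(&create_params, 0, sizeof(create_params));
		create_params.name = IPA_RM_RESOURCE_APPS_CONS;
		create_params.request_resource = my_cons_request_resource;
		create_params.release_resource = my_cons_release_resource;
		return ipa_rm_create_resource(&create_params);
	}
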
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c
index e3797a48c010..f30fd4c60171 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -184,9 +184,9 @@ static void ipa_process_interrupts(bool isr_context)
static void ipa_interrupt_defer(struct work_struct *work)
{
IPADBG("processing interrupts in wq\n");
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipa_process_interrupts(false);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("Done\n");
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
index a389802de33f..ab86bac63136 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -409,7 +409,7 @@ static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
ipa_mhi_ctx->wakeup_notified = false;
if (ipa_mhi_ctx->rm_cons_state ==
IPA_MHI_RM_STATE_REQUESTED) {
- ipa2_rm_notify_completion(
+ ipa_rm_notify_completion(
IPA_RM_RESOURCE_GRANTED,
IPA_RM_RESOURCE_MHI_CONS);
ipa_mhi_ctx->rm_cons_state =
@@ -435,7 +435,7 @@ static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
ipa_mhi_ctx->wakeup_notified = false;
if (ipa_mhi_ctx->rm_cons_state ==
IPA_MHI_RM_STATE_REQUESTED) {
- ipa2_rm_notify_completion(
+ ipa_rm_notify_completion(
IPA_RM_RESOURCE_GRANTED,
IPA_RM_RESOURCE_MHI_CONS);
ipa_mhi_ctx->rm_cons_state =
@@ -592,7 +592,7 @@ static int ipa_mhi_request_prod(void)
reinit_completion(&ipa_mhi_ctx->rm_prod_granted_comp);
IPA_MHI_DBG("requesting mhi prod\n");
- res = ipa2_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
+ res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
if (res) {
if (res != -EINPROGRESS) {
IPA_MHI_ERR("failed to request mhi prod %d\n", res);
@@ -619,7 +619,7 @@ static int ipa_mhi_release_prod(void)
IPA_MHI_FUNC_ENTRY();
- res = ipa2_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
+ res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
IPA_MHI_FUNC_EXIT();
return res;
@@ -1037,7 +1037,7 @@ int ipa2_mhi_init(struct ipa_mhi_init_params *params)
mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify;
- res = ipa2_rm_create_resource(&mhi_prod_params);
+ res = ipa_rm_create_resource(&mhi_prod_params);
if (res) {
IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
goto fail_create_rm_prod;
@@ -1049,7 +1049,7 @@ int ipa2_mhi_init(struct ipa_mhi_init_params *params)
mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
mhi_cons_params.request_resource = ipa_mhi_rm_cons_request;
mhi_cons_params.release_resource = ipa_mhi_rm_cons_release;
- res = ipa2_rm_create_resource(&mhi_cons_params);
+ res = ipa_rm_create_resource(&mhi_cons_params);
if (res) {
IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
goto fail_create_rm_cons;
@@ -1065,7 +1065,7 @@ int ipa2_mhi_init(struct ipa_mhi_init_params *params)
return 0;
fail_create_rm_cons:
- ipa2_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
fail_create_rm_prod:
destroy_workqueue(ipa_mhi_ctx->wq);
fail_create_wq:
@@ -1122,14 +1122,14 @@ int ipa2_mhi_start(struct ipa_mhi_start_params *params)
ipa_mhi_ctx->host_data_addr = params->host_data_addr;
/* Add MHI <-> Q6 dependencies to IPA RM */
- res = ipa2_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res && res != -EINPROGRESS) {
IPA_MHI_ERR("failed to add dependency %d\n", res);
goto fail_add_mhi_q6_dep;
}
- res = ipa2_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_MHI_CONS);
if (res && res != -EINPROGRESS) {
IPA_MHI_ERR("failed to add dependency %d\n", res);
@@ -1164,10 +1164,10 @@ int ipa2_mhi_start(struct ipa_mhi_start_params *params)
fail_init_engine:
ipa_mhi_release_prod();
fail_request_prod:
- ipa2_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_MHI_CONS);
fail_add_q6_mhi_dep:
- ipa2_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
IPA_RM_RESOURCE_Q6_CONS);
fail_add_mhi_q6_dep:
ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
@@ -1240,7 +1240,7 @@ int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
IPA_MHI_DBG("client %d channelHandle %d channelIndex %d\n",
channel->client, channel->hdl, channel->id);
- IPA2_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+ IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
if (ep->valid == 1) {
IPA_MHI_ERR("EP already allocated.\n");
@@ -1310,7 +1310,7 @@ int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
ipa_install_dflt_flt_rules(ipa_ep_idx);
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys.client,
@@ -1328,7 +1328,7 @@ fail_enable_dp:
fail_init_channel:
memset(ep, 0, offsetof(struct ipa_ep_context, sys));
fail_ep_exists:
- IPA2_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
return -EPERM;
}
@@ -1379,7 +1379,7 @@ int ipa2_mhi_disconnect_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
res = ipa_mhi_reset_channel(channel);
if (res) {
@@ -1390,7 +1390,7 @@ int ipa2_mhi_disconnect_pipe(u32 clnt_hdl)
ep->valid = 0;
ipa_delete_dflt_flt_rules(clnt_hdl);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
IPA_MHI_FUNC_EXIT();
@@ -1398,7 +1398,7 @@ int ipa2_mhi_disconnect_pipe(u32 clnt_hdl)
fail_reset_channel:
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return res;
}
@@ -1653,7 +1653,7 @@ int ipa2_mhi_suspend(bool force)
* IPA RM resource are released to make sure tag process will not start
*/
if (!bam_empty)
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
IPA_MHI_DBG("release prod\n");
res = ipa_mhi_release_prod();
@@ -1696,7 +1696,7 @@ int ipa2_mhi_suspend(bool force)
if (!bam_empty) {
ipa_ctx->tag_process_before_gating = false;
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
@@ -1762,7 +1762,7 @@ int ipa2_mhi_resume(void)
}
dl_channel_resumed = true;
- ipa2_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
+ ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
IPA_RM_RESOURCE_MHI_CONS);
ipa_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index 788d7f6c0f9d..70e0db98e948 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -1064,7 +1064,7 @@ int vote_for_bus_bw(uint32_t *bw_mbps)
memset(&profile, 0, sizeof(profile));
profile.max_supported_bandwidth_mbps = *bw_mbps;
- ret = ipa2_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+ ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
&profile);
if (ret)
IPAWANERR("Failed to set perf profile to BW %u\n",
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
index 0142dace47d8..b49815b24bc2 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
@@ -329,7 +329,7 @@ static void ipa_uc_event_handler(enum ipa_irq_type interrupt,
WARN_ON(private_data != ipa_ctx);
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
IPADBG("uC evt opcode=%u\n",
ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
@@ -340,7 +340,7 @@ static void ipa_uc_event_handler(enum ipa_irq_type interrupt,
if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
IPAERR("Invalid feature %u for event %u\n",
feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return;
}
/* Feature specific handling */
@@ -370,7 +370,7 @@ static void ipa_uc_event_handler(enum ipa_irq_type interrupt,
IPADBG("unsupported uC evt opcode=%u\n",
ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
}
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
@@ -378,14 +378,14 @@ static int ipa_uc_panic_notifier(struct notifier_block *this,
unsigned long event, void *ptr)
{
int result = 0;
- struct ipa2_active_client_logging_info log_info;
+ struct ipa_active_client_logging_info log_info;
IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr);
result = ipa_uc_state_check();
if (result)
goto fail;
- IPA2_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
if (ipa2_inc_client_enable_clks_no_block(&log_info))
goto fail;
@@ -397,7 +397,7 @@ static int ipa_uc_panic_notifier(struct notifier_block *this,
/* give uc enough time to save state */
udelay(IPA_PKT_FLUSH_TO_US);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("err_fatal issued\n");
fail:
@@ -425,7 +425,7 @@ static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt,
WARN_ON(private_data != ipa_ctx);
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
IPADBG("uC rsp opcode=%u\n",
ipa_ctx->uc_ctx.uc_sram_mmio->responseOp);
@@ -434,7 +434,7 @@ static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt,
if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
IPAERR("Invalid feature %u for event %u\n",
feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return;
}
@@ -447,7 +447,7 @@ static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt,
IPADBG("feature %d specific response handler\n",
feature);
complete_all(&ipa_ctx->uc_ctx.uc_completion);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return;
}
}
@@ -490,7 +490,7 @@ static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt,
IPAERR("Unsupported uC rsp opcode = %u\n",
ipa_ctx->uc_ctx.uc_sram_mmio->responseOp);
}
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
/**
@@ -816,9 +816,9 @@ EXPORT_SYMBOL(ipa_uc_monitor_holb);
static void ipa_start_monitor_holb(struct work_struct *work)
{
IPADBG("starting holb monitoring on IPA_CLIENT_USB_CONS\n");
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipa_uc_monitor_holb(IPA_CLIENT_USB_CONS, true);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c
index 1588fea23ddc..ec3814b4e747 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -614,7 +614,7 @@ int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
return -EFAULT;
}
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
res = ipa_uc_update_hw_flags(0);
if (res) {
@@ -677,7 +677,7 @@ int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
res = 0;
disable_clks:
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -700,7 +700,7 @@ int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
return -EINVAL;
}
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
@@ -725,7 +725,7 @@ int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
res = 0;
disable_clks:
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -741,7 +741,7 @@ int ipa_uc_mhi_reset_channel(int channelHandle)
return -EFAULT;
}
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
@@ -763,7 +763,7 @@ int ipa_uc_mhi_reset_channel(int channelHandle)
res = 0;
disable_clks:
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -778,7 +778,7 @@ int ipa_uc_mhi_suspend_channel(int channelHandle)
return -EFAULT;
}
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
@@ -800,7 +800,7 @@ int ipa_uc_mhi_suspend_channel(int channelHandle)
res = 0;
disable_clks:
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -815,7 +815,7 @@ int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
return -EFAULT;
}
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
@@ -838,7 +838,7 @@ int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
res = 0;
disable_clks:
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -852,7 +852,7 @@ int ipa_uc_mhi_stop_event_update_channel(int channelHandle)
return -EFAULT;
}
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&cmd, 0, sizeof(cmd));
cmd.params.channelHandle = channelHandle;
@@ -870,7 +870,7 @@ int ipa_uc_mhi_stop_event_update_channel(int channelHandle)
res = 0;
disable_clks:
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -888,7 +888,7 @@ int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
cmd.params.ulMsiEventThreshold, cmd.params.dlMsiEventThreshold);
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
res = ipa_uc_send_cmd(cmd.raw32b,
IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
@@ -899,7 +899,7 @@ int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
res = 0;
disable_clks:
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 496e77bceb59..df52018f6193 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -407,7 +407,7 @@ int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
return -EINVAL;
}
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
TX_STATS(num_pkts_processed);
TX_STATS(copy_engine_doorbell_value);
@@ -449,7 +449,7 @@ int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
RX_STATS(reserved1);
RX_STATS(reserved2);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
@@ -756,7 +756,7 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
}
memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
- IPA2_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+ IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
if (IPA_CLIENT_IS_CONS(in->sys.client)) {
@@ -960,7 +960,7 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
ipa_install_dflt_flt_rules(ipa_ep_idx);
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
ep->wdi_state |= IPA_WDI_CONNECTED;
@@ -974,7 +974,7 @@ uc_timeout:
ipa_release_uc_smmu_mappings(in->sys.client);
dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
dma_alloc_fail:
- IPA2_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
fail:
return result;
}
@@ -1019,7 +1019,7 @@ int ipa2_disconnect_wdi_pipe(u32 clnt_hdl)
}
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
tear.params.ipa_pipe_number = clnt_hdl;
@@ -1037,7 +1037,7 @@ int ipa2_disconnect_wdi_pipe(u32 clnt_hdl)
ipa_release_uc_smmu_mappings(ep->client);
memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -1084,7 +1084,7 @@ int ipa2_enable_wdi_pipe(u32 clnt_hdl)
return -EFAULT;
}
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
enable.params.ipa_pipe_number = clnt_hdl;
result = ipa_uc_send_cmd(enable.raw32b,
@@ -1104,7 +1104,7 @@ int ipa2_enable_wdi_pipe(u32 clnt_hdl)
result = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg);
}
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
ep->wdi_state |= IPA_WDI_ENABLED;
IPADBG("client (ep: %d) enabled\n", clnt_hdl);
@@ -1152,7 +1152,7 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
return -EFAULT;
}
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
result = ipa_disable_data_path(clnt_hdl);
if (result) {
@@ -1205,7 +1205,7 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
ep->wdi_state &= ~IPA_WDI_ENABLED;
IPADBG("client (ep: %d) disabled\n", clnt_hdl);
@@ -1252,7 +1252,7 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl)
return -EFAULT;
}
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
resume.params.ipa_pipe_number = clnt_hdl;
result = ipa_uc_send_cmd(resume.raw32b,
@@ -1368,7 +1368,7 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
}
ipa_ctx->tag_process_before_gating = true;
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
ep->wdi_state &= ~IPA_WDI_RESUMED;
IPADBG("client (ep: %d) suspended\n", clnt_hdl);
@@ -1401,7 +1401,7 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
return -EFAULT;
}
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
qmap.params.ipa_pipe_number = clnt_hdl;
qmap.params.qmap_id = qmap_id;
@@ -1415,7 +1415,7 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
goto uc_timeout;
}
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 888345a23ba5..794eaa32bf90 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -17,6 +17,7 @@
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include "ipa_i.h"
+#include "../ipa_rm_i.h"
#define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL)
#define IPA_V1_1_CLK_RATE (100 * 1000 * 1000UL)
@@ -495,7 +496,7 @@ bool ipa_should_pipe_be_suspended(enum ipa_client_type client)
}
/**
- * ipa_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * ipa2_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
* resource and decrement active clients counter, which may result in clock
* gating of IPA clocks.
*
@@ -503,7 +504,7 @@ bool ipa_should_pipe_be_suspended(enum ipa_client_type client)
*
* Return codes: 0 on success, negative on failure.
*/
-int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource)
+int ipa2_suspend_resource_sync(enum ipa_rm_resource_name resource)
{
struct ipa_client_names clients;
int res;
@@ -546,13 +547,13 @@ int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource)
/* before gating IPA clocks do TAG process */
ipa_ctx->tag_process_before_gating = true;
- IPA2_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));
+ IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));
return 0;
}
/**
- * ipa_suspend_resource_no_block() - suspend client endpoints related to the
+ * ipa2_suspend_resource_no_block() - suspend client endpoints related to the
* IPA_RM resource and decrement active clients counter. This function is
* guaranteed to avoid sleeping.
*
@@ -560,7 +561,7 @@ int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource)
*
* Return codes: 0 on success, negative on failure.
*/
-int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name resource)
{
int res;
struct ipa_client_names clients;
@@ -569,7 +570,7 @@ int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
struct ipa_ep_cfg_ctrl suspend;
int ipa_ep_idx;
unsigned long flags;
- struct ipa2_active_client_logging_info log_info;
+ struct ipa_active_client_logging_info log_info;
if (ipa_active_clients_trylock(&flags) == 0)
return -EPERM;
@@ -607,7 +608,7 @@ int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
}
if (res == 0) {
- IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
ipa_rm_resource_str(resource));
ipa2_active_clients_log_dec(&log_info, true);
ipa_ctx->ipa_active_clients.cnt--;
@@ -621,14 +622,14 @@ bail:
}
/**
- * ipa_resume_resource() - resume client endpoints related to the IPA_RM
+ * ipa2_resume_resource() - resume client endpoints related to the IPA_RM
* resource.
*
* @resource: [IN] IPA Resource Manager resource
*
* Return codes: 0 on success, negative on failure.
*/
-int ipa_resume_resource(enum ipa_rm_resource_name resource)
+int ipa2_resume_resource(enum ipa_rm_resource_name resource)
{
struct ipa_client_names clients;
@@ -821,11 +822,11 @@ int ipa_cfg_route(struct ipa_route *route)
route->route_def_hdr_ofst,
route->route_frag_def_pipe);
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipa_ctx->ctrl->ipa_cfg_route(route);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
@@ -841,12 +842,12 @@ int ipa_cfg_filter(u32 disable)
{
u32 ipa_filter_ofst = IPA_FILTER_OFST_v1_1;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst,
IPA_SETFIELD(!disable,
IPA_FILTER_FILTER_EN_SHFT,
IPA_FILTER_FILTER_EN_BMSK));
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
@@ -2496,11 +2497,11 @@ int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
/* copy over EP cfg */
ipa_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_nat(clnt_hdl, ep_nat);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return 0;
}
@@ -2556,11 +2557,11 @@ int ipa2_cfg_ep_status(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status)
/* copy over EP cfg */
ipa_ctx->ep[clnt_hdl].status = *ep_status;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_status(clnt_hdl, ep_status);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return 0;
}
@@ -2618,11 +2619,11 @@ int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
/* copy over EP cfg */
ipa_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_cfg(clnt_hdl, cfg);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return 0;
}
@@ -2674,11 +2675,11 @@ int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl,
/* copy over EP cfg */
ipa_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_metadata_mask(clnt_hdl, metadata_mask);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return 0;
}
@@ -2797,11 +2798,11 @@ int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
/* copy over EP cfg */
ep->cfg.hdr = *ep_hdr;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ep->cfg.hdr);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return 0;
}
@@ -2923,11 +2924,11 @@ int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl,
/* copy over EP cfg */
ep->cfg.hdr_ext = *ep_hdr_ext;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_hdr_ext(clnt_hdl, &ep->cfg.hdr_ext);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return 0;
}
@@ -3138,13 +3139,13 @@ int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
ipa_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
ipa_ctx->ep[clnt_hdl].dst_pipe_index = ep;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_mode(clnt_hdl,
ipa_ctx->ep[clnt_hdl].dst_pipe_index,
ep_mode);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return 0;
}
@@ -3270,11 +3271,11 @@ int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
/* copy over EP cfg */
ipa_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_aggr(clnt_hdl, ep_aggr);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return 0;
}
@@ -3353,12 +3354,12 @@ int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
else
ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_route(clnt_hdl,
ipa_ctx->ep[clnt_hdl].rt_tbl_idx);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return 0;
}
@@ -3447,11 +3448,11 @@ int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
ipa_ctx->ep[clnt_hdl].holb = *ep_holb;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_holb(clnt_hdl, ep_holb);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
ep_holb->tmr_val);
@@ -3548,11 +3549,11 @@ int ipa2_cfg_ep_deaggr(u32 clnt_hdl,
/* copy over EP cfg */
ep->cfg.deaggr = *ep_deaggr;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_deaggr(clnt_hdl, &ep->cfg.deaggr);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return 0;
}
@@ -3600,13 +3601,13 @@ int ipa2_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
/* copy over EP cfg */
ipa_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_ctx->ctrl->ipa_cfg_ep_metadata(clnt_hdl, ep_md);
ipa_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ipa_ctx->ep[clnt_hdl].cfg.hdr);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
return 0;
}
@@ -3788,11 +3789,11 @@ int ipa2_set_aggr_mode(enum ipa_aggr_mode mode)
{
u32 reg_val;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) |
(reg_val & 0xfffffffe));
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
@@ -3816,12 +3817,12 @@ int ipa2_set_qcncm_ndp_sig(char sig[3])
IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n");
return -EINVAL;
}
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 |
(sig[1] << 12) | (sig[2] << 4) |
(reg_val & 0xf000000f));
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
@@ -3837,11 +3838,11 @@ int ipa2_set_single_ndp_per_mbim(bool enable)
{
u32 reg_val;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST);
ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST,
(enable & 0x1) | (reg_val & 0xfffffffe));
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
@@ -3857,12 +3858,12 @@ int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable)
{
u32 reg_val;
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST,
(enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) |
(reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK));
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr);
@@ -3905,7 +3906,7 @@ void ipa2_bam_reg_dump(void)
{
static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
if (__ratelimit(&_rs)) {
- IPA2_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
pr_err("IPA BAM START\n");
if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) {
sps_get_bam_debug_info(ipa_ctx->bam_handle, 5,
@@ -3919,7 +3920,7 @@ void ipa2_bam_reg_dump(void)
SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_PROD))),
0, 2);
}
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
}
@@ -4821,7 +4822,7 @@ bool ipa2_is_client_handle_valid(u32 clnt_hdl)
void ipa2_proxy_clk_unvote(void)
{
if (ipa2_is_ready() && ipa_ctx->q6_proxy_clk_vote_valid) {
- IPA2_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
ipa_ctx->q6_proxy_clk_vote_valid = false;
}
}
@@ -4834,7 +4835,7 @@ void ipa2_proxy_clk_unvote(void)
void ipa2_proxy_clk_vote(void)
{
if (ipa2_is_ready() && !ipa_ctx->q6_proxy_clk_vote_valid) {
- IPA2_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
ipa_ctx->q6_proxy_clk_vote_valid = true;
}
}
@@ -5015,24 +5016,6 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB;
api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping;
api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping;
- api_ctrl->ipa_rm_create_resource = ipa2_rm_create_resource;
- api_ctrl->ipa_rm_delete_resource = ipa2_rm_delete_resource;
- api_ctrl->ipa_rm_register = ipa2_rm_register;
- api_ctrl->ipa_rm_deregister = ipa2_rm_deregister;
- api_ctrl->ipa_rm_set_perf_profile = ipa2_rm_set_perf_profile;
- api_ctrl->ipa_rm_add_dependency = ipa2_rm_add_dependency;
- api_ctrl->ipa_rm_delete_dependency = ipa2_rm_delete_dependency;
- api_ctrl->ipa_rm_request_resource = ipa2_rm_request_resource;
- api_ctrl->ipa_rm_release_resource = ipa2_rm_release_resource;
- api_ctrl->ipa_rm_notify_completion = ipa2_rm_notify_completion;
- api_ctrl->ipa_rm_inactivity_timer_init =
- ipa2_rm_inactivity_timer_init;
- api_ctrl->ipa_rm_inactivity_timer_destroy =
- ipa2_rm_inactivity_timer_destroy;
- api_ctrl->ipa_rm_inactivity_timer_request_resource =
- ipa2_rm_inactivity_timer_request_resource;
- api_ctrl->ipa_rm_inactivity_timer_release_resource =
- ipa2_rm_inactivity_timer_release_resource;
api_ctrl->teth_bridge_init = ipa2_teth_bridge_init;
api_ctrl->teth_bridge_disconnect = ipa2_teth_bridge_disconnect;
api_ctrl->teth_bridge_connect = ipa2_teth_bridge_connect;
@@ -5073,10 +5056,19 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_smmu_domain = ipa2_get_smmu_domain;
api_ctrl->ipa_disable_apps_wan_cons_deaggr =
ipa2_disable_apps_wan_cons_deaggr;
- api_ctrl->ipa_rm_add_dependency_sync = ipa2_rm_add_dependency_sync;
api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev;
api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info;
api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel;
+ api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks;
+ api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks;
+ api_ctrl->ipa_inc_client_enable_clks_no_block =
+ ipa2_inc_client_enable_clks_no_block;
+ api_ctrl->ipa_suspend_resource_no_block =
+ ipa2_suspend_resource_no_block;
+ api_ctrl->ipa_resume_resource = ipa2_resume_resource;
+ api_ctrl->ipa_suspend_resource_sync = ipa2_suspend_resource_sync;
+ api_ctrl->ipa_set_required_perf_profile =
+ ipa2_set_required_perf_profile;
return 0;
}
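Note: api_ctrl is a function-pointer table bound at probe time so the generic ipa_* entry points can dispatch to either the v2 or v3 implementation. The hunks above drop the RM entries because the resource manager is being unified into common code that no longer needs per-HW dispatch, and add clock-vote and suspend/resume entries that the common RM now calls back through. A sketch of the dispatch such a table feeds (the wrapper body and the ipa_api_ctrl variable are assumptions about the common layer, which is outside this diff):

	int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource)
	{
		if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_suspend_resource_sync)
			return -EPERM;
		return ipa_api_ctrl->ipa_suspend_resource_sync(resource);
	}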
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index dd5ca0a8463d..de703bf6b582 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1066,7 +1066,7 @@ static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
send:
/* IPA_RM checking start */
- ret = ipa2_rm_inactivity_timer_request_resource(
+ ret = ipa_rm_inactivity_timer_request_resource(
IPA_RM_RESOURCE_WWAN_0_PROD);
if (ret == -EINPROGRESS) {
netif_stop_queue(dev);
@@ -1099,7 +1099,7 @@ send:
dev->stats.tx_bytes += skb->len;
ret = NETDEV_TX_OK;
out:
- ipa2_rm_inactivity_timer_release_resource(
+ ipa_rm_inactivity_timer_release_resource(
IPA_RM_RESOURCE_WWAN_0_PROD);
return ret;
}
@@ -1151,7 +1151,7 @@ static void apps_ipa_tx_complete_notify(void *priv,
}
__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
dev_kfree_skb_any(skb);
- ipa2_rm_inactivity_timer_release_resource(
+ ipa_rm_inactivity_timer_release_resource(
IPA_RM_RESOURCE_WWAN_0_PROD);
}
@@ -1679,9 +1679,9 @@ static void q6_prod_rm_request_resource(struct work_struct *work)
{
int ret = 0;
- ret = ipa2_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
+ ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
if (ret < 0 && ret != -EINPROGRESS) {
- IPAWANERR("%s: ipa2_rm_request_resource failed %d\n", __func__,
+ IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
ret);
return;
}
@@ -1698,9 +1698,9 @@ static void q6_prod_rm_release_resource(struct work_struct *work)
{
int ret = 0;
- ret = ipa2_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
+ ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
if (ret < 0 && ret != -EINPROGRESS) {
- IPAWANERR("%s: ipa2_rm_release_resource failed %d\n", __func__,
+ IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
ret);
return;
}
@@ -1744,44 +1744,44 @@ static int q6_initialize_rm(void)
memset(&create_params, 0, sizeof(create_params));
create_params.name = IPA_RM_RESOURCE_Q6_PROD;
create_params.reg_params.notify_cb = &q6_rm_notify_cb;
- result = ipa2_rm_create_resource(&create_params);
+ result = ipa_rm_create_resource(&create_params);
if (result)
goto create_rsrc_err1;
memset(&create_params, 0, sizeof(create_params));
create_params.name = IPA_RM_RESOURCE_Q6_CONS;
create_params.release_resource = &q6_rm_release_resource;
create_params.request_resource = &q6_rm_request_resource;
- result = ipa2_rm_create_resource(&create_params);
+ result = ipa_rm_create_resource(&create_params);
if (result)
goto create_rsrc_err2;
/* add dependency*/
- result = ipa2_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (result)
goto add_dpnd_err;
/* setup Performance profile */
memset(&profile, 0, sizeof(profile));
profile.max_supported_bandwidth_mbps = 100;
- result = ipa2_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+ result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
&profile);
if (result)
goto set_perf_err;
- result = ipa2_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
+ result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
&profile);
if (result)
goto set_perf_err;
return result;
set_perf_err:
- ipa2_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_APPS_CONS);
add_dpnd_err:
- result = ipa2_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+ result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
if (result < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_CONS, result);
create_rsrc_err2:
- result = ipa2_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+ result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
if (result < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, result);
@@ -1794,17 +1794,17 @@ void q6_deinitialize_rm(void)
{
int ret;
- ret = ipa2_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (ret < 0)
IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
ret);
- ret = ipa2_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_CONS, ret);
- ret = ipa2_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
@@ -2013,13 +2013,13 @@ static int ipa_wwan_probe(struct platform_device *pdev)
ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
ipa_rm_params.reg_params.user_data = dev;
ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
- ret = ipa2_rm_create_resource(&ipa_rm_params);
+ ret = ipa_rm_create_resource(&ipa_rm_params);
if (ret) {
pr_err("%s: unable to create resourse %d in IPA RM\n",
__func__, IPA_RM_RESOURCE_WWAN_0_PROD);
goto create_rsrc_err;
}
- ret = ipa2_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
+ ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_INACTIVITY_TIMER);
if (ret) {
pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
@@ -2027,14 +2027,14 @@ static int ipa_wwan_probe(struct platform_device *pdev)
goto timer_init_err;
}
/* add dependency */
- ret = ipa2_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (ret)
goto add_dpnd_err;
/* setup Performance profile */
memset(&profile, 0, sizeof(profile));
profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
- ret = ipa2_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
+ ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
&profile);
if (ret)
goto set_perf_err;
@@ -2066,20 +2066,20 @@ static int ipa_wwan_probe(struct platform_device *pdev)
config_err:
unregister_netdev(ipa_netdevs[0]);
set_perf_err:
- ret = ipa2_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (ret)
IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
ret);
add_dpnd_err:
- ret = ipa2_rm_inactivity_timer_destroy(
+ ret = ipa_rm_inactivity_timer_destroy(
IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
if (ret)
- IPAWANERR("Error ipa2_rm_inactivity_timer_destroy %d, ret=%d\n",
+ IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
timer_init_err:
- ret = ipa2_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
if (ret)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
@@ -2113,18 +2113,18 @@ static int ipa_wwan_remove(struct platform_device *pdev)
ipa_to_apps_hdl = -1;
mutex_unlock(&ipa_to_apps_pipe_handle_guard);
unregister_netdev(ipa_netdevs[0]);
- ret = ipa2_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (ret < 0)
IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
ret);
- ret = ipa2_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
+ ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
if (ret < 0)
IPAWANERR(
- "Error ipa2_rm_inactivity_timer_destroy resource %d, ret=%d\n",
+ "Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
- ret = ipa2_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
@@ -2175,7 +2175,7 @@ static int rmnet_ipa_ap_suspend(struct device *dev)
/* Make sure that there is no Tx operation ongoing */
netif_tx_lock_bh(netdev);
- ipa2_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
netif_tx_unlock_bh(netdev);
IPAWANDBG("Exit\n");
diff --git a/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c
index a7459bc27df0..da68be2ed69b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -113,18 +113,18 @@ int ipa2_teth_bridge_init(struct teth_bridge_init_params *params)
params->skip_ep_cfg = true;
/* Build dependency graph */
- res = ipa2_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res < 0 && res != -EINPROGRESS) {
- TETH_ERR("ipa2_rm_add_dependency() failed.\n");
+ TETH_ERR("ipa_rm_add_dependency() failed.\n");
goto bail;
}
- res = ipa2_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_CONS);
if (res < 0 && res != -EINPROGRESS) {
- ipa2_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
- TETH_ERR("ipa2_rm_add_dependency() failed.\n");
+ TETH_ERR("ipa_rm_add_dependency() failed.\n");
goto bail;
}
@@ -142,9 +142,9 @@ bail:
int ipa2_teth_bridge_disconnect(enum ipa_client_type client)
{
TETH_DBG_FUNC_ENTRY();
- ipa2_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
- ipa2_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_CONS);
TETH_DBG_FUNC_EXIT();
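Note: both ipa_rm_add_dependency() call sites above deliberately treat -EINPROGRESS as success. As the surrounding code suggests, the dependency edge is recorded immediately while the consumer grant may still be pending, so only other negative codes unwind the bridge setup.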
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index 2ba86bbfd80c..9653dd6d27f2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -2,8 +2,7 @@ obj-$(CONFIG_IPA3) += ipahal/
obj-$(CONFIG_IPA3) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
- ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o ipa_rm.o \
- ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o \
+ ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o
obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index d09363b725de..1747460bbd86 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -39,7 +39,7 @@
#include <soc/qcom/subsystem_restart.h>
#define IPA_SUBSYSTEM_NAME "ipa_fws"
#include "ipa_i.h"
-#include "ipa_rm_i.h"
+#include "../ipa_rm_i.h"
#include "ipahal/ipahal.h"
#define CREATE_TRACE_POINTS
@@ -219,78 +219,6 @@ static bool smmu_disable_htw;
static char *active_clients_table_buf;
-const char *ipa3_clients_strings[IPA_CLIENT_MAX] = {
- __stringify(IPA_CLIENT_HSIC1_PROD),
- __stringify(IPA_CLIENT_WLAN1_PROD),
- __stringify(IPA_CLIENT_USB2_PROD),
- __stringify(IPA_CLIENT_HSIC3_PROD),
- __stringify(IPA_CLIENT_HSIC2_PROD),
- __stringify(IPA_CLIENT_USB3_PROD),
- __stringify(IPA_CLIENT_HSIC4_PROD),
- __stringify(IPA_CLIENT_USB4_PROD),
- __stringify(IPA_CLIENT_HSIC5_PROD),
- __stringify(IPA_CLIENT_USB_PROD),
- __stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD),
- __stringify(IPA_CLIENT_A2_EMBEDDED_PROD),
- __stringify(IPA_CLIENT_A2_TETHERED_PROD),
- __stringify(IPA_CLIENT_APPS_LAN_WAN_PROD),
- __stringify(IPA_CLIENT_APPS_CMD_PROD),
- __stringify(IPA_CLIENT_ODU_PROD),
- __stringify(IPA_CLIENT_MHI_PROD),
- __stringify(IPA_CLIENT_Q6_LAN_PROD),
- __stringify(IPA_CLIENT_Q6_WAN_PROD),
- __stringify(IPA_CLIENT_Q6_CMD_PROD),
- __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD),
- __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD),
- __stringify(IPA_CLIENT_Q6_DECOMP_PROD),
- __stringify(IPA_CLIENT_Q6_DECOMP2_PROD),
- __stringify(IPA_CLIENT_UC_USB_PROD),
-
- /* Below PROD client type is only for test purpose */
- __stringify(IPA_CLIENT_TEST_PROD),
- __stringify(IPA_CLIENT_TEST1_PROD),
- __stringify(IPA_CLIENT_TEST2_PROD),
- __stringify(IPA_CLIENT_TEST3_PROD),
- __stringify(IPA_CLIENT_TEST4_PROD),
-
- __stringify(IPA_CLIENT_HSIC1_CONS),
- __stringify(IPA_CLIENT_WLAN1_CONS),
- __stringify(IPA_CLIENT_HSIC2_CONS),
- __stringify(IPA_CLIENT_USB2_CONS),
- __stringify(IPA_CLIENT_WLAN2_CONS),
- __stringify(IPA_CLIENT_HSIC3_CONS),
- __stringify(IPA_CLIENT_USB3_CONS),
- __stringify(IPA_CLIENT_WLAN3_CONS),
- __stringify(IPA_CLIENT_HSIC4_CONS),
- __stringify(IPA_CLIENT_USB4_CONS),
- __stringify(IPA_CLIENT_WLAN4_CONS),
- __stringify(IPA_CLIENT_HSIC5_CONS),
- __stringify(IPA_CLIENT_USB_CONS),
- __stringify(IPA_CLIENT_USB_DPL_CONS),
- __stringify(IPA_CLIENT_A2_EMBEDDED_CONS),
- __stringify(IPA_CLIENT_A2_TETHERED_CONS),
- __stringify(IPA_CLIENT_A5_LAN_WAN_CONS),
- __stringify(IPA_CLIENT_APPS_LAN_CONS),
- __stringify(IPA_CLIENT_APPS_WAN_CONS),
- __stringify(IPA_CLIENT_ODU_EMB_CONS),
- __stringify(IPA_CLIENT_ODU_TETH_CONS),
- __stringify(IPA_CLIENT_MHI_CONS),
- __stringify(IPA_CLIENT_Q6_LAN_CONS),
- __stringify(IPA_CLIENT_Q6_WAN_CONS),
- __stringify(IPA_CLIENT_Q6_DUN_CONS),
- __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS),
- __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS),
- __stringify(IPA_CLIENT_Q6_DECOMP_CONS),
- __stringify(IPA_CLIENT_Q6_DECOMP2_CONS),
- __stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS),
- /* Below CONS client type is only for test purpose */
- __stringify(IPA_CLIENT_TEST_CONS),
- __stringify(IPA_CLIENT_TEST1_CONS),
- __stringify(IPA_CLIENT_TEST2_CONS),
- __stringify(IPA_CLIENT_TEST3_CONS),
- __stringify(IPA_CLIENT_TEST4_CONS),
-};
-
int ipa3_active_clients_log_print_buffer(char *buf, int size)
{
int i;
@@ -1244,7 +1172,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- retval = ipa3_rm_add_dependency(rm_depend.resource_name,
+ retval = ipa_rm_add_dependency(rm_depend.resource_name,
rm_depend.depends_on_name);
break;
case IPA_IOC_RM_DEL_DEPENDENCY:
@@ -1253,7 +1181,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- retval = ipa3_rm_delete_dependency(rm_depend.resource_name,
+ retval = ipa_rm_delete_dependency(rm_depend.resource_name,
rm_depend.depends_on_name);
break;
case IPA_IOC_GENERATE_FLT_EQ:
@@ -3149,7 +3077,7 @@ static void ipa3_start_tag_process(struct work_struct *work)
* - Remove and deallocate unneeded data structure
* - Log the call in the circular history buffer (unless it is a simple call)
*/
-void ipa3_active_clients_log_mod(struct ipa3_active_client_logging_info *id,
+void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
bool inc, bool int_ctx)
{
char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
@@ -3205,13 +3133,13 @@ void ipa3_active_clients_log_mod(struct ipa3_active_client_logging_info *id,
}
}
-void ipa3_active_clients_log_dec(struct ipa3_active_client_logging_info *id,
+void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
bool int_ctx)
{
ipa3_active_clients_log_mod(id, false, int_ctx);
}
-void ipa3_active_clients_log_inc(struct ipa3_active_client_logging_info *id,
+void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
bool int_ctx)
{
ipa3_active_clients_log_mod(id, true, int_ctx);
@@ -3224,7 +3152,7 @@ void ipa3_active_clients_log_inc(struct ipa3_active_client_logging_info *id,
* Return codes:
* None
*/
-void ipa3_inc_client_enable_clks(struct ipa3_active_client_logging_info *id)
+void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
{
ipa3_active_clients_lock();
ipa3_active_clients_log_inc(id, false);
@@ -3243,7 +3171,7 @@ void ipa3_inc_client_enable_clks(struct ipa3_active_client_logging_info *id)
* Return codes: 0 for success
* -EPERM if an asynchronous action should have been done
*/
-int ipa3_inc_client_enable_clks_no_block(struct ipa3_active_client_logging_info
+int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
*id)
{
int res = 0;
@@ -3276,9 +3204,9 @@ bail:
* Return codes:
* None
*/
-void ipa3_dec_client_disable_clks(struct ipa3_active_client_logging_info *id)
+void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
{
- struct ipa3_active_client_logging_info log_info;
+ struct ipa_active_client_logging_info log_info;
ipa3_active_clients_lock();
ipa3_active_clients_log_dec(id, false);
@@ -3466,7 +3394,7 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
} else {
resource = ipa3_get_rm_resource_from_ep(i);
res =
- ipa3_rm_request_resource_with_timer(resource);
+ ipa_rm_request_resource_with_timer(resource);
if (res == -EPERM &&
IPA_CLIENT_IS_CONS(
ipa3_ctx->ep[i].client)) {
@@ -3551,14 +3479,14 @@ int ipa3_create_apps_resource(void)
ipa3_apps_cons_request_resource;
apps_cons_create_params.release_resource =
ipa3_apps_cons_release_resource;
- result = ipa3_rm_create_resource(&apps_cons_create_params);
+ result = ipa_rm_create_resource(&apps_cons_create_params);
if (result) {
- IPAERR("ipa3_rm_create_resource failed\n");
+ IPAERR("ipa_rm_create_resource failed\n");
return result;
}
profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
- ipa3_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
+ ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
return result;
}
@@ -3622,7 +3550,7 @@ static void ipa3_freeze_clock_vote_and_notify_modem(void)
{
int res;
u32 ipa_clk_state;
- struct ipa3_active_client_logging_info log_info;
+ struct ipa_active_client_logging_info log_info;
if (ipa3_ctx->smp2p_info.res_sent)
return;
@@ -3832,8 +3760,8 @@ fail_setup_apps_pipes:
else
sps_deregister_bam_device(ipa3_ctx->bam_handle);
fail_register_device:
- ipa3_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
- ipa3_rm_exit();
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+ ipa_rm_exit();
cdev_del(&ipa3_ctx->cdev);
device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
unregister_chrdev_region(ipa3_ctx->dev_num, 1);
@@ -4012,7 +3940,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
int i;
struct ipa3_flt_tbl *flt_tbl;
struct ipa3_rt_tbl_set *rset;
- struct ipa3_active_client_logging_info log_info;
+ struct ipa_active_client_logging_info log_info;
IPADBG("IPA Driver initialization started\n");
@@ -4414,7 +4342,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
/* Initialize IPA RM (resource manager) */
- result = ipa3_rm_initialize();
+ result = ipa_rm_initialize();
if (result) {
IPAERR("RM initialization failed (%d)\n", -result);
result = -ENODEV;
@@ -4469,9 +4397,9 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
return 0;
fail_ipa_init_interrupts:
- ipa3_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
fail_create_apps_resource:
- ipa3_rm_exit();
+ ipa_rm_exit();
fail_ipa_rm_init:
fail_nat_dev_add:
cdev_del(&ipa3_ctx->cdev);
@@ -5111,7 +5039,7 @@ static void ipa_gsi_request_resource(struct work_struct *work)
void ipa_gsi_req_res_cb(void *user_data, bool *granted)
{
unsigned long flags;
- struct ipa3_active_client_logging_info log_info;
+ struct ipa_active_client_logging_info log_info;
spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 6c639e1c7a1a..96832869a496 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -16,7 +16,7 @@
#include <linux/kernel.h>
#include <linux/stringify.h>
#include "ipa_i.h"
-#include "ipa_rm_i.h"
+#include "../ipa_rm_i.h"
#define IPA_MAX_MSG_LEN 4096
#define IPA_DBG_MAX_RULE_IN_TBL 128
@@ -1470,7 +1470,7 @@ static ssize_t ipa3_rm_read_stats(struct file *file, char __user *ubuf,
{
int result, nbytes, cnt = 0;
- result = ipa3_rm_stat(dbg_buff, IPA_MAX_MSG_LEN);
+ result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN);
if (result < 0) {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"Error in printing RM stat %d\n", result);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index d545de10296d..d3f24b9403f0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -2225,7 +2225,6 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
int pad_len_byte;
int len;
unsigned char *buf;
- bool drop_packet;
int src_pipe;
unsigned int used = *(unsigned int *)skb->cb;
unsigned int used_align = ALIGN(used, 32);
@@ -2247,6 +2246,7 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
memcpy(buf, sys->prev_skb->data, sys->len_partial);
sys->len_partial = 0;
sys->free_skb(sys->prev_skb);
+ sys->prev_skb = NULL;
goto begin;
}
@@ -2266,9 +2266,13 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
skb2->len - sys->len_pad);
skb2->truesize = skb2->len +
sizeof(struct sk_buff);
- sys->ep->client_notify(sys->ep->priv,
- IPA_RECEIVE,
- (unsigned long)(skb2));
+ if (sys->drop_packet)
+ dev_kfree_skb_any(skb2);
+ else
+ sys->ep->client_notify(
+ sys->ep->priv,
+ IPA_RECEIVE,
+ (unsigned long)(skb2));
} else {
IPAERR("copy expand failed\n");
}
@@ -2300,7 +2304,7 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
begin:
pkt_status_sz = ipahal_pkt_status_get_size();
while (skb->len) {
- drop_packet = false;
+ sys->drop_packet = false;
IPADBG_LOW("LEN_REM %d\n", skb->len);
if (skb->len < pkt_status_sz) {
@@ -2339,9 +2343,11 @@ begin:
IPA_STATS_EXCP_CNT(status.exception,
ipa3_ctx->stats.rx_excp_pkts);
if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
- status.endp_src_idx >= ipa3_ctx->ipa_num_pipes ||
- status.pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
+ status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
IPAERR("status fields invalid\n");
+ IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
+ status.status_opcode, status.endp_src_idx,
+ status.endp_dest_idx, status.pkt_len);
WARN_ON(1);
BUG();
}
@@ -2389,7 +2395,7 @@ begin:
if (status.exception ==
IPAHAL_PKT_STATUS_EXCEPTION_NONE &&
status.rt_rule_id == IPA_RULE_ID_INVALID)
- drop_packet = true;
+ sys->drop_packet = true;
if (skb->len == pkt_status_sz &&
status.exception ==
@@ -2413,8 +2419,7 @@ begin:
IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) {
IPADBG_LOW(
"Dropping packet on DeAggr Exception\n");
- skb_pull(skb, len + pkt_status_sz);
- continue;
+ sys->drop_packet = true;
}
skb2 = ipa3_skb_copy_for_client(skb,
@@ -2433,9 +2438,20 @@ begin:
pkt_status_sz);
IPADBG_LOW("rx avail for %d\n",
status.endp_dest_idx);
- if (drop_packet)
+ if (sys->drop_packet) {
dev_kfree_skb_any(skb2);
- else {
+ } else if (status.pkt_len >
+ IPA_GENERIC_AGGR_BYTE_LIMIT *
+ 1024) {
+ IPAERR("packet size invalid\n");
+ IPAERR("STATUS opcode=%d\n",
+ status.status_opcode);
+ IPAERR("src=%d dst=%d len=%d\n",
+ status.endp_src_idx,
+ status.endp_dest_idx,
+ status.pkt_len);
+ BUG();
+ } else {
skb2->truesize = skb2->len +
sizeof(struct sk_buff) +
(ALIGN(len +
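Note: promoting drop_packet from a stack variable into ipa3_sys_context lets the drop decision (rule miss or DeAggr exception) taken while parsing one buffer survive into the continuation path that reassembles a packet split across buffers (the len_rem branch earlier in this handler), which previously delivered such tails unconditionally. A simplified view, with deliver_to_client() standing in for the ep->client_notify() callback:

	static void handle_pkt_tail(struct ipa3_sys_context *sys,
				    struct sk_buff *skb2)
	{
		if (sys->drop_packet)			/* decision from the prior buffer */
			dev_kfree_skb_any(skb2);
		else
			deliver_to_client(skb2);	/* hypothetical notify */
	}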
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 3fa7d4121a1b..13639cf8491e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -35,6 +35,7 @@
#include "../ipa_api.h"
#include "ipahal/ipahal_reg.h"
#include "ipahal/ipahal.h"
+#include "../ipa_common_i.h"
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
@@ -54,9 +55,6 @@
#define IPA_UC_WAII_MAX_SLEEP 1200
#define IPA_MAX_STATUS_STAT_NUM 30
-#define __FILENAME__ \
- (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
-
#define IPA_IPC_LOGGING(buf, fmt, args...) \
ipc_log_string((buf), \
@@ -213,113 +211,16 @@
#define IPA_SLEEP_CLK_RATE_KHZ (32)
-#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \
- log_info.file = __FILENAME__; \
- log_info.line = __LINE__; \
- log_info.type = EP; \
- log_info.id_string = ipa3_clients_strings[client]
-
-#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \
- log_info.file = __FILENAME__; \
- log_info.line = __LINE__; \
- log_info.type = SIMPLE; \
- log_info.id_string = __func__
-
-#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \
- log_info.file = __FILENAME__; \
- log_info.line = __LINE__; \
- log_info.type = RESOURCE; \
- log_info.id_string = resource_name
-
-#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \
- log_info.file = __FILENAME__; \
- log_info.line = __LINE__; \
- log_info.type = SPECIAL; \
- log_info.id_string = id_str
-
-#define IPA_ACTIVE_CLIENTS_INC_EP(client) \
- do { \
- struct ipa3_active_client_logging_info log_info; \
- IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
- ipa3_inc_client_enable_clks(&log_info); \
- } while (0)
-
-#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \
- do { \
- struct ipa3_active_client_logging_info log_info; \
- IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
- ipa3_dec_client_disable_clks(&log_info); \
- } while (0)
-
-#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \
- do { \
- struct ipa3_active_client_logging_info log_info; \
- IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
- ipa3_inc_client_enable_clks(&log_info); \
- } while (0)
-
-#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \
- do { \
- struct ipa3_active_client_logging_info log_info; \
- IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
- ipa3_dec_client_disable_clks(&log_info); \
- } while (0)
-
-#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \
- do { \
- struct ipa3_active_client_logging_info log_info; \
- IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
- ipa3_inc_client_enable_clks(&log_info); \
- } while (0)
-
-#define IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \
- do { \
- struct ipa3_active_client_logging_info log_info; \
- IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
- ipa3_dec_client_disable_clks(&log_info); \
- } while (0)
-
-#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \
- do { \
- struct ipa3_active_client_logging_info log_info; \
- IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
- ipa3_inc_client_enable_clks(&log_info); \
- } while (0)
-
-#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \
- do { \
- struct ipa3_active_client_logging_info log_info; \
- IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
- ipa3_dec_client_disable_clks(&log_info); \
- } while (0)
-
#define IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
#define IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN 96
#define IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
#define IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN 40
-extern const char *ipa3_clients_strings[];
-
-enum ipa3_active_client_log_type {
- EP,
- SIMPLE,
- RESOURCE,
- SPECIAL,
- INVALID
-};
-
-struct ipa3_active_client_logging_info {
- const char *id_string;
- char *file;
- int line;
- enum ipa3_active_client_log_type type;
-};
-
struct ipa3_active_client_htable_entry {
struct hlist_node list;
char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
int count;
- enum ipa3_active_client_log_type type;
+ enum ipa_active_client_log_type type;
};
struct ipa3_active_clients_log_ctx {
@@ -813,6 +714,7 @@ struct ipa3_sys_context {
unsigned int len_rem;
unsigned int len_pad;
unsigned int len_partial;
+ bool drop_packet;
struct work_struct work;
void (*sps_callback)(struct sps_event_notify *notify);
enum sps_option sps_option;
@@ -2044,46 +1946,6 @@ int ipa3_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
int ipa3_uc_dereg_rdyCB(void);
/*
- * Resource manager
- */
-int ipa3_rm_create_resource(struct ipa_rm_create_params *create_params);
-
-int ipa3_rm_delete_resource(enum ipa_rm_resource_name resource_name);
-
-int ipa3_rm_register(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_register_params *reg_params);
-
-int ipa3_rm_deregister(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_register_params *reg_params);
-
-int ipa3_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_perf_profile *profile);
-
-int ipa3_rm_add_dependency(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name);
-
-int ipa3_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name);
-
-int ipa3_rm_request_resource(enum ipa_rm_resource_name resource_name);
-
-int ipa3_rm_release_resource(enum ipa_rm_resource_name resource_name);
-
-int ipa3_rm_notify_completion(enum ipa_rm_event event,
- enum ipa_rm_resource_name resource_name);
-
-int ipa3_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
- unsigned long msecs);
-
-int ipa3_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name);
-
-int ipa3_rm_inactivity_timer_request_resource(
- enum ipa_rm_resource_name resource_name);
-
-int ipa3_rm_inactivity_timer_release_resource(
- enum ipa_rm_resource_name resource_name);
-
-/*
* Tethering bridge (Rmnet / MBIM)
*/
int ipa3_teth_bridge_init(struct teth_bridge_init_params *params);
@@ -2228,13 +2090,13 @@ int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary);
struct ipa3_context *ipa3_get_ctx(void);
void ipa3_enable_clks(void);
void ipa3_disable_clks(void);
-void ipa3_inc_client_enable_clks(struct ipa3_active_client_logging_info *id);
-int ipa3_inc_client_enable_clks_no_block(struct ipa3_active_client_logging_info
+void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
*id);
-void ipa3_dec_client_disable_clks(struct ipa3_active_client_logging_info *id);
-void ipa3_active_clients_log_dec(struct ipa3_active_client_logging_info *id,
+void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
bool int_ctx);
-void ipa3_active_clients_log_inc(struct ipa3_active_client_logging_info *id,
+void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
bool int_ctx);
int ipa3_active_clients_log_print_buffer(char *buf, int size);
int ipa3_active_clients_log_print_table(char *buf, int size);
@@ -2370,8 +2232,6 @@ int ipa3_ap_suspend(struct device *dev);
int ipa3_ap_resume(struct device *dev);
int ipa3_init_interrupts(void);
struct iommu_domain *ipa3_get_smmu_domain(void);
-int ipa3_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name);
int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
@@ -2393,7 +2253,6 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
struct ipa3_debugfs_rt_entry entry[],
int *num_entry);
int ipa3_calc_extra_wrd_bytes(const struct ipa_ipfltri_rule_eq *attrib);
-const char *ipa3_rm_resource_str(enum ipa_rm_resource_name resource_name);
int ipa3_restore_suspend_handler(void);
int ipa3_inject_dma_task_for_gsi(void);
int ipa3_uc_panic_notifier(struct notifier_block *this,
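Note: the macros, log-type enum, and logging-info struct deleted above are not lost; together with the new #include "../ipa_common_i.h" they move to a header shared with the v2 driver, with the struct renamed from ipa3_active_client_logging_info to ipa_active_client_logging_info. The common form presumably keeps the same shape, modulo the rename and a dispatch through the common clock wrappers bound via api_ctrl, e.g. (an assumption; the common header's hunk is not part of this section):

	#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \
		do { \
			struct ipa_active_client_logging_info log_info; \
			IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
			ipa_inc_client_enable_clks(&log_info); \
		} while (0)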
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index a3058a010354..82f63d3cf5a5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -36,7 +36,7 @@
#define IPA_MHI_RM_TIMEOUT_MSEC 10000
-#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 5
+#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10
#define IPA_MHI_MAX_UL_CHANNELS 1
#define IPA_MHI_MAX_DL_CHANNELS 1
@@ -82,9 +82,34 @@ enum ipa_mhi_dma_dir {
IPA_MHI_DMA_FROM_HOST,
};
+/**
+ * enum ipa3_mhi_burst_mode - MHI channel burst mode state
+ *
+ * Values follow the MHI specification.
+ * @IPA_MHI_BURST_MODE_DEFAULT: burst mode enabled for HW channels,
+ * disabled for SW channels
+ * @IPA_MHI_BURST_MODE_RESERVED: reserved value, per the MHI specification
+ * @IPA_MHI_BURST_MODE_DISABLE: Burst mode is disabled for this channel
+ * @IPA_MHI_BURST_MODE_ENABLE: Burst mode is enabled for this channel
+ *
+ */
+enum ipa3_mhi_burst_mode {
+ IPA_MHI_BURST_MODE_DEFAULT,
+ IPA_MHI_BURST_MODE_RESERVED,
+ IPA_MHI_BURST_MODE_DISABLE,
+ IPA_MHI_BURST_MODE_ENABLE,
+};
+
+enum ipa3_mhi_polling_mode {
+ IPA_MHI_POLLING_MODE_DB_MODE,
+ IPA_MHI_POLLING_MODE_POLL_MODE,
+};
struct ipa3_mhi_ch_ctx {
- u32 chstate;
+ u8 chstate; /* bits 0-7 */
+ u8 brstmode:2; /* bits 8-9 */
+ u8 pollcfg:6; /* bits 10-15 */
+ u16 rsvd; /* bits 16-31 */
u32 chtype;
u32 erindex;
u64 rbase;
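Note: the repack above splits what was a single u32 chstate into the MHI channel-context layout for its first dword: chstate in bits 0-7, brstmode in bits 8-9, pollcfg in bits 10-15, and 16 reserved bits, letting the driver read the host's burst-mode and polling settings directly. A later hunk in this file shrinks the chstate write-back size to match (one byte instead of four).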
@@ -118,6 +143,8 @@ struct ipa3_mhi_ev_ctx {
* @event_context_addr: the event context address in host address space
* @ev_ctx_host: MHI event context
* @cached_gsi_evt_ring_hdl: GSI channel event ring handle
+ * @brstmode_enabled: is burst mode enabled for this channel?
+ * @ch_scratch: the channel scratch configuration
*/
struct ipa3_mhi_channel_ctx {
bool valid;
@@ -132,6 +159,8 @@ struct ipa3_mhi_channel_ctx {
u64 event_context_addr;
struct ipa3_mhi_ev_ctx ev_ctx_host;
unsigned long cached_gsi_evt_ring_hdl;
+ bool brstmode_enabled;
+ union __packed gsi_channel_scratch ch_scratch;
};
enum ipa3_mhi_rm_state {
@@ -350,6 +379,8 @@ static int ipa3_mhi_print_host_channel_ctx_info(
nbytes += scnprintf(&buff[nbytes], len - nbytes,
"chstate: 0x%x\n", ch_ctx_host.chstate);
nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "brstmode: 0x%x\n", ch_ctx_host.brstmode);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
"chtype: 0x%x\n", ch_ctx_host.chtype);
nbytes += scnprintf(&buff[nbytes], len - nbytes,
"erindex: 0x%x\n", ch_ctx_host.erindex);
@@ -664,7 +695,7 @@ static int ipa3_mhi_set_state(enum ipa3_mhi_state new_state)
ipa3_mhi_ctx->trigger_wakeup = false;
if (ipa3_mhi_ctx->rm_cons_state ==
IPA_MHI_RM_STATE_REQUESTED) {
- ipa3_rm_notify_completion(
+ ipa_rm_notify_completion(
IPA_RM_RESOURCE_GRANTED,
IPA_RM_RESOURCE_MHI_CONS);
ipa3_mhi_ctx->rm_cons_state =
@@ -691,7 +722,7 @@ static int ipa3_mhi_set_state(enum ipa3_mhi_state new_state)
ipa3_mhi_ctx->wakeup_notified = false;
if (ipa3_mhi_ctx->rm_cons_state ==
IPA_MHI_RM_STATE_REQUESTED) {
- ipa3_rm_notify_completion(
+ ipa_rm_notify_completion(
IPA_RM_RESOURCE_GRANTED,
IPA_RM_RESOURCE_MHI_CONS);
ipa3_mhi_ctx->rm_cons_state =
@@ -848,7 +879,7 @@ static int ipa3_mhi_request_prod(void)
reinit_completion(&ipa3_mhi_ctx->rm_prod_granted_comp);
IPA_MHI_DBG("requesting mhi prod\n");
- res = ipa3_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
+ res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
if (res) {
if (res != -EINPROGRESS) {
IPA_MHI_ERR("failed to request mhi prod %d\n", res);
@@ -875,7 +906,7 @@ static int ipa3_mhi_release_prod(void)
IPA_MHI_FUNC_ENTRY();
- res = ipa3_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
+ res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
IPA_MHI_FUNC_EXIT();
return res;
@@ -1037,7 +1068,6 @@ static bool ipa3_mhi_sps_channel_empty(struct ipa3_mhi_channel_ctx *channel)
static bool ipa3_mhi_gsi_channel_empty(struct ipa3_mhi_channel_ctx *channel)
{
int res;
-
IPA_MHI_FUNC_ENTRY();
if (!channel->stop_in_proc) {
@@ -1102,7 +1132,13 @@ static bool ipa3_mhi_wait_for_ul_empty_timeout(unsigned int msecs)
IPA_MHI_DBG("timeout waiting for UL empty\n");
break;
}
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI &&
+ IPA_MHI_MAX_UL_CHANNELS == 1)
+ usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+ IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
}
+
IPA_MHI_DBG("IPA UL is %s\n", (empty) ? "empty" : "not empty");
IPA_MHI_FUNC_EXIT();
@@ -1354,7 +1390,7 @@ static int ipa3_mhi_reset_channel(struct ipa3_mhi_channel_ctx *channel)
res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
&channel->state, channel->channel_context_addr +
offsetof(struct ipa3_mhi_ch_ctx, chstate),
- sizeof(channel->state));
+ sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate));
if (res) {
IPAERR("ipa_mhi_read_write_host failed %d\n", res);
return res;
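Note: the size fix above matters because channel->state is an enum (four bytes on these builds) while the repacked chstate field is a single byte; writing sizeof(channel->state) back to the host context would clobber brstmode, pollcfg, and the reserved bits that now share the dword. The null-pointer idiom used here is the same one the kernel of this era wraps as FIELD_SIZEOF():

	/* From include/linux/kernel.h in this kernel generation: */
	#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

	/* Equivalent spelling of the new size expression: */
	FIELD_SIZEOF(struct ipa3_mhi_ch_ctx, chstate)	/* == 1 */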
@@ -1407,6 +1443,8 @@ static void ipa_mhi_dump_ch_ctx(struct ipa3_mhi_channel_ctx *channel)
{
IPA_MHI_DBG("ch_id %d\n", channel->id);
IPA_MHI_DBG("chstate 0x%x\n", channel->ch_ctx_host.chstate);
+ IPA_MHI_DBG("brstmode 0x%x\n", channel->ch_ctx_host.brstmode);
+ IPA_MHI_DBG("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg);
IPA_MHI_DBG("chtype 0x%x\n", channel->ch_ctx_host.chtype);
IPA_MHI_DBG("erindex 0x%x\n", channel->ch_ctx_host.erindex);
IPA_MHI_DBG("rbase 0x%llx\n", channel->ch_ctx_host.rbase);
@@ -1518,6 +1556,22 @@ static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify)
IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
}
+static int ipa3_mhi_get_ch_poll_cfg(struct ipa3_mhi_channel_ctx *channel,
+ int ring_size)
+{
+ switch (channel->ch_ctx_host.pollcfg) {
+ case 0:
+ /* set the default polling configuration according to the MHI spec */
+ if (IPA_CLIENT_IS_PROD(channel->ep->client))
+ return 7;
+ else
+ return (ring_size / 2) / 8;
+ default:
+ return channel->ch_ctx_host.pollcfg;
+ }
+}
+
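Note: a worked example of the default above: with pollcfg left at the MHI-spec default of 0 and a 256-element ring, a channel whose IPA endpoint is a producer gets the fixed value 7, while any other channel gets (256 / 2) / 8 = 16 ring elements between polls.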
static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
int ipa_ep_idx)
{
@@ -1525,7 +1579,6 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
struct ipa3_ep_context *ep;
struct gsi_evt_ring_props ev_props;
struct ipa_mhi_msi_info msi;
- union __packed gsi_evt_scratch ev_scratch;
struct gsi_chan_props ch_props;
union __packed gsi_channel_scratch ch_scratch;
struct ipa_gsi_ep_config *ep_cfg;
@@ -1588,14 +1641,6 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
channel->cached_gsi_evt_ring_hdl =
channel->ep->gsi_evt_ring_hdl;
- memset(&ev_scratch, 0, sizeof(ev_scratch));
- res = gsi_write_evt_ring_scratch(channel->ep->gsi_evt_ring_hdl,
- ev_scratch);
- if (res) {
- IPA_MHI_ERR("gsi_write_evt_ring_scratch failed %d\n",
- res);
- goto fail_evt_scratch;
- }
}
memset(&ch_props, 0, sizeof(ch_props));
@@ -1608,7 +1653,7 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
ch_props.ring_len = channel->ch_ctx_host.rlen;
ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
channel->ch_ctx_host.rbase);
- ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+ ch_props.use_db_eng = GSI_CHAN_DB_MODE;
ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
ch_props.low_weight = 1;
ch_props.err_cb = ipa_mhi_gsi_ch_err_cb;
@@ -1626,10 +1671,20 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
channel->channel_context_addr +
offsetof(struct ipa3_mhi_ch_ctx, wp));
ch_scratch.mhi.assert_bit40 = ipa3_mhi_ctx->assert_bit40;
- ch_scratch.mhi.max_outstanding_tre = ep_cfg->ipa_if_aos *
- GSI_CHAN_RE_SIZE_16B;
+ ch_scratch.mhi.max_outstanding_tre = 0;
ch_scratch.mhi.outstanding_threshold =
4 * GSI_CHAN_RE_SIZE_16B;
+ ch_scratch.mhi.oob_mod_threshold = 4;
+ if (channel->ch_ctx_host.brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
+ channel->ch_ctx_host.brstmode == IPA_MHI_BURST_MODE_ENABLE) {
+ ch_scratch.mhi.burst_mode_enabled = true;
+ ch_scratch.mhi.polling_configuration =
+ ipa3_mhi_get_ch_poll_cfg(channel,
+ (ch_props.ring_len / ch_props.re_size));
+ ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE;
+ } else {
+ ch_scratch.mhi.burst_mode_enabled = false;
+ }
res = gsi_write_channel_scratch(channel->ep->gsi_chan_hdl,
ch_scratch);
if (res) {
@@ -1637,6 +1692,8 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
res);
goto fail_ch_scratch;
}
+ channel->brstmode_enabled = ch_scratch.mhi.burst_mode_enabled;
+ channel->ch_scratch.mhi = ch_scratch.mhi;
IPA_MHI_DBG("Starting channel\n");
res = gsi_start_channel(channel->ep->gsi_chan_hdl);
@@ -1652,7 +1709,6 @@ fail_ch_start:
fail_ch_scratch:
gsi_dealloc_channel(channel->ep->gsi_chan_hdl);
fail_alloc_ch:
-fail_evt_scratch:
gsi_dealloc_evt_ring(channel->ep->gsi_evt_ring_hdl);
channel->ep->gsi_evt_ring_hdl = ~0;
fail_alloc_evt:
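Summarizing the burst-mode setup in this hunk: burst mode is enabled when the host context requests IPA_MHI_BURST_MODE_DEFAULT or IPA_MHI_BURST_MODE_ENABLE, polling starts in doorbell (DB) mode with the configuration computed by ipa3_mhi_get_ch_poll_cfg(), and the scratch is cached on the channel so the resume path can rewrite it. A condensed, illustrative-only sketch; struct mhi_scratch and the enum values are placeholders for the driver's definitions:

    #include <stdbool.h>

    enum {
        IPA_MHI_BURST_MODE_DEFAULT   = 0,   /* placeholder values */
        IPA_MHI_BURST_MODE_ENABLE    = 1,
        IPA_MHI_POLLING_MODE_DB_MODE = 0,
    };

    struct mhi_scratch {
        bool burst_mode_enabled;
        int  polling_configuration;
        int  polling_mode;
    };

    static void fill_scratch(struct mhi_scratch *s, int brstmode,
                             int poll_cfg)
    {
        s->burst_mode_enabled =
            (brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
             brstmode == IPA_MHI_BURST_MODE_ENABLE);
        if (s->burst_mode_enabled) {
            /* doorbell polling until the hardware switches modes */
            s->polling_configuration = poll_cfg;
            s->polling_mode = IPA_MHI_POLLING_MODE_DB_MODE;
        }
    }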
@@ -1724,7 +1780,7 @@ int ipa3_mhi_init(struct ipa_mhi_init_params *params)
ipa3_mhi_ctx->cb_priv = params->priv;
ipa3_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
ipa3_mhi_ctx->qmi_req_id = 0;
- ipa3_mhi_ctx->use_ipadma = 1;
+ ipa3_mhi_ctx->use_ipadma = true;
ipa3_mhi_ctx->assert_bit40 = !!params->assert_bit40;
ipa3_mhi_ctx->test_mode = params->test_mode;
init_completion(&ipa3_mhi_ctx->rm_prod_granted_comp);
@@ -1743,7 +1799,7 @@ int ipa3_mhi_init(struct ipa_mhi_init_params *params)
mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
mhi_prod_params.reg_params.notify_cb = ipa3_mhi_rm_prod_notify;
- res = ipa3_rm_create_resource(&mhi_prod_params);
+ res = ipa_rm_create_resource(&mhi_prod_params);
if (res) {
IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
goto fail_create_rm_prod;
@@ -1755,7 +1811,7 @@ int ipa3_mhi_init(struct ipa_mhi_init_params *params)
mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
mhi_cons_params.request_resource = ipa3_mhi_rm_cons_request;
mhi_cons_params.release_resource = ipa3_mhi_rm_cons_release;
- res = ipa3_rm_create_resource(&mhi_cons_params);
+ res = ipa_rm_create_resource(&mhi_cons_params);
if (res) {
IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
goto fail_create_rm_cons;
@@ -1779,7 +1835,7 @@ int ipa3_mhi_init(struct ipa_mhi_init_params *params)
return 0;
fail_create_rm_cons:
- ipa3_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
fail_create_rm_prod:
destroy_workqueue(ipa3_mhi_ctx->wq);
fail_create_wq:
@@ -1845,14 +1901,14 @@ int ipa3_mhi_start(struct ipa_mhi_start_params *params)
ipa3_mhi_ctx->event_context_array_addr);
/* Add MHI <-> Q6 dependencies to IPA RM */
- res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res && res != -EINPROGRESS) {
IPA_MHI_ERR("failed to add dependency %d\n", res);
goto fail_add_mhi_q6_dep;
}
- res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_MHI_CONS);
if (res && res != -EINPROGRESS) {
IPA_MHI_ERR("failed to add dependency %d\n", res);
@@ -1910,10 +1966,10 @@ int ipa3_mhi_start(struct ipa_mhi_start_params *params)
fail_init_engine:
ipa3_mhi_release_prod();
fail_request_prod:
- ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_MHI_CONS);
fail_add_q6_mhi_dep:
- ipa3_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
IPA_RM_RESOURCE_Q6_CONS);
fail_add_mhi_q6_dep:
ipa3_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
@@ -2021,7 +2077,7 @@ int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
&channel->state, channel->channel_context_addr +
offsetof(struct ipa3_mhi_ch_ctx, chstate),
- sizeof(channel->state));
+ sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate));
if (res) {
IPAERR("ipa_mhi_read_write_host failed\n");
return res;
@@ -2200,11 +2256,11 @@ static int ipa3_mhi_suspend_ul_channels(void)
IPA_MHI_FUNC_EXIT();
return 0;
}
-
static int ipa3_mhi_resume_ul_channels(bool LPTransitionRejected)
{
int i;
int res;
+ struct ipa3_mhi_channel_ctx *channel;
IPA_MHI_FUNC_ENTRY();
for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
@@ -2213,16 +2269,30 @@ static int ipa3_mhi_resume_ul_channels(bool LPTransitionRejected)
if (ipa3_mhi_ctx->ul_channels[i].state !=
IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
continue;
- IPA_MHI_DBG("resuming channel %d\n",
- ipa3_mhi_ctx->ul_channels[i].id);
+ channel = &ipa3_mhi_ctx->ul_channels[i];
+ IPA_MHI_DBG("resuming channel %d\n", channel->id);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- res = gsi_start_channel(
- ipa3_mhi_ctx->ul_channels[i].ep->gsi_chan_hdl);
- else
- res = ipa3_uc_mhi_resume_channel(
- ipa3_mhi_ctx->ul_channels[i].index,
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ if (channel->brstmode_enabled &&
+ !LPTransitionRejected) {
+ /*
+ * set polling mode bit to DB mode before
+ * resuming the channel
+ */
+ res = gsi_write_channel_scratch(
+ channel->ep->gsi_chan_hdl,
+ channel->ch_scratch);
+ if (res) {
+ IPA_MHI_ERR("write ch scratch fail %d\n"
+ , res);
+ return res;
+ }
+ }
+ res = gsi_start_channel(channel->ep->gsi_chan_hdl);
+ } else {
+ res = ipa3_uc_mhi_resume_channel(channel->index,
LPTransitionRejected);
+ }
if (res) {
IPA_MHI_ERR("failed to resume channel %d error %d\n",
@@ -2230,9 +2300,8 @@ static int ipa3_mhi_resume_ul_channels(bool LPTransitionRejected)
return res;
}
- ipa3_mhi_ctx->ul_channels[i].stop_in_proc = false;
- ipa3_mhi_ctx->ul_channels[i].state =
- IPA_HW_MHI_CHANNEL_STATE_RUN;
+ channel->stop_in_proc = false;
+ channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
}
IPA_MHI_FUNC_EXIT();
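The UL and DL resume paths now share the same shape: for a burst-mode channel, the cached scratch is written back before gsi_start_channel() so the polling-mode bit returns to DB mode; the rewrite is skipped when the low-power transition was rejected. A hedged outline (the gsi_* calls are the driver's APIs; the wrapper itself is illustrative):

    static int resume_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
                                  bool lp_transition_rejected)
    {
        int res;

        if (channel->brstmode_enabled && !lp_transition_rejected) {
            /* restore cached scratch so polling restarts in DB mode */
            res = gsi_write_channel_scratch(channel->ep->gsi_chan_hdl,
                                            channel->ch_scratch);
            if (res)
                return res;
        }
        return gsi_start_channel(channel->ep->gsi_chan_hdl);
    }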
@@ -2306,6 +2375,7 @@ static int ipa3_mhi_resume_dl_channels(bool LPTransitionRejected)
{
int i;
int res;
+ struct ipa3_mhi_channel_ctx *channel;
IPA_MHI_FUNC_ENTRY();
for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
@@ -2314,23 +2384,37 @@ static int ipa3_mhi_resume_dl_channels(bool LPTransitionRejected)
if (ipa3_mhi_ctx->dl_channels[i].state !=
IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
continue;
- IPA_MHI_DBG("resuming channel %d\n",
- ipa3_mhi_ctx->dl_channels[i].id);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- res = gsi_start_channel(
- ipa3_mhi_ctx->dl_channels[i].ep->gsi_chan_hdl);
- else
- res = ipa3_uc_mhi_resume_channel(
- ipa3_mhi_ctx->dl_channels[i].index,
+ channel = &ipa3_mhi_ctx->dl_channels[i];
+ IPA_MHI_DBG("resuming channel %d\n", channel->id);
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ if (channel->brstmode_enabled &&
+ !LPTransitionRejected) {
+ /*
+ * set polling mode bit to DB mode before
+ * resuming the channel
+ */
+ res = gsi_write_channel_scratch(
+ channel->ep->gsi_chan_hdl,
+ channel->ch_scratch);
+ if (res) {
+ IPA_MHI_ERR("write ch scratch fail %d\n"
+ , res);
+ return res;
+ }
+ }
+ res = gsi_start_channel(channel->ep->gsi_chan_hdl);
+ } else {
+ res = ipa3_uc_mhi_resume_channel(channel->index,
LPTransitionRejected);
+ }
if (res) {
- IPA_MHI_ERR("failed to suspend channel %d error %d\n",
+ IPA_MHI_ERR("failed to resume channel %d error %d\n",
i, res);
return res;
}
- ipa3_mhi_ctx->dl_channels[i].stop_in_proc = false;
- ipa3_mhi_ctx->dl_channels[i].state =
- IPA_HW_MHI_CHANNEL_STATE_RUN;
+ channel->stop_in_proc = false;
+ channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
}
IPA_MHI_FUNC_EXIT();
@@ -2439,7 +2523,7 @@ static void ipa3_mhi_update_host_ch_state(bool update_rp)
res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
&channel->state, channel->channel_context_addr +
offsetof(struct ipa3_mhi_ch_ctx, chstate),
- sizeof(channel->state));
+ sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate));
if (res) {
IPAERR("ipa_mhi_read_write_host failed\n");
BUG();
@@ -2476,7 +2560,7 @@ static void ipa3_mhi_update_host_ch_state(bool update_rp)
res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
&channel->state, channel->channel_context_addr +
offsetof(struct ipa3_mhi_ch_ctx, chstate),
- sizeof(channel->state));
+ sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate));
if (res) {
IPAERR("ipa_mhi_read_write_host failed\n");
BUG();
@@ -2567,6 +2651,16 @@ int ipa3_mhi_suspend(bool force)
empty = ipa3_mhi_wait_for_ul_empty_timeout(
IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
IPADBG("empty=%d\n", empty);
+ if (!empty && ipa3_ctx->transport_prototype
+ == IPA_TRANSPORT_TYPE_GSI) {
+ IPA_MHI_ERR("Failed to suspend UL channels\n");
+ if (ipa3_mhi_ctx->test_mode) {
+ res = -EAGAIN;
+ goto fail_suspend_ul_channel;
+ }
+
+ BUG();
+ }
} else {
IPA_MHI_DBG("IPA not empty\n");
res = -EAGAIN;
@@ -2672,6 +2766,14 @@ fail_release_prod:
fail_suspend_ul_channel:
ipa3_mhi_resume_ul_channels(true);
ipa3_mhi_set_state(IPA_MHI_STATE_STARTED);
+ if (force_clear) {
+ if (ipa3_mhi_disable_force_clear(ipa3_mhi_ctx->qmi_req_id)) {
+ IPA_MHI_ERR("failed to disable force clear\n");
+ BUG();
+ }
+ IPA_MHI_DBG("force clear datapath disabled\n");
+ ipa3_mhi_ctx->qmi_req_id++;
+ }
return res;
}
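Two suspend-path changes land above: on GSI, failing to drain the UL channels is fatal (BUG()) unless test_mode is set, in which case -EAGAIN lets the caller retry; and the error path now disables the force-clear datapath if it was enabled, bumping qmi_req_id so the next attempt uses a fresh request id. The retry-versus-panic policy in isolation (sketch only):

    /* A stuck UL channel is recoverable only in test mode. */
    static int check_ul_drained(bool empty, bool test_mode)
    {
        if (empty)
            return 0;
        if (test_mode)
            return -EAGAIN;   /* caller may retry the suspend */
        BUG();                /* production: inconsistent HW state */
    }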
@@ -2712,7 +2814,7 @@ int ipa3_mhi_resume(void)
}
dl_channel_resumed = true;
- ipa3_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
+ ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
IPA_RM_RESOURCE_MHI_CONS);
ipa3_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
}
@@ -2731,7 +2833,7 @@ int ipa3_mhi_resume(void)
}
if (!dl_channel_resumed) {
- res = ipa3_mhi_resume_dl_channels(true);
+ res = ipa3_mhi_resume_dl_channels(false);
if (res) {
IPA_MHI_ERR("ipa3_mhi_resume_dl_channels failed %d\n",
res);
@@ -2871,7 +2973,7 @@ void ipa3_mhi_destroy(void)
IPA_MHI_SUSPEND_SLEEP_MAX);
IPA_MHI_DBG("deleate dependency Q6_PROD->MHI_CONS\n");
- res = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_MHI_CONS);
if (res) {
IPAERR("Error deleting dependency %d->%d, res=%d\n",
@@ -2879,7 +2981,7 @@ void ipa3_mhi_destroy(void)
goto fail;
}
IPA_MHI_DBG("deleate dependency MHI_PROD->Q6_CONS\n");
- res = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res) {
IPAERR("Error deleting dependency %d->%d, res=%d\n",
@@ -2888,14 +2990,14 @@ void ipa3_mhi_destroy(void)
}
}
- res = ipa3_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+ res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
if (res) {
IPAERR("Error deleting resource %d, res=%d\n",
IPA_RM_RESOURCE_MHI_PROD, res);
goto fail;
}
- res = ipa3_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+ res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
if (res) {
IPAERR("Error deleting resource %d, res=%d\n",
IPA_RM_RESOURCE_MHI_CONS, res);
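With the call sites above switched from ipa3_rm_* to ipa_rm_*, the IPA-v3-specific copy of the resource manager is deleted below (ipa_rm.c, ipa_rm_dependency_graph.c/.h, ipa_rm_i.h, ipa_rm_inactivity_timer.c), leaving the common ipa_rm_* implementation as the single entry point. A typical caller after the rename, following the pattern in this diff (error handling elided):

    res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
    if (res && res != -EINPROGRESS)
        IPA_MHI_ERR("failed to request mhi prod %d\n", res);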
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm.c
deleted file mode 100644
index 662d2699a39a..000000000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm.c
+++ /dev/null
@@ -1,1039 +0,0 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/ipa.h>
-#include "ipa_i.h"
-#include "ipa_rm_dependency_graph.h"
-#include "ipa_rm_i.h"
-
-static const char *ipa3_resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
- __stringify(IPA_RM_RESOURCE_Q6_PROD),
- __stringify(IPA_RM_RESOURCE_USB_PROD),
- __stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD),
- __stringify(IPA_RM_RESOURCE_HSIC_PROD),
- __stringify(IPA_RM_RESOURCE_STD_ECM_PROD),
- __stringify(IPA_RM_RESOURCE_RNDIS_PROD),
- __stringify(IPA_RM_RESOURCE_WWAN_0_PROD),
- __stringify(IPA_RM_RESOURCE_WLAN_PROD),
- __stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD),
- __stringify(IPA_RM_RESOURCE_MHI_PROD),
- __stringify(IPA_RM_RESOURCE_Q6_CONS),
- __stringify(IPA_RM_RESOURCE_USB_CONS),
- __stringify(IPA_RM_RESOURCE_USB_DPL_CONS),
- __stringify(IPA_RM_RESOURCE_HSIC_CONS),
- __stringify(IPA_RM_RESOURCE_WLAN_CONS),
- __stringify(IPA_RM_RESOURCE_APPS_CONS),
- __stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS),
- __stringify(IPA_RM_RESOURCE_MHI_CONS),
-};
-
-struct ipa3_rm_profile_vote_type {
- enum ipa_voltage_level volt[IPA_RM_RESOURCE_MAX];
- enum ipa_voltage_level curr_volt;
- u32 bw_prods[IPA_RM_RESOURCE_PROD_MAX];
- u32 bw_cons[IPA_RM_RESOURCE_CONS_MAX];
- u32 curr_bw;
-};
-
-struct ipa3_rm_context_type {
- struct ipa3_rm_dep_graph *dep_graph;
- struct workqueue_struct *ipa_rm_wq;
- spinlock_t ipa_rm_lock;
- struct ipa3_rm_profile_vote_type prof_vote;
-};
-static struct ipa3_rm_context_type *ipa3_rm_ctx;
-
-struct ipa3_rm_notify_ipa_work_type {
- struct work_struct work;
- enum ipa_voltage_level volt;
- u32 bandwidth_mbps;
-};
-
-/**
- * ipa3_rm_create_resource() - create resource
- * @create_params: [in] parameters needed
- * for resource initialization
- *
- * Returns: 0 on success, negative on failure
- *
- * This function is called by IPA RM client to initialize client's resources.
- * This API should be called before any other IPA RM API on a given resource
- * name.
- *
- */
-int ipa3_rm_create_resource(struct ipa_rm_create_params *create_params)
-{
- struct ipa_rm_resource *resource;
- unsigned long flags;
- int result;
-
- if (!create_params) {
- IPA_RM_ERR("invalid args\n");
- return -EINVAL;
- }
- IPA_RM_DBG("%s\n", ipa3_rm_resource_str(create_params->name));
-
- if (create_params->floor_voltage < 0 ||
- create_params->floor_voltage >= IPA_VOLTAGE_MAX) {
- IPA_RM_ERR("invalid voltage %d\n",
- create_params->floor_voltage);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- create_params->name,
- &resource) == 0) {
- IPA_RM_ERR("resource already exists\n");
- result = -EEXIST;
- goto bail;
- }
- result = ipa3_rm_resource_create(create_params,
- &resource);
- if (result) {
- IPA_RM_ERR("ipa3_rm_resource_create() failed\n");
- goto bail;
- }
- result = ipa3_rm_dep_graph_add(ipa3_rm_ctx->dep_graph, resource);
- if (result) {
- IPA_RM_ERR("ipa3_rm_dep_graph_add() failed\n");
- ipa3_rm_resource_delete(resource);
- goto bail;
- }
-bail:
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_delete_resource() - delete resource
- * @resource_name: name of resource to be deleted
- *
- * Returns: 0 on success, negative on failure
- *
- * This function is called by IPA RM client to delete client's resources.
- *
- */
-int ipa3_rm_delete_resource(enum ipa_rm_resource_name resource_name)
-{
- struct ipa_rm_resource *resource;
- unsigned long flags;
- int result;
-
- IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name));
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- resource_name,
- &resource) != 0) {
- IPA_RM_ERR("resource does not exist\n");
- result = -EINVAL;
- goto bail;
- }
- result = ipa3_rm_resource_delete(resource);
- if (result) {
- IPA_RM_ERR("ipa3_rm_resource_delete() failed\n");
- goto bail;
- }
- result = ipa3_rm_dep_graph_remove(ipa3_rm_ctx->dep_graph,
- resource_name);
- if (result) {
- IPA_RM_ERR("ipa3_rm_dep_graph_remove() failed\n");
- goto bail;
- }
-bail:
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_add_dependency() - create dependency
- * between 2 resources
- * @resource_name: name of dependent resource
- * @depends_on_name: name of its dependency
- *
- * Returns: 0 on success, negative on failure
- *
- * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
- * in case client registered with IPA RM
- */
-int ipa3_rm_add_dependency(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name)
-{
- unsigned long flags;
- int result;
-
- IPA_RM_DBG("%s -> %s\n", ipa3_rm_resource_str(resource_name),
- ipa3_rm_resource_str(depends_on_name));
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- result = ipa3_rm_dep_graph_add_dependency(
- ipa3_rm_ctx->dep_graph,
- resource_name,
- depends_on_name);
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_add_dependency_sync() - Create a dependency between 2 resources
- * in a synchronized fashion. In case a producer resource is in GRANTED state
- * and the newly added consumer resource is in RELEASED state, the consumer
- * entity will be requested and the function will block until the consumer
- * is granted.
- * @resource_name: name of dependent resource
- * @depends_on_name: name of its dependency
- *
- * Returns: 0 on success, negative on failure
- *
- * Side effects: May block. See documentation above.
- */
-int ipa3_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name)
-{
- int result;
- struct ipa_rm_resource *consumer;
- unsigned long time;
- unsigned long flags;
-
- IPA_RM_DBG("%s -> %s\n", ipa3_rm_resource_str(resource_name),
- ipa3_rm_resource_str(depends_on_name));
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- result = ipa3_rm_dep_graph_add_dependency(
- ipa3_rm_ctx->dep_graph,
- resource_name,
- depends_on_name);
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (result == -EINPROGRESS) {
- ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- depends_on_name,
- &consumer);
- IPA_RM_DBG("%s waits for GRANT of %s.\n",
- ipa3_rm_resource_str(resource_name),
- ipa3_rm_resource_str(depends_on_name));
- time = wait_for_completion_timeout(
- &((struct ipa3_rm_resource_cons *)consumer)->
- request_consumer_in_progress,
- HZ);
- result = 0;
- if (!time) {
- IPA_RM_ERR("TIMEOUT waiting for %s GRANT event.",
- ipa3_rm_resource_str(depends_on_name));
- result = -ETIME;
- }
- IPA_RM_DBG("%s waited for %s GRANT %lu time.\n",
- ipa3_rm_resource_str(resource_name),
- ipa3_rm_resource_str(depends_on_name),
- time);
- }
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_delete_dependency() - delete dependency
- * between 2 resources
- * @resource_name: name of dependent resource
- * @depends_on_name: name of its dependency
- *
- * Returns: 0 on success, negative on failure
- *
- * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
- * in case client registered with IPA RM
- */
-int ipa3_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name)
-{
- unsigned long flags;
- int result;
-
- IPA_RM_DBG("%s -> %s\n", ipa3_rm_resource_str(resource_name),
- ipa3_rm_resource_str(depends_on_name));
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- result = ipa3_rm_dep_graph_delete_dependency(
- ipa3_rm_ctx->dep_graph,
- resource_name,
- depends_on_name);
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_request_resource() - request resource
- * @resource_name: [in] name of the requested resource
- *
- * Returns: 0 on success, negative on failure
- *
- * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED
- * on successful completion of this operation.
- */
-int ipa3_rm_request_resource(enum ipa_rm_resource_name resource_name)
-{
- struct ipa_rm_resource *resource;
- unsigned long flags;
- int result;
-
- if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
- IPA_RM_ERR("can be called on PROD only\n");
- return -EINVAL;
- }
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- resource_name,
- &resource) != 0) {
- IPA_RM_ERR("resource does not exists\n");
- result = -EPERM;
- goto bail;
- }
- result = ipa3_rm_resource_producer_request(
- (struct ipa3_rm_resource_prod *)resource);
-
-bail:
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
-
- return result;
-}
-
-void ipa3_delayed_release_work_func(struct work_struct *work)
-{
- unsigned long flags;
- struct ipa_rm_resource *resource;
- struct ipa3_rm_delayed_release_work_type *rwork = container_of(
- to_delayed_work(work),
- struct ipa3_rm_delayed_release_work_type,
- work);
-
- if (!IPA_RM_RESORCE_IS_CONS(rwork->resource_name)) {
- IPA_RM_ERR("can be called on CONS only\n");
- kfree(rwork);
- return;
- }
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- rwork->resource_name,
- &resource) != 0) {
- IPA_RM_ERR("resource does not exists\n");
- goto bail;
- }
-
- ipa3_rm_resource_consumer_release(
- (struct ipa3_rm_resource_cons *)resource, rwork->needed_bw,
- rwork->dec_usage_count);
-
-bail:
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- kfree(rwork);
-
-}
-
-/**
- * ipa3_rm_request_resource_with_timer() - requests the specified consumer
- * resource and releases it after 1 second
- * @resource_name: name of the requested resource
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name)
-{
- unsigned long flags;
- struct ipa_rm_resource *resource;
- struct ipa3_rm_delayed_release_work_type *release_work;
- int result;
-
- if (!IPA_RM_RESORCE_IS_CONS(resource_name)) {
- IPA_RM_ERR("can be called on CONS only\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- resource_name,
- &resource) != 0) {
- IPA_RM_ERR("resource does not exists\n");
- result = -EPERM;
- goto bail;
- }
- result = ipa3_rm_resource_consumer_request(
- (struct ipa3_rm_resource_cons *)resource, 0, false, true);
- if (result != 0 && result != -EINPROGRESS) {
- IPA_RM_ERR("consumer request returned error %d\n", result);
- result = -EPERM;
- goto bail;
- }
-
- release_work = kzalloc(sizeof(*release_work), GFP_ATOMIC);
- if (!release_work) {
- result = -ENOMEM;
- goto bail;
- }
- release_work->resource_name = resource->name;
- release_work->needed_bw = 0;
- release_work->dec_usage_count = false;
- INIT_DELAYED_WORK(&release_work->work, ipa3_delayed_release_work_func);
- schedule_delayed_work(&release_work->work,
- msecs_to_jiffies(IPA_RM_RELEASE_DELAY_IN_MSEC));
- result = 0;
-bail:
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
-
- return result;
-}
-
-/**
- * ipa3_rm_release_resource() - release resource
- * @resource_name: [in] name of the requested resource
- *
- * Returns: 0 on success, negative on failure
- *
- * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED
- * on successful completion of this operation.
- */
-int ipa3_rm_release_resource(enum ipa_rm_resource_name resource_name)
-{
- unsigned long flags;
- struct ipa_rm_resource *resource;
- int result;
-
- if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
- IPA_RM_ERR("can be called on PROD only\n");
- return -EINVAL;
- }
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- resource_name,
- &resource) != 0) {
- IPA_RM_ERR("resource does not exists\n");
- result = -EPERM;
- goto bail;
- }
- result = ipa3_rm_resource_producer_release(
- (struct ipa3_rm_resource_prod *)resource);
-
-bail:
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
-
- return result;
-}
-
-/**
- * ipa3_rm_register() - register for event
- * @resource_name: resource name
- * @reg_params: [in] registration parameters
- *
- * Returns: 0 on success, negative on failure
- *
- * Registration parameters provided here should be the same
- * as provided later in ipa3_rm_deregister() call.
- */
-int ipa3_rm_register(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_register_params *reg_params)
-{
- int result;
- unsigned long flags;
- struct ipa_rm_resource *resource;
-
- IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name));
-
- if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
- IPA_RM_ERR("can be called on PROD only\n");
- return -EINVAL;
- }
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- resource_name,
- &resource) != 0) {
- IPA_RM_ERR("resource does not exists\n");
- result = -EPERM;
- goto bail;
- }
- result = ipa3_rm_resource_producer_register(
- (struct ipa3_rm_resource_prod *)resource,
- reg_params,
- true);
-bail:
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_deregister() - cancel the registration
- * @resource_name: resource name
- * @reg_params: [in] registration parameters
- *
- * Returns: 0 on success, negative on failure
- *
- * Registration parameters provided here should be the same
- * as provided in ipa3_rm_register() call.
- */
-int ipa3_rm_deregister(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_register_params *reg_params)
-{
- int result;
- unsigned long flags;
- struct ipa_rm_resource *resource;
-
- IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name));
-
- if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
- IPA_RM_ERR("can be called on PROD only\n");
- return -EINVAL;
- }
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- resource_name,
- &resource) != 0) {
- IPA_RM_ERR("resource does not exists\n");
- result = -EPERM;
- goto bail;
- }
- result = ipa3_rm_resource_producer_deregister(
- (struct ipa3_rm_resource_prod *)resource,
- reg_params);
-bail:
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_set_perf_profile() - set performance profile
- * @resource_name: resource name
- * @profile: [in] profile information.
- *
- * Returns: 0 on success, negative on failure
- *
- * Set resource performance profile.
- * Updates IPA driver if performance level changed.
- */
-int ipa3_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
- struct ipa_rm_perf_profile *profile)
-{
- int result;
- unsigned long flags;
- struct ipa_rm_resource *resource;
-
- IPADBG("resource: %s ", ipa3_rm_resource_str(resource_name));
- if (profile)
- IPADBG("BW: %d\n", profile->max_supported_bandwidth_mbps);
- IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name));
-
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- resource_name,
- &resource) != 0) {
- IPA_RM_ERR("resource does not exists\n");
- result = -EPERM;
- goto bail;
- }
- result = ipa3_rm_resource_set_perf_profile(resource, profile);
- if (result) {
- IPA_RM_ERR("ipa3_rm_resource_set_perf_profile failed %d\n",
- result);
- goto bail;
- }
-
- result = 0;
-bail:
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_notify_completion() -
- * consumer driver notification for
- * request_resource / release_resource operations
- * completion
- * @event: notified event
- * @resource_name: resource name
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_rm_notify_completion(enum ipa_rm_event event,
- enum ipa_rm_resource_name resource_name)
-{
- int result;
-
- IPA_RM_DBG("event %d on %s\n", event,
- ipa3_rm_resource_str(resource_name));
- if (!IPA_RM_RESORCE_IS_CONS(resource_name)) {
- IPA_RM_ERR("can be called on CONS only\n");
- result = -EINVAL;
- goto bail;
- }
- ipa3_rm_wq_send_cmd(IPA_RM_WQ_RESOURCE_CB,
- resource_name,
- event,
- false);
- result = 0;
-bail:
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-static void ipa3_rm_wq_handler(struct work_struct *work)
-{
- unsigned long flags;
- struct ipa_rm_resource *resource;
- struct ipa3_rm_wq_work_type *ipa_rm_work =
- container_of(work,
- struct ipa3_rm_wq_work_type,
- work);
- IPA_RM_DBG("%s cmd=%d event=%d notify_registered_only=%d\n",
- ipa3_rm_resource_str(ipa_rm_work->resource_name),
- ipa_rm_work->wq_cmd,
- ipa_rm_work->event,
- ipa_rm_work->notify_registered_only);
- switch (ipa_rm_work->wq_cmd) {
- case IPA_RM_WQ_NOTIFY_PROD:
- if (!IPA_RM_RESORCE_IS_PROD(ipa_rm_work->resource_name)) {
- IPA_RM_ERR("resource is not PROD\n");
- goto free_work;
- }
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- ipa_rm_work->resource_name,
- &resource) != 0){
- IPA_RM_ERR("resource does not exists\n");
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock,
- flags);
- goto free_work;
- }
- ipa3_rm_resource_producer_notify_clients(
- (struct ipa3_rm_resource_prod *)resource,
- ipa_rm_work->event,
- ipa_rm_work->notify_registered_only);
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- break;
- case IPA_RM_WQ_NOTIFY_CONS:
- break;
- case IPA_RM_WQ_RESOURCE_CB:
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- ipa_rm_work->resource_name,
- &resource) != 0){
- IPA_RM_ERR("resource does not exists\n");
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock,
- flags);
- goto free_work;
- }
- ipa3_rm_resource_consumer_handle_cb(
- (struct ipa3_rm_resource_cons *)resource,
- ipa_rm_work->event);
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- break;
- default:
- break;
- }
-
-free_work:
- kfree((void *) work);
-}
-
-static void ipa3_rm_wq_resume_handler(struct work_struct *work)
-{
- unsigned long flags;
- struct ipa_rm_resource *resource;
- struct ipa3_rm_wq_suspend_resume_work_type *ipa_rm_work =
- container_of(work,
- struct ipa3_rm_wq_suspend_resume_work_type,
- work);
- IPA_RM_DBG("resume work handler: %s",
- ipa3_rm_resource_str(ipa_rm_work->resource_name));
-
- if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) {
- IPA_RM_ERR("resource is not CONS\n");
- return;
- }
- IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa3_rm_resource_str(
- ipa_rm_work->resource_name));
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- ipa_rm_work->resource_name,
- &resource) != 0){
- IPA_RM_ERR("resource does not exists\n");
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa3_rm_resource_str(
- ipa_rm_work->resource_name));
- goto bail;
- }
- ipa3_rm_resource_consumer_request_work(
- (struct ipa3_rm_resource_cons *)resource,
- ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true);
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
-bail:
- kfree(ipa_rm_work);
-}
-
-
-static void ipa3_rm_wq_suspend_handler(struct work_struct *work)
-{
- unsigned long flags;
- struct ipa_rm_resource *resource;
- struct ipa3_rm_wq_suspend_resume_work_type *ipa_rm_work =
- container_of(work,
- struct ipa3_rm_wq_suspend_resume_work_type,
- work);
- IPA_RM_DBG("suspend work handler: %s",
- ipa3_rm_resource_str(ipa_rm_work->resource_name));
-
- if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) {
- IPA_RM_ERR("resource is not CONS\n");
- return;
- }
- ipa3_suspend_resource_sync(ipa_rm_work->resource_name);
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- ipa_rm_work->resource_name,
- &resource) != 0){
- IPA_RM_ERR("resource does not exists\n");
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- return;
- }
- ipa3_rm_resource_consumer_release_work(
- (struct ipa3_rm_resource_cons *)resource,
- ipa_rm_work->prev_state,
- true);
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
-
- kfree(ipa_rm_work);
-}
-
-/**
- * ipa3_rm_wq_send_cmd() - send a command for deferred work
- * @wq_cmd: command that should be executed
- * @resource_name: resource on which command should be executed
- * @notify_registered_only: notify only clients registered by
- * ipa3_rm_register()
- *
- * Returns: 0 on success, negative otherwise
- */
-int ipa3_rm_wq_send_cmd(enum ipa3_rm_wq_cmd wq_cmd,
- enum ipa_rm_resource_name resource_name,
- enum ipa_rm_event event,
- bool notify_registered_only)
-{
- int result = -ENOMEM;
- struct ipa3_rm_wq_work_type *work = kzalloc(sizeof(*work), GFP_ATOMIC);
-
- if (work) {
- INIT_WORK((struct work_struct *)work, ipa3_rm_wq_handler);
- work->wq_cmd = wq_cmd;
- work->resource_name = resource_name;
- work->event = event;
- work->notify_registered_only = notify_registered_only;
- result = queue_work(ipa3_rm_ctx->ipa_rm_wq,
- (struct work_struct *)work);
- } else {
- IPA_RM_ERR("no mem\n");
- }
-
- return result;
-}
-
-int ipa3_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
- enum ipa3_rm_resource_state prev_state,
- u32 needed_bw)
-{
- int result = -ENOMEM;
- struct ipa3_rm_wq_suspend_resume_work_type *work =
- kzalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *)work,
- ipa3_rm_wq_suspend_handler);
- work->resource_name = resource_name;
- work->prev_state = prev_state;
- work->needed_bw = needed_bw;
- result = queue_work(ipa3_rm_ctx->ipa_rm_wq,
- (struct work_struct *)work);
- } else {
- IPA_RM_ERR("no mem\n");
- }
-
- return result;
-}
-
-int ipa3_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
- enum ipa3_rm_resource_state prev_state,
- u32 needed_bw)
-{
- int result = -ENOMEM;
- struct ipa3_rm_wq_suspend_resume_work_type *work =
- kzalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *)work,
- ipa3_rm_wq_resume_handler);
- work->resource_name = resource_name;
- work->prev_state = prev_state;
- work->needed_bw = needed_bw;
- result = queue_work(ipa3_rm_ctx->ipa_rm_wq,
- (struct work_struct *)work);
- } else {
- IPA_RM_ERR("no mem\n");
- }
-
- return result;
-}
-/**
- * ipa3_rm_initialize() - initialize IPA RM component
- *
- * Returns: 0 on success, negative otherwise
- */
-int ipa3_rm_initialize(void)
-{
- int result;
-
- ipa3_rm_ctx = kzalloc(sizeof(*ipa3_rm_ctx), GFP_KERNEL);
- if (!ipa3_rm_ctx) {
- IPA_RM_ERR("no mem\n");
- result = -ENOMEM;
- goto bail;
- }
- ipa3_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq");
- if (!ipa3_rm_ctx->ipa_rm_wq) {
- IPA_RM_ERR("create workqueue failed\n");
- result = -ENOMEM;
- goto create_wq_fail;
- }
- result = ipa3_rm_dep_graph_create(&(ipa3_rm_ctx->dep_graph));
- if (result) {
- IPA_RM_ERR("create dependency graph failed\n");
- goto graph_alloc_fail;
- }
- spin_lock_init(&ipa3_rm_ctx->ipa_rm_lock);
- IPA_RM_DBG("SUCCESS\n");
-
- return 0;
-graph_alloc_fail:
- destroy_workqueue(ipa3_rm_ctx->ipa_rm_wq);
-create_wq_fail:
- kfree(ipa3_rm_ctx);
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_stat() - print RM stat
- * @buf: [in] The user buff used to print
- * @size: [in] The size of buf
- * Returns: number of bytes used on success, negative on failure
- *
- * This function is called by ipa_debugfs in order to receive
- * a full picture of the current state of the RM
- */
-
-int ipa3_rm_stat(char *buf, int size)
-{
- unsigned long flags;
- int i, cnt = 0, result = -EINVAL;
- struct ipa_rm_resource *resource = NULL;
-
- if (!buf || size < 0)
- return result;
-
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
- for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; ++i) {
- result = ipa3_rm_dep_graph_get_resource(
- ipa3_rm_ctx->dep_graph,
- i,
- &resource);
- if (!result) {
- result = ipa3_rm_resource_producer_print_stat(
- resource, buf + cnt,
- size-cnt);
- if (result < 0)
- goto bail;
- cnt += result;
- }
- }
- result = cnt;
-bail:
- spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
-
- return result;
-}
-
-/**
- * ipa3_rm_resource_str() - returns string that represents the resource
- * @resource_name: [in] resource name
- */
-const char *ipa3_rm_resource_str(enum ipa_rm_resource_name resource_name)
-{
- if (resource_name < 0 || resource_name >= IPA_RM_RESOURCE_MAX)
- return "INVALID RESOURCE";
-
- return ipa3_resource_name_to_str[resource_name];
-};
-
-static void ipa3_rm_perf_profile_notify_to_ipa_work(struct work_struct *work)
-{
- struct ipa3_rm_notify_ipa_work_type *notify_work = container_of(work,
- struct ipa3_rm_notify_ipa_work_type,
- work);
- int res;
-
- IPA_RM_DBG("calling to IPA driver. voltage %d bandwidth %d\n",
- notify_work->volt, notify_work->bandwidth_mbps);
-
- res = ipa3_set_required_perf_profile(notify_work->volt,
- notify_work->bandwidth_mbps);
- if (res) {
- IPA_RM_ERR("ipa3_set_required_perf_profile failed %d\n", res);
- goto bail;
- }
-
- IPA_RM_DBG("IPA driver notified\n");
-bail:
- kfree(notify_work);
-}
-
-static void ipa3_rm_perf_profile_notify_to_ipa(enum ipa_voltage_level volt,
- u32 bandwidth)
-{
- struct ipa3_rm_notify_ipa_work_type *work;
-
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- IPA_RM_ERR("no mem\n");
- return;
- }
-
- INIT_WORK(&work->work, ipa3_rm_perf_profile_notify_to_ipa_work);
- work->volt = volt;
- work->bandwidth_mbps = bandwidth;
- queue_work(ipa3_rm_ctx->ipa_rm_wq, &work->work);
-}
-
-/**
- * ipa3_rm_perf_profile_change() - change performance profile vote for resource
- * @resource_name: [in] resource name
- *
- * change bandwidth and voltage vote based on resource state.
- */
-void ipa3_rm_perf_profile_change(enum ipa_rm_resource_name resource_name)
-{
- enum ipa_voltage_level old_volt;
- u32 *bw_ptr;
- u32 old_bw;
- struct ipa_rm_resource *resource;
- int i;
- u32 sum_bw_prod = 0;
- u32 sum_bw_cons = 0;
-
- IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name));
-
- if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
- resource_name,
- &resource) != 0) {
- IPA_RM_ERR("resource does not exists\n");
- WARN_ON(1);
- return;
- }
-
- old_volt = ipa3_rm_ctx->prof_vote.curr_volt;
- old_bw = ipa3_rm_ctx->prof_vote.curr_bw;
-
- if (IPA_RM_RESORCE_IS_PROD(resource_name)) {
- bw_ptr = &ipa3_rm_ctx->prof_vote.bw_prods[resource_name];
- } else if (IPA_RM_RESORCE_IS_CONS(resource_name)) {
- bw_ptr = &ipa3_rm_ctx->prof_vote.bw_cons[
- resource_name - IPA_RM_RESOURCE_PROD_MAX];
- } else {
- IPAERR("Invalid resource_name\n");
- return;
- }
-
- switch (resource->state) {
- case IPA_RM_GRANTED:
- case IPA_RM_REQUEST_IN_PROGRESS:
- IPA_RM_DBG("max_bw = %d, needed_bw = %d\n",
- resource->max_bw, resource->needed_bw);
- *bw_ptr = min(resource->max_bw, resource->needed_bw);
- ipa3_rm_ctx->prof_vote.volt[resource_name] =
- resource->floor_voltage;
- break;
-
- case IPA_RM_RELEASE_IN_PROGRESS:
- case IPA_RM_RELEASED:
- *bw_ptr = 0;
- ipa3_rm_ctx->prof_vote.volt[resource_name] = 0;
- break;
-
- default:
- IPA_RM_ERR("unknown state %d\n", resource->state);
- WARN_ON(1);
- return;
- }
- IPA_RM_DBG("resource bandwidth: %d voltage: %d\n", *bw_ptr,
- resource->floor_voltage);
-
- ipa3_rm_ctx->prof_vote.curr_volt = IPA_VOLTAGE_UNSPECIFIED;
- for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
- if (ipa3_rm_ctx->prof_vote.volt[i] >
- ipa3_rm_ctx->prof_vote.curr_volt) {
- ipa3_rm_ctx->prof_vote.curr_volt =
- ipa3_rm_ctx->prof_vote.volt[i];
- }
- }
-
- for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; i++)
- sum_bw_prod += ipa3_rm_ctx->prof_vote.bw_prods[i];
-
- for (i = 0; i < IPA_RM_RESOURCE_CONS_MAX; i++)
- sum_bw_cons += ipa3_rm_ctx->prof_vote.bw_cons[i];
-
- IPA_RM_DBG("all prod bandwidth: %d all cons bandwidth: %d\n",
- sum_bw_prod, sum_bw_cons);
- ipa3_rm_ctx->prof_vote.curr_bw = min(sum_bw_prod, sum_bw_cons);
-
- if (ipa3_rm_ctx->prof_vote.curr_volt == old_volt &&
- ipa3_rm_ctx->prof_vote.curr_bw == old_bw) {
- IPA_RM_DBG("same voting\n");
- return;
- }
-
- IPA_RM_DBG("new voting: voltage %d bandwidth %d\n",
- ipa3_rm_ctx->prof_vote.curr_volt,
- ipa3_rm_ctx->prof_vote.curr_bw);
-
- ipa3_rm_perf_profile_notify_to_ipa(ipa3_rm_ctx->prof_vote.curr_volt,
- ipa3_rm_ctx->prof_vote.curr_bw);
-
- return;
-};
-
-/**
- * ipa3_rm_exit() - free all IPA RM resources
- */
-void ipa3_rm_exit(void)
-{
- IPA_RM_DBG("ENTER\n");
- ipa3_rm_dep_graph_delete(ipa3_rm_ctx->dep_graph);
- destroy_workqueue(ipa3_rm_ctx->ipa_rm_wq);
- kfree(ipa3_rm_ctx);
- ipa3_rm_ctx = NULL;
- IPA_RM_DBG("EXIT\n");
-}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.c
deleted file mode 100644
index dabb9a63b39f..000000000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/slab.h>
-#include "ipa_rm_dependency_graph.h"
-#include "ipa_rm_i.h"
-
-static int ipa3_rm_dep_get_index(enum ipa_rm_resource_name resource_name)
-{
- int resource_index = IPA_RM_INDEX_INVALID;
-
- if (IPA_RM_RESORCE_IS_PROD(resource_name))
- resource_index = ipa3_rm_prod_index(resource_name);
- else if (IPA_RM_RESORCE_IS_CONS(resource_name))
- resource_index = ipa3_rm_cons_index(resource_name);
-
- return resource_index;
-}
-
-/**
- * ipa3_rm_dep_graph_create() - creates graph
- * @dep_graph: [out] created dependency graph
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_rm_dep_graph_create(struct ipa3_rm_dep_graph **dep_graph)
-{
- int result = 0;
-
- *dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL);
- if (!*dep_graph) {
- IPA_RM_ERR("no mem\n");
- result = -ENOMEM;
- goto bail;
- }
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_dep_graph_delete() - destroys the graph
- * @graph: [in] dependency graph
- *
- * Frees all resources.
- */
-void ipa3_rm_dep_graph_delete(struct ipa3_rm_dep_graph *graph)
-{
- int resource_index;
-
- if (!graph) {
- IPA_RM_ERR("invalid params\n");
- return;
- }
- for (resource_index = 0;
- resource_index < IPA_RM_RESOURCE_MAX;
- resource_index++)
- kfree(graph->resource_table[resource_index]);
- memset(graph->resource_table, 0, sizeof(graph->resource_table));
-}
-
-/**
- * ipa3_rm_dep_graph_get_resource() - provides a resource by name
- * @graph: [in] dependency graph
- * @name: [in] name of the resource
- * @resource: [out] resource in case of success
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_rm_dep_graph_get_resource(
- struct ipa3_rm_dep_graph *graph,
- enum ipa_rm_resource_name resource_name,
- struct ipa_rm_resource **resource)
-{
- int result;
- int resource_index;
-
- if (!graph) {
- result = -EINVAL;
- goto bail;
- }
- resource_index = ipa3_rm_dep_get_index(resource_name);
- if (resource_index == IPA_RM_INDEX_INVALID) {
- result = -EINVAL;
- goto bail;
- }
- *resource = graph->resource_table[resource_index];
- if (!*resource) {
- result = -EINVAL;
- goto bail;
- }
- result = 0;
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_dep_graph_add() - adds resource to graph
- * @graph: [in] dependency graph
- * @resource: [in] resource to add
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_rm_dep_graph_add(struct ipa3_rm_dep_graph *graph,
- struct ipa_rm_resource *resource)
-{
- int result = 0;
- int resource_index;
-
- if (!graph || !resource) {
- result = -EINVAL;
- goto bail;
- }
- resource_index = ipa3_rm_dep_get_index(resource->name);
- if (resource_index == IPA_RM_INDEX_INVALID) {
- result = -EINVAL;
- goto bail;
- }
- graph->resource_table[resource_index] = resource;
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_dep_graph_remove() - removes resource from graph
- * @graph: [in] dependency graph
- * @resource_name: [in] resource to remove
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_rm_dep_graph_remove(struct ipa3_rm_dep_graph *graph,
- enum ipa_rm_resource_name resource_name)
-{
- if (!graph)
- return -EINVAL;
- graph->resource_table[resource_name] = NULL;
-
- return 0;
-}
-
-/**
- * ipa3_rm_dep_graph_add_dependency() - adds dependency between
- * two nodes in graph
- * @graph: [in] dependency graph
- * @resource_name: [in] resource to add
- * @depends_on_name: [in] resource to add
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_rm_dep_graph_add_dependency(struct ipa3_rm_dep_graph *graph,
- enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name)
-{
- struct ipa_rm_resource *dependent = NULL;
- struct ipa_rm_resource *dependency = NULL;
- int result;
-
- if (!graph ||
- !IPA_RM_RESORCE_IS_PROD(resource_name) ||
- !IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
- IPA_RM_ERR("invalid params\n");
- result = -EINVAL;
- goto bail;
- }
- if (ipa3_rm_dep_graph_get_resource(graph,
- resource_name,
- &dependent)) {
- IPA_RM_ERR("%s does not exist\n",
- ipa3_rm_resource_str(resource_name));
- result = -EINVAL;
- goto bail;
- }
- if (ipa3_rm_dep_graph_get_resource(graph,
- depends_on_name,
- &dependency)) {
- IPA_RM_ERR("%s does not exist\n",
- ipa3_rm_resource_str(depends_on_name));
- result = -EINVAL;
- goto bail;
- }
- result = ipa3_rm_resource_add_dependency(dependent, dependency);
-bail:
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_dep_graph_delete_dependency() - deletes dependency between
- * two nodes in graph
- * @graph: [in] dependency graph
- * @resource_name: [in] resource to delete
- * @depends_on_name: [in] resource to delete
- *
- * Returns: 0 on success, negative on failure
- *
- */
-int ipa3_rm_dep_graph_delete_dependency(struct ipa3_rm_dep_graph *graph,
- enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name)
-{
- struct ipa_rm_resource *dependent = NULL;
- struct ipa_rm_resource *dependency = NULL;
- int result;
-
- if (!graph ||
- !IPA_RM_RESORCE_IS_PROD(resource_name) ||
- !IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
- IPA_RM_ERR("invalid params\n");
- result = -EINVAL;
- goto bail;
- }
-
- if (ipa3_rm_dep_graph_get_resource(graph,
- resource_name,
- &dependent)) {
- IPA_RM_ERR("%s does not exist\n",
- ipa3_rm_resource_str(resource_name));
- result = -EINVAL;
- goto bail;
- }
-
- if (ipa3_rm_dep_graph_get_resource(graph,
- depends_on_name,
- &dependency)) {
- IPA_RM_ERR("%s does not exist\n",
- ipa3_rm_resource_str(depends_on_name));
- result = -EINVAL;
- goto bail;
- }
-
- result = ipa3_rm_resource_delete_dependency(dependent, dependency);
-bail:
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.h b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.h
deleted file mode 100644
index 2a68ce91814f..000000000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_
-#define _IPA_RM_DEPENDENCY_GRAPH_H_
-
-#include <linux/list.h>
-#include <linux/ipa.h>
-#include "ipa_rm_resource.h"
-
-struct ipa3_rm_dep_graph {
- struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX];
-};
-
-int ipa3_rm_dep_graph_get_resource(
- struct ipa3_rm_dep_graph *graph,
- enum ipa_rm_resource_name name,
- struct ipa_rm_resource **resource);
-
-int ipa3_rm_dep_graph_create(struct ipa3_rm_dep_graph **dep_graph);
-
-void ipa3_rm_dep_graph_delete(struct ipa3_rm_dep_graph *graph);
-
-int ipa3_rm_dep_graph_add(struct ipa3_rm_dep_graph *graph,
- struct ipa_rm_resource *resource);
-
-int ipa3_rm_dep_graph_remove(struct ipa3_rm_dep_graph *graph,
- enum ipa_rm_resource_name resource_name);
-
-int ipa3_rm_dep_graph_add_dependency(struct ipa3_rm_dep_graph *graph,
- enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name);
-
-int ipa3_rm_dep_graph_delete_dependency(struct ipa3_rm_dep_graph *graph,
- enum ipa_rm_resource_name resource_name,
- enum ipa_rm_resource_name depends_on_name);
-
-#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_i.h
deleted file mode 100644
index 4650babddaaa..000000000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_i.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _IPA_RM_I_H_
-#define _IPA_RM_I_H_
-
-#include <linux/workqueue.h>
-#include <linux/ipa.h>
-#include "ipa_rm_resource.h"
-
-#define IPA_RM_DRV_NAME "ipa_rm"
-
-#define IPA_RM_DBG(fmt, args...) \
- pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
-#define IPA_RM_ERR(fmt, args...) \
- pr_err(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
-
-#define IPA_RM_RESOURCE_CONS_MAX \
- (IPA_RM_RESOURCE_MAX - IPA_RM_RESOURCE_PROD_MAX)
-#define IPA_RM_RESORCE_IS_PROD(x) \
- (x >= IPA_RM_RESOURCE_PROD && x < IPA_RM_RESOURCE_PROD_MAX)
-#define IPA_RM_RESORCE_IS_CONS(x) \
- (x >= IPA_RM_RESOURCE_PROD_MAX && x < IPA_RM_RESOURCE_MAX)
-#define IPA_RM_INDEX_INVALID (-1)
-#define IPA_RM_RELEASE_DELAY_IN_MSEC 1000
-
-int ipa3_rm_prod_index(enum ipa_rm_resource_name resource_name);
-int ipa3_rm_cons_index(enum ipa_rm_resource_name resource_name);
-
-/**
- * struct ipa3_rm_delayed_release_work_type - IPA RM delayed resource release
- * work type
- * @delayed_work: work struct
- * @ipa_rm_resource_name: name of the resource on which this work should be done
- * @needed_bw: bandwidth required for resource in Mbps
- * @dec_usage_count: decrease usage count on release ?
- */
-struct ipa3_rm_delayed_release_work_type {
- struct delayed_work work;
- enum ipa_rm_resource_name resource_name;
- u32 needed_bw;
- bool dec_usage_count;
-
-};
-
-/**
- * enum ipa3_rm_wq_cmd - workqueue commands
- */
-enum ipa3_rm_wq_cmd {
- IPA_RM_WQ_NOTIFY_PROD,
- IPA_RM_WQ_NOTIFY_CONS,
- IPA_RM_WQ_RESOURCE_CB
-};
-
-/**
- * struct ipa3_rm_wq_work_type - IPA RM workqueue specific
- * work type
- * @work: work struct
- * @wq_cmd: command that should be processed in workqueue context
- * @resource_name: name of the resource on which this work
- * should be done
- * @dep_graph: data structure to search for resource if exists
- * @event: event to notify
- * @notify_registered_only: notify only clients registered by
- * ipa3_rm_register()
- */
-struct ipa3_rm_wq_work_type {
- struct work_struct work;
- enum ipa3_rm_wq_cmd wq_cmd;
- enum ipa_rm_resource_name resource_name;
- enum ipa_rm_event event;
- bool notify_registered_only;
-};
-
-/**
- * struct ipa3_rm_wq_suspend_resume_work_type - IPA RM workqueue resume or
- * suspend work type
- * @work: work struct
- * @resource_name: name of the resource on which this work
- * should be done
- * @prev_state:
- * @needed_bw:
- */
-struct ipa3_rm_wq_suspend_resume_work_type {
- struct work_struct work;
- enum ipa_rm_resource_name resource_name;
- enum ipa3_rm_resource_state prev_state;
- u32 needed_bw;
-
-};
-
-int ipa3_rm_wq_send_cmd(enum ipa3_rm_wq_cmd wq_cmd,
- enum ipa_rm_resource_name resource_name,
- enum ipa_rm_event event,
- bool notify_registered_only);
-
-int ipa3_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
- enum ipa3_rm_resource_state prev_state,
- u32 needed_bw);
-
-int ipa3_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
- enum ipa3_rm_resource_state prev_state,
- u32 needed_bw);
-
-int ipa3_rm_initialize(void);
-
-int ipa3_rm_stat(char *buf, int size);
-
-const char *ipa3_rm_resource_str(enum ipa_rm_resource_name resource_name);
-
-void ipa3_rm_perf_profile_change(enum ipa_rm_resource_name resource_name);
-
-int ipa3_rm_request_resource_with_timer(enum ipa_rm_resource_name
- resource_name);
-
-void ipa3_delayed_release_work_func(struct work_struct *work);
-
-void ipa3_rm_exit(void);
-
-#endif /* _IPA_RM_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c
deleted file mode 100644
index cd72b058b00d..000000000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <linux/unistd.h>
-#include <linux/workqueue.h>
-#include <linux/ipa.h>
-#include "ipa_i.h"
-
-/**
- * struct ipa3_rm_it_private - IPA RM Inactivity Timer private
- * data
- * @initied: indicates if instance was initialized
- * @lock - spinlock for mutual exclusion
- * @resource_name - resource name
- * @work: delayed work object for running the delayed release
- * function
- * @resource_requested: boolean flag indicates if resource was requested
- * @reschedule_work: boolean flag indicates to not release and to
- * reschedule the release work.
- * @work_in_progress: boolean flag indicates is release work was scheduled.
- * @jiffies: number of jiffies for timeout
- */
-struct ipa3_rm_it_private {
- bool initied;
- enum ipa_rm_resource_name resource_name;
- spinlock_t lock;
- struct delayed_work work;
- bool resource_requested;
- bool reschedule_work;
- bool work_in_progress;
- unsigned long jiffies;
-};
-
-static struct ipa3_rm_it_private ipa3_rm_it_handles[IPA_RM_RESOURCE_MAX];
-
-/**
- * ipa3_rm_inactivity_timer_func() - called when the timer expires, in
- * the context of the shared workqueue. Checks whether the
- * reschedule_work flag is set; if not, calls ipa3_rm_release_resource(),
- * otherwise reschedules the work. The flag is cleared when
- * ipa3_rm_inactivity_timer_release_resource() is called.
- *
- * @work: work object provided by the work queue
- *
- * Return codes:
- * None
- */
-static void ipa3_rm_inactivity_timer_func(struct work_struct *work)
-{
-
- struct ipa3_rm_it_private *me = container_of(to_delayed_work(work),
- struct ipa3_rm_it_private,
- work);
- unsigned long flags;
-
- IPADBG_LOW("%s: timer expired for resource %d!\n", __func__,
- me->resource_name);
-
- spin_lock_irqsave(
- &ipa3_rm_it_handles[me->resource_name].lock, flags);
- if (ipa3_rm_it_handles[me->resource_name].reschedule_work) {
- IPADBG_LOW("%s: setting delayed work\n", __func__);
- ipa3_rm_it_handles[me->resource_name].reschedule_work = false;
- schedule_delayed_work(
- &ipa3_rm_it_handles[me->resource_name].work,
- ipa3_rm_it_handles[me->resource_name].jiffies);
- } else if (ipa3_rm_it_handles[me->resource_name].resource_requested) {
- IPADBG_LOW("%s: not calling release\n", __func__);
- ipa3_rm_it_handles[me->resource_name].work_in_progress = false;
- } else {
- IPADBG_LOW("%s: calling release_resource on resource %d!\n",
- __func__, me->resource_name);
- ipa3_rm_release_resource(me->resource_name);
- ipa3_rm_it_handles[me->resource_name].work_in_progress = false;
- }
- spin_unlock_irqrestore(
- &ipa3_rm_it_handles[me->resource_name].lock, flags);
-}
-
-/**
-* ipa3_rm_inactivity_timer_init() - Init function for IPA RM
-* inactivity timer. This function shall be called prior to calling
-* any other API of the IPA RM inactivity timer.
-*
-* @resource_name: Resource name. @see ipa_rm.h
-* @msecs: time in milliseconds that the IPA RM inactivity timer
-* shall wait prior to calling ipa3_rm_release_resource().
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int ipa3_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
- unsigned long msecs)
-{
- IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
-
- if (resource_name < 0 ||
- resource_name >= IPA_RM_RESOURCE_MAX) {
- IPAERR("%s: Invalid parameter\n", __func__);
- return -EINVAL;
- }
-
- if (ipa3_rm_it_handles[resource_name].initied) {
- IPAERR("%s: resource %d already inited\n",
- __func__, resource_name);
- return -EINVAL;
- }
-
- spin_lock_init(&ipa3_rm_it_handles[resource_name].lock);
- ipa3_rm_it_handles[resource_name].resource_name = resource_name;
- ipa3_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs);
- ipa3_rm_it_handles[resource_name].resource_requested = false;
- ipa3_rm_it_handles[resource_name].reschedule_work = false;
- ipa3_rm_it_handles[resource_name].work_in_progress = false;
-
- INIT_DELAYED_WORK(&ipa3_rm_it_handles[resource_name].work,
- ipa3_rm_inactivity_timer_func);
- ipa3_rm_it_handles[resource_name].initied = true;
-
- return 0;
-}
-
-/**
-* ipa3_rm_inactivity_timer_destroy() - De-Init function for IPA
-* RM inactivity timer.
-*
-* @resource_name: Resource name. @see ipa_rm.h
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int ipa3_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
-{
- IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
-
- if (resource_name < 0 ||
- resource_name >= IPA_RM_RESOURCE_MAX) {
- IPAERR("%s: Invalid parameter\n", __func__);
- return -EINVAL;
- }
-
- if (!ipa3_rm_it_handles[resource_name].initied) {
- IPAERR("%s: resource %d already inited\n",
- __func__, resource_name);
- return -EINVAL;
- }
-
- cancel_delayed_work_sync(&ipa3_rm_it_handles[resource_name].work);
-
- memset(&ipa3_rm_it_handles[resource_name], 0,
- sizeof(struct ipa3_rm_it_private));
-
- return 0;
-}
-
-/**
-* ipa3_rm_inactivity_timer_request_resource() - Same as
-* ipa3_rm_request_resource(), with the difference that calling
-* this function also cancels the inactivity timer, if
-* ipa3_rm_inactivity_timer_release_resource() was called earlier.
-*
-* @resource_name: Resource name. @see ipa_rm.h
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int ipa3_rm_inactivity_timer_request_resource(
- enum ipa_rm_resource_name resource_name)
-{
- int ret;
- unsigned long flags;
-
- IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
-
- if (resource_name < 0 ||
- resource_name >= IPA_RM_RESOURCE_MAX) {
- IPAERR("%s: Invalid parameter\n", __func__);
- return -EINVAL;
- }
-
- if (!ipa3_rm_it_handles[resource_name].initied) {
- IPAERR("%s: Not initialized\n", __func__);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&ipa3_rm_it_handles[resource_name].lock, flags);
- ipa3_rm_it_handles[resource_name].resource_requested = true;
- spin_unlock_irqrestore(&ipa3_rm_it_handles[resource_name].lock, flags);
- ret = ipa3_rm_request_resource(resource_name);
- IPADBG_LOW("%s: resource %d: returning %d\n", __func__,
- resource_name, ret);
-
- return ret;
-}
-
-/**
-* ipa3_rm_inactivity_timer_release_resource() - Sets the
-* inactivity timer to the timeout set by
-* ipa3_rm_inactivity_timer_init(). When the timeout expires, the IPA
-* RM inactivity timer will call ipa3_rm_release_resource().
-* If a call to ipa3_rm_inactivity_timer_request_resource() was
-* made BEFORE the timeout has expired, the timer will be
-* cancelled.
-*
-* @resource_name: Resource name. @see ipa_rm.h
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int ipa3_rm_inactivity_timer_release_resource(
- enum ipa_rm_resource_name resource_name)
-{
- unsigned long flags;
-
- IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
-
- if (resource_name < 0 ||
- resource_name >= IPA_RM_RESOURCE_MAX) {
- IPAERR("%s: Invalid parameter\n", __func__);
- return -EINVAL;
- }
-
- if (!ipa3_rm_it_handles[resource_name].initied) {
- IPAERR("%s: Not initialized\n", __func__);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&ipa3_rm_it_handles[resource_name].lock, flags);
- ipa3_rm_it_handles[resource_name].resource_requested = false;
- if (ipa3_rm_it_handles[resource_name].work_in_progress) {
- IPADBG_LOW("%s: Timer already set, not scheduling again %d\n",
- __func__, resource_name);
- ipa3_rm_it_handles[resource_name].reschedule_work = true;
- spin_unlock_irqrestore(
- &ipa3_rm_it_handles[resource_name].lock, flags);
- return 0;
- }
- ipa3_rm_it_handles[resource_name].work_in_progress = true;
- ipa3_rm_it_handles[resource_name].reschedule_work = false;
- IPADBG_LOW("%s: setting delayed work\n", __func__);
- schedule_delayed_work(&ipa3_rm_it_handles[resource_name].work,
- ipa3_rm_it_handles[resource_name].jiffies);
- spin_unlock_irqrestore(&ipa3_rm_it_handles[resource_name].lock, flags);
-
- return 0;
-}
-
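A minimal usage sketch of the inactivity-timer pattern this file implemented, assuming a hypothetical client and a 200 ms window (rmnet_ipa.c below keeps using it through the generic ipa_rm_inactivity_timer_* wrappers); error handling trimmed:

#include <linux/ipa.h>

/* One-time setup: arm a 200 ms inactivity window for the WWAN producer. */
static int example_setup(void)
{
	return ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD, 200);
}

static int example_tx(void)
{
	int ret;

	/* A request cancels any pending delayed release of the resource. */
	ret = ipa_rm_inactivity_timer_request_resource(
			IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret && ret != -EINPROGRESS)
		return ret;

	/* ... transmit ... */

	/* A release only schedules the real release after the window. */
	return ipa_rm_inactivity_timer_release_resource(
			IPA_RM_RESOURCE_WWAN_0_PROD);
}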
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.c
deleted file mode 100644
index 7386135d59ff..000000000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/slab.h>
-#include "ipa_i.h"
-#include "ipa_rm_i.h"
-
-/**
- * ipa3_rm_peers_list_get_resource_index() - resource name to index
- * of this resource in corresponding peers list
- * @resource_name: [in] resource name
- *
- * Returns: the resource index, or IPA_RM_INDEX_INVALID in case the
- * provided resource name is not contained in enum
- * ipa_rm_resource_name.
- *
- */
-static int ipa3_rm_peers_list_get_resource_index(
- enum ipa_rm_resource_name resource_name)
-{
- int resource_index = IPA_RM_INDEX_INVALID;
-
- if (IPA_RM_RESORCE_IS_PROD(resource_name))
- resource_index = ipa3_rm_prod_index(resource_name);
- else if (IPA_RM_RESORCE_IS_CONS(resource_name)) {
- resource_index = ipa3_rm_cons_index(resource_name);
- if (resource_index != IPA_RM_INDEX_INVALID)
- resource_index =
- resource_index - IPA_RM_RESOURCE_PROD_MAX;
- }
-
- return resource_index;
-}
-
-static bool ipa3_rm_peers_list_check_index(int index,
- struct ipa3_rm_peers_list *peers_list)
-{
- return !(index >= peers_list->max_peers || index < 0);
-}
-
-/**
- * ipa3_rm_peers_list_create() - creates the peers list
- *
- * @max_peers: maximum number of peers in new list
- * @peers_list: [out] newly created peers list
- *
- * Returns: 0 in case of SUCCESS, negative otherwise
- */
-int ipa3_rm_peers_list_create(int max_peers,
- struct ipa3_rm_peers_list **peers_list)
-{
- int result;
-
- *peers_list = kzalloc(sizeof(**peers_list), GFP_ATOMIC);
- if (!*peers_list) {
- IPA_RM_ERR("no mem\n");
- result = -ENOMEM;
- goto bail;
- }
-
- (*peers_list)->max_peers = max_peers;
- (*peers_list)->peers = kzalloc((*peers_list)->max_peers *
- sizeof(struct ipa_rm_resource *), GFP_ATOMIC);
- if (!((*peers_list)->peers)) {
- IPA_RM_ERR("no mem\n");
- result = -ENOMEM;
- goto list_alloc_fail;
- }
-
- return 0;
-
-list_alloc_fail:
- kfree(*peers_list);
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_peers_list_delete() - deletes the peers list
- *
- * @peers_list: peers list
- *
- */
-void ipa3_rm_peers_list_delete(struct ipa3_rm_peers_list *peers_list)
-{
- if (peers_list) {
- kfree(peers_list->peers);
- kfree(peers_list);
- }
-}
-
-/**
- * ipa3_rm_peers_list_remove_peer() - removes peer from the list
- *
- * @peers_list: peers list
- * @resource_name: name of the resource to remove
- *
- */
-void ipa3_rm_peers_list_remove_peer(
- struct ipa3_rm_peers_list *peers_list,
- enum ipa_rm_resource_name resource_name)
-{
- if (!peers_list)
- return;
-
- peers_list->peers[ipa3_rm_peers_list_get_resource_index(
- resource_name)] = NULL;
- peers_list->peers_count--;
-}
-
-/**
- * ipa3_rm_peers_list_add_peer() - adds peer to the list
- *
- * @peers_list: peers list
- * @resource: resource to add
- *
- */
-void ipa3_rm_peers_list_add_peer(
- struct ipa3_rm_peers_list *peers_list,
- struct ipa_rm_resource *resource)
-{
- if (!peers_list || !resource)
- return;
-
- peers_list->peers[ipa3_rm_peers_list_get_resource_index(
- resource->name)] =
- resource;
- peers_list->peers_count++;
-}
-
-/**
- * ipa3_rm_peers_list_is_empty() - checks
- * if resource peers list is empty
- *
- * @peers_list: peers list
- *
- * Returns: true if the list is empty, false otherwise
- */
-bool ipa3_rm_peers_list_is_empty(struct ipa3_rm_peers_list *peers_list)
-{
- bool result = true;
-
- if (!peers_list)
- goto bail;
-
- if (peers_list->peers_count > 0)
- result = false;
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_peers_list_has_last_peer() - checks
- * if resource peers list has exactly one peer
- *
- * @peers_list: peers list
- *
- * Returns: true if the list has exactly one peer, false otherwise
- */
-bool ipa3_rm_peers_list_has_last_peer(
- struct ipa3_rm_peers_list *peers_list)
-{
- bool result = false;
-
- if (!peers_list)
- goto bail;
-
- if (peers_list->peers_count == 1)
- result = true;
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_peers_list_check_dependency() - check dependency
- * between 2 peer lists
- * @resource_peers: first peers list
- * @resource_name: first peers list resource name
- * @depends_on_peers: second peers list
- * @depends_on_name: second peers list resource name
- *
- * Returns: true if there is dependency, false otherwise
- *
- */
-bool ipa3_rm_peers_list_check_dependency(
- struct ipa3_rm_peers_list *resource_peers,
- enum ipa_rm_resource_name resource_name,
- struct ipa3_rm_peers_list *depends_on_peers,
- enum ipa_rm_resource_name depends_on_name)
-{
- bool result = false;
-
- if (!resource_peers || !depends_on_peers)
- return result;
-
- if (resource_peers->peers[ipa3_rm_peers_list_get_resource_index(
- depends_on_name)] != NULL)
- result = true;
-
- if (depends_on_peers->peers[ipa3_rm_peers_list_get_resource_index(
- resource_name)] != NULL)
- result = true;
-
- return result;
-}
-
-/**
- * ipa3_rm_peers_list_get_resource() - get resource by
- * resource index
- * @resource_index: resource index
- * @resource_peers: peers list
- *
- * Returns: the resource if found, NULL otherwise
- */
-struct ipa_rm_resource *ipa3_rm_peers_list_get_resource(int resource_index,
- struct ipa3_rm_peers_list *resource_peers)
-{
- struct ipa_rm_resource *result = NULL;
-
- if (!ipa3_rm_peers_list_check_index(resource_index, resource_peers))
- goto bail;
-
- result = resource_peers->peers[resource_index];
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_peers_list_get_size() - get peers list size
- *
- * @peers_list: peers list
- *
- * Returns: the size of the peers list
- */
-int ipa3_rm_peers_list_get_size(struct ipa3_rm_peers_list *peers_list)
-{
- return peers_list->max_peers;
-}
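The peers list deleted above is symmetric bookkeeping: each side of a dependency records the other, which is what ipa3_rm_peers_list_check_dependency() tests (consumer indices are rebased by IPA_RM_RESOURCE_PROD_MAX so both lists start at 0). A hedged sketch, assuming hypothetical producer/consumer resources whose peers lists already exist:

#include "ipa_rm_peers_list.h"

static bool example_link(struct ipa_rm_resource *prod,
			 struct ipa_rm_resource *cons)
{
	/* Record each side as the other's peer... */
	ipa3_rm_peers_list_add_peer(prod->peers_list, cons);
	ipa3_rm_peers_list_add_peer(cons->peers_list, prod);

	/* ...so the dependency check now succeeds in either direction. */
	return ipa3_rm_peers_list_check_dependency(prod->peers_list,
						   prod->name,
						   cons->peers_list,
						   cons->name);
}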
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.h b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.h
deleted file mode 100644
index dc58e5d23fa6..000000000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _IPA_RM_PEERS_LIST_H_
-#define _IPA_RM_PEERS_LIST_H_
-
-#include "ipa_rm_resource.h"
-
-/**
- * struct ipa3_rm_peers_list - IPA RM resource peers list
- * @peers: the list of references to resources dependent on this resource
- * in case of producer or list of dependencies in case of consumer
- * @max_peers: maximum number of peers for this resource
- * @peers_count: actual number of peers for this resource
- */
-struct ipa3_rm_peers_list {
- struct ipa_rm_resource **peers;
- int max_peers;
- int peers_count;
-};
-
-int ipa3_rm_peers_list_create(int max_peers,
- struct ipa3_rm_peers_list **peers_list);
-void ipa3_rm_peers_list_delete(struct ipa3_rm_peers_list *peers_list);
-void ipa3_rm_peers_list_remove_peer(
- struct ipa3_rm_peers_list *peers_list,
- enum ipa_rm_resource_name resource_name);
-void ipa3_rm_peers_list_add_peer(
- struct ipa3_rm_peers_list *peers_list,
- struct ipa_rm_resource *resource);
-bool ipa3_rm_peers_list_check_dependency(
- struct ipa3_rm_peers_list *resource_peers,
- enum ipa_rm_resource_name resource_name,
- struct ipa3_rm_peers_list *depends_on_peers,
- enum ipa_rm_resource_name depends_on_name);
-struct ipa_rm_resource *ipa3_rm_peers_list_get_resource(int resource_index,
- struct ipa3_rm_peers_list *peers_list);
-int ipa3_rm_peers_list_get_size(struct ipa3_rm_peers_list *peers_list);
-bool ipa3_rm_peers_list_is_empty(struct ipa3_rm_peers_list *peers_list);
-bool ipa3_rm_peers_list_has_last_peer(
- struct ipa3_rm_peers_list *peers_list);
-
-
-#endif /* _IPA_RM_PEERS_LIST_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c
deleted file mode 100644
index 4566b8c4ea84..000000000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c
+++ /dev/null
@@ -1,1176 +0,0 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/slab.h>
-#include "ipa_i.h"
-#include "ipa_rm_resource.h"
-#include "ipa_rm_i.h"
-
-/**
- * ipa3_rm_prod_index() - producer name to producer index mapping
- * @resource_name: [in] resource name (should be of producer)
- *
- * Returns: resource index mapping, IPA_RM_INDEX_INVALID
- * in case provided resource name isn't contained
- * in enum ipa_rm_resource_name or is not a producer.
- *
- */
-int ipa3_rm_prod_index(enum ipa_rm_resource_name resource_name)
-{
- int result = resource_name;
-
- switch (resource_name) {
- case IPA_RM_RESOURCE_Q6_PROD:
- case IPA_RM_RESOURCE_USB_PROD:
- case IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD:
- case IPA_RM_RESOURCE_HSIC_PROD:
- case IPA_RM_RESOURCE_STD_ECM_PROD:
- case IPA_RM_RESOURCE_RNDIS_PROD:
- case IPA_RM_RESOURCE_WWAN_0_PROD:
- case IPA_RM_RESOURCE_WLAN_PROD:
- case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
- case IPA_RM_RESOURCE_MHI_PROD:
- break;
- default:
- result = IPA_RM_INDEX_INVALID;
- break;
- }
-
- return result;
-}
-
-/**
- * ipa3_rm_cons_index() - consumer name to consumer index mapping
- * @resource_name: [in] resource name (should be of a consumer)
- *
- * Returns: resource index mapping, IPA_RM_INDEX_INVALID
- * in case provided resource name isn't contained
- * in enum ipa_rm_resource_name or is not a consumer.
- *
- */
-int ipa3_rm_cons_index(enum ipa_rm_resource_name resource_name)
-{
- int result = resource_name;
-
- switch (resource_name) {
- case IPA_RM_RESOURCE_Q6_CONS:
- case IPA_RM_RESOURCE_USB_CONS:
- case IPA_RM_RESOURCE_HSIC_CONS:
- case IPA_RM_RESOURCE_WLAN_CONS:
- case IPA_RM_RESOURCE_APPS_CONS:
- case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
- case IPA_RM_RESOURCE_MHI_CONS:
- case IPA_RM_RESOURCE_USB_DPL_CONS:
- break;
- default:
- result = IPA_RM_INDEX_INVALID;
- break;
- }
-
- return result;
-}
-
-int ipa3_rm_resource_consumer_release_work(
- struct ipa3_rm_resource_cons *consumer,
- enum ipa3_rm_resource_state prev_state,
- bool notify_completion)
-{
- int driver_result;
-
- IPA_RM_DBG("calling driver CB\n");
- driver_result = consumer->release_resource();
- IPA_RM_DBG("driver CB returned with %d\n", driver_result);
- /*
- * Treat IPA_RM_RELEASE_IN_PROGRESS as IPA_RM_RELEASED
- * for CONS which remains in RELEASE_IN_PROGRESS.
- */
- if (driver_result == -EINPROGRESS)
- driver_result = 0;
- if (driver_result != 0 && driver_result != -EINPROGRESS) {
- IPA_RM_ERR("driver CB returned error %d\n", driver_result);
- consumer->resource.state = prev_state;
- goto bail;
- }
- if (driver_result == 0) {
- if (notify_completion)
- ipa3_rm_resource_consumer_handle_cb(consumer,
- IPA_RM_RESOURCE_RELEASED);
- else
- consumer->resource.state = IPA_RM_RELEASED;
- }
- complete_all(&consumer->request_consumer_in_progress);
-
- ipa3_rm_perf_profile_change(consumer->resource.name);
-bail:
- return driver_result;
-}
-
-int ipa3_rm_resource_consumer_request_work(struct ipa3_rm_resource_cons
- *consumer,
- enum ipa3_rm_resource_state prev_state,
- u32 prod_needed_bw,
- bool notify_completion)
-{
- int driver_result;
-
- IPA_RM_DBG("calling driver CB\n");
- driver_result = consumer->request_resource();
- IPA_RM_DBG("driver CB returned with %d\n", driver_result);
- if (driver_result == 0) {
- if (notify_completion) {
- ipa3_rm_resource_consumer_handle_cb(consumer,
- IPA_RM_RESOURCE_GRANTED);
- } else {
- consumer->resource.state = IPA_RM_GRANTED;
- ipa3_rm_perf_profile_change(consumer->resource.name);
- ipa3_resume_resource(consumer->resource.name);
- }
- } else if (driver_result != -EINPROGRESS) {
- consumer->resource.state = prev_state;
- consumer->resource.needed_bw -= prod_needed_bw;
- consumer->usage_count--;
- }
-
- return driver_result;
-}
-
-int ipa3_rm_resource_consumer_request(
- struct ipa3_rm_resource_cons *consumer,
- u32 prod_needed_bw,
- bool inc_usage_count,
- bool wake_client)
-{
- int result = 0;
- enum ipa3_rm_resource_state prev_state;
- struct ipa3_active_client_logging_info log_info;
-
- IPA_RM_DBG("%s state: %d\n",
- ipa3_rm_resource_str(consumer->resource.name),
- consumer->resource.state);
-
- prev_state = consumer->resource.state;
- consumer->resource.needed_bw += prod_needed_bw;
- switch (consumer->resource.state) {
- case IPA_RM_RELEASED:
- case IPA_RM_RELEASE_IN_PROGRESS:
- reinit_completion(&consumer->request_consumer_in_progress);
- consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
- IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
- ipa3_rm_resource_str(consumer->resource.name));
- if (prev_state == IPA_RM_RELEASE_IN_PROGRESS ||
- ipa3_inc_client_enable_clks_no_block(&log_info) != 0) {
- IPA_RM_DBG("async resume work for %s\n",
- ipa3_rm_resource_str(consumer->resource.name));
- ipa3_rm_wq_send_resume_cmd(consumer->resource.name,
- prev_state,
- prod_needed_bw);
- result = -EINPROGRESS;
- break;
- }
- result = ipa3_rm_resource_consumer_request_work(consumer,
- prev_state,
- prod_needed_bw,
- false);
- break;
- case IPA_RM_GRANTED:
- if (wake_client) {
- result = ipa3_rm_resource_consumer_request_work(
- consumer, prev_state, prod_needed_bw, false);
- break;
- }
- ipa3_rm_perf_profile_change(consumer->resource.name);
- break;
- case IPA_RM_REQUEST_IN_PROGRESS:
- result = -EINPROGRESS;
- break;
- default:
- consumer->resource.needed_bw -= prod_needed_bw;
- result = -EPERM;
- goto bail;
- }
- if (inc_usage_count)
- consumer->usage_count++;
-bail:
- IPA_RM_DBG("%s new state: %d\n",
- ipa3_rm_resource_str(consumer->resource.name),
- consumer->resource.state);
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-int ipa3_rm_resource_consumer_release(
- struct ipa3_rm_resource_cons *consumer,
- u32 prod_needed_bw,
- bool dec_usage_count)
-{
- int result = 0;
- enum ipa3_rm_resource_state save_state;
-
- IPA_RM_DBG("%s state: %d\n",
- ipa3_rm_resource_str(consumer->resource.name),
- consumer->resource.state);
- save_state = consumer->resource.state;
- consumer->resource.needed_bw -= prod_needed_bw;
- switch (consumer->resource.state) {
- case IPA_RM_RELEASED:
- break;
- case IPA_RM_GRANTED:
- case IPA_RM_REQUEST_IN_PROGRESS:
- if (dec_usage_count && consumer->usage_count > 0)
- consumer->usage_count--;
- if (consumer->usage_count == 0) {
- consumer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
- if (save_state == IPA_RM_REQUEST_IN_PROGRESS ||
- ipa3_suspend_resource_no_block(
- consumer->resource.name) != 0) {
- ipa3_rm_wq_send_suspend_cmd(
- consumer->resource.name,
- save_state,
- prod_needed_bw);
- result = -EINPROGRESS;
- goto bail;
- }
- result = ipa3_rm_resource_consumer_release_work(
- consumer,
- save_state,
- false);
- goto bail;
- } else if (consumer->resource.state == IPA_RM_GRANTED) {
- ipa3_rm_perf_profile_change(consumer->resource.name);
- }
- break;
- case IPA_RM_RELEASE_IN_PROGRESS:
- if (dec_usage_count && consumer->usage_count > 0)
- consumer->usage_count--;
- result = -EINPROGRESS;
- break;
- default:
- result = -EPERM;
- goto bail;
- }
-bail:
- IPA_RM_DBG("%s new state: %d\n",
- ipa3_rm_resource_str(consumer->resource.name),
- consumer->resource.state);
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_resource_producer_notify_clients() - notify
- * all registered clients of given producer
- * @producer: producer
- * @event: event to notify
- * @notify_registered_only: notify only clients registered by
- * ipa3_rm_register()
- */
-void ipa3_rm_resource_producer_notify_clients(
- struct ipa3_rm_resource_prod *producer,
- enum ipa_rm_event event,
- bool notify_registered_only)
-{
- struct ipa3_rm_notification_info *reg_info;
-
- IPA_RM_DBG("%s event: %d notify_registered_only: %d\n",
- ipa3_rm_resource_str(producer->resource.name),
- event,
- notify_registered_only);
-
- list_for_each_entry(reg_info, &(producer->event_listeners), link) {
- if (notify_registered_only && !reg_info->explicit)
- continue;
-
- IPA_RM_DBG("Notifying %s event: %d\n",
- ipa3_rm_resource_str(producer->resource.name),
- event);
- reg_info->reg_params.notify_cb(reg_info->reg_params.user_data,
- event,
- 0);
- IPA_RM_DBG("back from client CB\n");
- }
-}
-
-static int ipa3_rm_resource_producer_create(struct ipa_rm_resource **resource,
- struct ipa3_rm_resource_prod **producer,
- struct ipa_rm_create_params *create_params,
- int *max_peers)
-{
- int result = 0;
-
- *producer = kzalloc(sizeof(**producer), GFP_ATOMIC);
- if (*producer == NULL) {
- IPA_RM_ERR("no mem\n");
- result = -ENOMEM;
- goto bail;
- }
-
- INIT_LIST_HEAD(&((*producer)->event_listeners));
- result = ipa3_rm_resource_producer_register(*producer,
- &(create_params->reg_params),
- false);
- if (result) {
- IPA_RM_ERR("ipa3_rm_resource_producer_register() failed\n");
- goto register_fail;
- }
-
- (*resource) = (struct ipa_rm_resource *) (*producer);
- (*resource)->type = IPA_RM_PRODUCER;
- *max_peers = IPA_RM_RESOURCE_CONS_MAX;
- goto bail;
-register_fail:
- kfree(*producer);
-bail:
- return result;
-}
-
-static void ipa3_rm_resource_producer_delete(
- struct ipa3_rm_resource_prod *producer)
-{
- struct ipa3_rm_notification_info *reg_info;
- struct list_head *pos, *q;
-
- ipa3_rm_resource_producer_release(producer);
- list_for_each_safe(pos, q, &(producer->event_listeners)) {
- reg_info = list_entry(pos,
- struct ipa3_rm_notification_info,
- link);
- list_del(pos);
- kfree(reg_info);
- }
-}
-
-static int ipa3_rm_resource_consumer_create(struct ipa_rm_resource **resource,
- struct ipa3_rm_resource_cons **consumer,
- struct ipa_rm_create_params *create_params,
- int *max_peers)
-{
- int result = 0;
-
- *consumer = kzalloc(sizeof(**consumer), GFP_ATOMIC);
- if (*consumer == NULL) {
- IPA_RM_ERR("no mem\n");
- result = -ENOMEM;
- goto bail;
- }
-
- (*consumer)->request_resource = create_params->request_resource;
- (*consumer)->release_resource = create_params->release_resource;
- (*resource) = (struct ipa_rm_resource *) (*consumer);
- (*resource)->type = IPA_RM_CONSUMER;
- init_completion(&((*consumer)->request_consumer_in_progress));
- *max_peers = IPA_RM_RESOURCE_PROD_MAX;
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_resource_create() - creates resource
- * @create_params: [in] parameters needed
- * for resource initialization with IPA RM
- * @resource: [out] created resource
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_rm_resource_create(
- struct ipa_rm_create_params *create_params,
- struct ipa_rm_resource **resource)
-{
- struct ipa3_rm_resource_cons *consumer;
- struct ipa3_rm_resource_prod *producer;
- int max_peers;
- int result = 0;
-
- if (!create_params) {
- result = -EINVAL;
- goto bail;
- }
-
- if (IPA_RM_RESORCE_IS_PROD(create_params->name)) {
- result = ipa3_rm_resource_producer_create(resource,
- &producer,
- create_params,
- &max_peers);
- if (result) {
- IPA_RM_ERR("ipa3_rm_resource_producer_create failed\n");
- goto bail;
- }
- } else if (IPA_RM_RESORCE_IS_CONS(create_params->name)) {
- result = ipa3_rm_resource_consumer_create(resource,
- &consumer,
- create_params,
- &max_peers);
- if (result) {
- IPA_RM_ERR("ipa3_rm_resource_producer_create failed\n");
- goto bail;
- }
- } else {
- IPA_RM_ERR("invalied resource\n");
- result = -EPERM;
- goto bail;
- }
-
- result = ipa3_rm_peers_list_create(max_peers,
- &((*resource)->peers_list));
- if (result) {
- IPA_RM_ERR("ipa3_rm_peers_list_create failed\n");
- goto peers_alloc_fail;
- }
- (*resource)->name = create_params->name;
- (*resource)->floor_voltage = create_params->floor_voltage;
- (*resource)->state = IPA_RM_RELEASED;
- goto bail;
-
-peers_alloc_fail:
- ipa3_rm_resource_delete(*resource);
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_resource_delete() - deletes resource
- * @resource: [in] resource to be deleted
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_rm_resource_delete(struct ipa_rm_resource *resource)
-{
- struct ipa_rm_resource *consumer;
- struct ipa_rm_resource *producer;
- int peers_index;
- int result = 0;
- int list_size;
-
- if (!resource) {
- IPA_RM_ERR("invalid params\n");
- return -EINVAL;
- }
-
- IPA_RM_DBG("ipa3_rm_resource_delete ENTER with resource %d\n",
- resource->name);
- if (resource->type == IPA_RM_PRODUCER) {
- if (resource->peers_list) {
- list_size = ipa3_rm_peers_list_get_size(
- resource->peers_list);
- for (peers_index = 0;
- peers_index < list_size;
- peers_index++) {
- consumer = ipa3_rm_peers_list_get_resource(
- peers_index,
- resource->peers_list);
- if (consumer)
- ipa3_rm_resource_delete_dependency(
- resource,
- consumer);
- }
- }
-
- ipa3_rm_resource_producer_delete(
- (struct ipa3_rm_resource_prod *) resource);
- } else if (resource->type == IPA_RM_CONSUMER) {
- if (resource->peers_list) {
- list_size = ipa3_rm_peers_list_get_size(
- resource->peers_list);
- for (peers_index = 0;
- peers_index < list_size;
- peers_index++){
- producer = ipa3_rm_peers_list_get_resource(
- peers_index,
- resource->peers_list);
- if (producer)
- ipa3_rm_resource_delete_dependency(
- producer,
- resource);
- }
- }
- }
- ipa3_rm_peers_list_delete(resource->peers_list);
- kfree(resource);
- return result;
-}
-
-/**
- * ipa3_rm_resource_producer_register() - register a client with a producer
- * @producer: [in] producer resource
- * @reg_params: [in] registration parameters
- * @explicit: [in] registered explicitly by ipa3_rm_register()
- *
- * Returns: 0 on success, negative on failure
- *
- * Producer resource is expected for this call.
- *
- */
-int ipa3_rm_resource_producer_register(struct ipa3_rm_resource_prod *producer,
- struct ipa_rm_register_params *reg_params,
- bool explicit)
-{
- int result = 0;
- struct ipa3_rm_notification_info *reg_info;
- struct list_head *pos;
-
- if (!producer || !reg_params) {
- IPA_RM_ERR("invalid params\n");
- result = -EPERM;
- goto bail;
- }
-
- list_for_each(pos, &(producer->event_listeners)) {
- reg_info = list_entry(pos,
- struct ipa3_rm_notification_info,
- link);
- if (reg_info->reg_params.notify_cb ==
- reg_params->notify_cb) {
- IPA_RM_ERR("already registered\n");
- result = -EPERM;
- goto bail;
- }
-
- }
-
- reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
- if (reg_info == NULL) {
- IPA_RM_ERR("no mem\n");
- result = -ENOMEM;
- goto bail;
- }
-
- reg_info->reg_params.user_data = reg_params->user_data;
- reg_info->reg_params.notify_cb = reg_params->notify_cb;
- reg_info->explicit = explicit;
- INIT_LIST_HEAD(&reg_info->link);
- list_add(&reg_info->link, &producer->event_listeners);
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_resource_producer_deregister() - deregister a client from a producer
- * @producer: [in] producer resource
- * @reg_params: [in] registration parameters
- *
- * Returns: 0 on success, negative on failure
- *
- * Producer resource is expected for this call.
- * This function deletes only a single instance of the
- * registration info.
- *
- */
-int ipa3_rm_resource_producer_deregister(struct ipa3_rm_resource_prod *producer,
- struct ipa_rm_register_params *reg_params)
-{
- int result = -EINVAL;
- struct ipa3_rm_notification_info *reg_info;
- struct list_head *pos, *q;
-
- if (!producer || !reg_params) {
- IPA_RM_ERR("invalid params\n");
- return -EINVAL;
- }
-
- list_for_each_safe(pos, q, &(producer->event_listeners)) {
- reg_info = list_entry(pos,
- struct ipa3_rm_notification_info,
- link);
- if (reg_info->reg_params.notify_cb ==
- reg_params->notify_cb) {
- list_del(pos);
- kfree(reg_info);
- result = 0;
- goto bail;
- }
- }
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_resource_add_dependency() - add dependency between two
- * given resources
- * @resource: [in] dependent resource
- * @depends_on: [in] depends_on resource
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_rm_resource_add_dependency(struct ipa_rm_resource *resource,
- struct ipa_rm_resource *depends_on)
-{
- int result = 0;
- int consumer_result;
-
- if (!resource || !depends_on) {
- IPA_RM_ERR("invalid params\n");
- return -EINVAL;
- }
-
- if (ipa3_rm_peers_list_check_dependency(resource->peers_list,
- resource->name,
- depends_on->peers_list,
- depends_on->name)) {
- IPA_RM_ERR("dependency already exists\n");
- return -EEXIST;
- }
-
- ipa3_rm_peers_list_add_peer(resource->peers_list, depends_on);
- ipa3_rm_peers_list_add_peer(depends_on->peers_list, resource);
- IPA_RM_DBG("%s state: %d\n", ipa3_rm_resource_str(resource->name),
- resource->state);
-
- resource->needed_bw += depends_on->max_bw;
- switch (resource->state) {
- case IPA_RM_RELEASED:
- case IPA_RM_RELEASE_IN_PROGRESS:
- break;
- case IPA_RM_GRANTED:
- case IPA_RM_REQUEST_IN_PROGRESS:
- {
- enum ipa3_rm_resource_state prev_state = resource->state;
-
- resource->state = IPA_RM_REQUEST_IN_PROGRESS;
- ((struct ipa3_rm_resource_prod *)
- resource)->pending_request++;
- consumer_result = ipa3_rm_resource_consumer_request(
- (struct ipa3_rm_resource_cons *)depends_on,
- resource->max_bw,
- true, false);
- if (consumer_result != -EINPROGRESS) {
- resource->state = prev_state;
- ((struct ipa3_rm_resource_prod *)
- resource)->pending_request--;
- ipa3_rm_perf_profile_change(resource->name);
- }
- result = consumer_result;
- break;
- }
- default:
- IPA_RM_ERR("invalid state\n");
- result = -EPERM;
- goto bail;
- }
-bail:
- IPA_RM_DBG("%s new state: %d\n", ipa3_rm_resource_str(resource->name),
- resource->state);
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_resource_delete_dependency() - delete dependency between two
- * given resources
- * @resource: [in] dependent resource
- * @depends_on: [in] depends_on resource
- *
- * Returns: 0 on success, negative on failure
- * In case the resource state was changed, a notification
- * will be sent to the RM client
- */
-int ipa3_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
- struct ipa_rm_resource *depends_on)
-{
- int result = 0;
- bool state_changed = false;
- bool release_consumer = false;
- enum ipa_rm_event evt;
-
- if (!resource || !depends_on) {
- IPA_RM_ERR("invalid params\n");
- return -EINVAL;
- }
-
- if (!ipa3_rm_peers_list_check_dependency(resource->peers_list,
- resource->name,
- depends_on->peers_list,
- depends_on->name)) {
- IPA_RM_ERR("dependency does not exist\n");
- return -EINVAL;
- }
- IPA_RM_DBG("%s state: %d\n", ipa3_rm_resource_str(resource->name),
- resource->state);
-
- resource->needed_bw -= depends_on->max_bw;
- switch (resource->state) {
- case IPA_RM_RELEASED:
- break;
- case IPA_RM_GRANTED:
- ipa3_rm_perf_profile_change(resource->name);
- release_consumer = true;
- break;
- case IPA_RM_RELEASE_IN_PROGRESS:
- if (((struct ipa3_rm_resource_prod *)
- resource)->pending_release > 0)
- ((struct ipa3_rm_resource_prod *)
- resource)->pending_release--;
- if (depends_on->state == IPA_RM_RELEASE_IN_PROGRESS &&
- ((struct ipa3_rm_resource_prod *)
- resource)->pending_release == 0) {
- resource->state = IPA_RM_RELEASED;
- state_changed = true;
- evt = IPA_RM_RESOURCE_RELEASED;
- ipa3_rm_perf_profile_change(resource->name);
- }
- break;
- case IPA_RM_REQUEST_IN_PROGRESS:
- release_consumer = true;
- if (((struct ipa3_rm_resource_prod *)
- resource)->pending_request > 0)
- ((struct ipa3_rm_resource_prod *)
- resource)->pending_request--;
- if (depends_on->state == IPA_RM_REQUEST_IN_PROGRESS &&
- ((struct ipa3_rm_resource_prod *)
- resource)->pending_request == 0) {
- resource->state = IPA_RM_GRANTED;
- state_changed = true;
- evt = IPA_RM_RESOURCE_GRANTED;
- ipa3_rm_perf_profile_change(resource->name);
- }
- break;
- default:
- result = -EINVAL;
- goto bail;
- }
- if (state_changed) {
- (void) ipa3_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
- resource->name,
- evt,
- false);
- }
- IPA_RM_DBG("%s new state: %d\n", ipa3_rm_resource_str(resource->name),
- resource->state);
- ipa3_rm_peers_list_remove_peer(resource->peers_list,
- depends_on->name);
- ipa3_rm_peers_list_remove_peer(depends_on->peers_list,
- resource->name);
- if (release_consumer)
- (void) ipa3_rm_resource_consumer_release(
- (struct ipa3_rm_resource_cons *)depends_on,
- resource->max_bw,
- true);
-bail:
- IPA_RM_DBG("EXIT with %d\n", result);
-
- return result;
-}
-
-/**
- * ipa3_rm_resource_producer_request() - producer resource request
- * @producer: [in] producer
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_rm_resource_producer_request(struct ipa3_rm_resource_prod *producer)
-{
- int peers_index;
- int result = 0;
- struct ipa_rm_resource *consumer;
- int consumer_result;
- enum ipa3_rm_resource_state state;
-
- state = producer->resource.state;
- switch (producer->resource.state) {
- case IPA_RM_RELEASED:
- case IPA_RM_RELEASE_IN_PROGRESS:
- producer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
- break;
- case IPA_RM_GRANTED:
- goto unlock_and_bail;
- case IPA_RM_REQUEST_IN_PROGRESS:
- result = -EINPROGRESS;
- goto unlock_and_bail;
- default:
- result = -EINVAL;
- goto unlock_and_bail;
- }
-
- producer->pending_request = 0;
- for (peers_index = 0;
- peers_index < ipa3_rm_peers_list_get_size(
- producer->resource.peers_list);
- peers_index++) {
- consumer = ipa3_rm_peers_list_get_resource(peers_index,
- producer->resource.peers_list);
- if (consumer) {
- producer->pending_request++;
- consumer_result = ipa3_rm_resource_consumer_request(
- (struct ipa3_rm_resource_cons *)consumer,
- producer->resource.max_bw,
- true, false);
- if (consumer_result == -EINPROGRESS) {
- result = -EINPROGRESS;
- } else {
- producer->pending_request--;
- if (consumer_result != 0) {
- result = consumer_result;
- goto bail;
- }
- }
- }
- }
-
- if (producer->pending_request == 0) {
- producer->resource.state = IPA_RM_GRANTED;
- ipa3_rm_perf_profile_change(producer->resource.name);
- (void) ipa3_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
- producer->resource.name,
- IPA_RM_RESOURCE_GRANTED,
- true);
- result = 0;
- }
-unlock_and_bail:
- if (state != producer->resource.state)
- IPA_RM_DBG("%s state changed %d->%d\n",
- ipa3_rm_resource_str(producer->resource.name),
- state,
- producer->resource.state);
-bail:
- return result;
-}
-
-/**
- * ipa3_rm_resource_producer_release() - producer resource release
- * @producer: [in] producer resource
- *
- * Returns: 0 on success, negative on failure
- *
- */
-int ipa3_rm_resource_producer_release(struct ipa3_rm_resource_prod *producer)
-{
- int peers_index;
- int result = 0;
- struct ipa_rm_resource *consumer;
- int consumer_result;
- enum ipa3_rm_resource_state state;
-
- state = producer->resource.state;
- switch (producer->resource.state) {
- case IPA_RM_RELEASED:
- goto bail;
- case IPA_RM_GRANTED:
- case IPA_RM_REQUEST_IN_PROGRESS:
- producer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
- break;
- case IPA_RM_RELEASE_IN_PROGRESS:
- result = -EINPROGRESS;
- goto bail;
- default:
- result = -EPERM;
- goto bail;
- }
-
- producer->pending_release = 0;
- for (peers_index = 0;
- peers_index < ipa3_rm_peers_list_get_size(
- producer->resource.peers_list);
- peers_index++) {
- consumer = ipa3_rm_peers_list_get_resource(peers_index,
- producer->resource.peers_list);
- if (consumer) {
- producer->pending_release++;
- consumer_result = ipa3_rm_resource_consumer_release(
- (struct ipa3_rm_resource_cons *)consumer,
- producer->resource.max_bw,
- true);
- producer->pending_release--;
- }
- }
-
- if (producer->pending_release == 0) {
- producer->resource.state = IPA_RM_RELEASED;
- ipa3_rm_perf_profile_change(producer->resource.name);
- (void) ipa3_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
- producer->resource.name,
- IPA_RM_RESOURCE_RELEASED,
- true);
- }
-bail:
- if (state != producer->resource.state)
- IPA_RM_DBG("%s state changed %d->%d\n",
- ipa3_rm_resource_str(producer->resource.name),
- state,
- producer->resource.state);
-
- return result;
-}
-
-static void ipa3_rm_resource_producer_handle_cb(
- struct ipa3_rm_resource_prod *producer,
- enum ipa_rm_event event)
-{
- IPA_RM_DBG("%s state: %d event: %d pending_request: %d\n",
- ipa3_rm_resource_str(producer->resource.name),
- producer->resource.state,
- event,
- producer->pending_request);
-
- switch (producer->resource.state) {
- case IPA_RM_REQUEST_IN_PROGRESS:
- if (event != IPA_RM_RESOURCE_GRANTED)
- goto unlock_and_bail;
- if (producer->pending_request > 0) {
- producer->pending_request--;
- if (producer->pending_request == 0) {
- producer->resource.state =
- IPA_RM_GRANTED;
- ipa3_rm_perf_profile_change(
- producer->resource.name);
- ipa3_rm_resource_producer_notify_clients(
- producer,
- IPA_RM_RESOURCE_GRANTED,
- false);
- goto bail;
- }
- }
- break;
- case IPA_RM_RELEASE_IN_PROGRESS:
- if (event != IPA_RM_RESOURCE_RELEASED)
- goto unlock_and_bail;
- if (producer->pending_release > 0) {
- producer->pending_release--;
- if (producer->pending_release == 0) {
- producer->resource.state =
- IPA_RM_RELEASED;
- ipa3_rm_perf_profile_change(
- producer->resource.name);
- ipa3_rm_resource_producer_notify_clients(
- producer,
- IPA_RM_RESOURCE_RELEASED,
- false);
- goto bail;
- }
- }
- break;
- case IPA_RM_GRANTED:
- case IPA_RM_RELEASED:
- default:
- goto unlock_and_bail;
- }
-unlock_and_bail:
- IPA_RM_DBG("%s new state: %d\n",
- ipa3_rm_resource_str(producer->resource.name),
- producer->resource.state);
-bail:
- return;
-}
-
-/**
- * ipa3_rm_resource_consumer_handle_cb() - propagates resource
- * notification to all dependent producers
- * @consumer: [in] notifying consumer resource
- * @event: [in] the event to propagate
- *
- */
-void ipa3_rm_resource_consumer_handle_cb(struct ipa3_rm_resource_cons *consumer,
- enum ipa_rm_event event)
-{
- int peers_index;
- struct ipa_rm_resource *producer;
-
- if (!consumer) {
- IPA_RM_ERR("invalid params\n");
- return;
- }
- IPA_RM_DBG("%s state: %d event: %d\n",
- ipa3_rm_resource_str(consumer->resource.name),
- consumer->resource.state,
- event);
-
- switch (consumer->resource.state) {
- case IPA_RM_REQUEST_IN_PROGRESS:
- if (event == IPA_RM_RESOURCE_RELEASED)
- goto bail;
- consumer->resource.state = IPA_RM_GRANTED;
- ipa3_rm_perf_profile_change(consumer->resource.name);
- ipa3_resume_resource(consumer->resource.name);
- complete_all(&consumer->request_consumer_in_progress);
- break;
- case IPA_RM_RELEASE_IN_PROGRESS:
- if (event == IPA_RM_RESOURCE_GRANTED)
- goto bail;
- consumer->resource.state = IPA_RM_RELEASED;
- break;
- case IPA_RM_GRANTED:
- case IPA_RM_RELEASED:
- default:
- goto bail;
- }
-
- for (peers_index = 0;
- peers_index < ipa3_rm_peers_list_get_size(
- consumer->resource.peers_list);
- peers_index++) {
- producer = ipa3_rm_peers_list_get_resource(peers_index,
- consumer->resource.peers_list);
- if (producer)
- ipa3_rm_resource_producer_handle_cb(
- (struct ipa3_rm_resource_prod *)
- producer,
- event);
- }
-
- return;
-bail:
- IPA_RM_DBG("%s new state: %d\n",
- ipa3_rm_resource_str(consumer->resource.name),
- consumer->resource.state);
-}
-
-/*
- * ipa3_rm_resource_set_perf_profile() - sets the performance profile
- * of a resource
- *
- * @resource: [in] resource
- * @profile: [in] profile to be set
- *
- * Sets the profile for the given resource. In case the resource is
- * granted, the bandwidth vote of the resource is updated.
- */
-int ipa3_rm_resource_set_perf_profile(struct ipa_rm_resource *resource,
- struct ipa_rm_perf_profile *profile)
-{
- int peers_index;
- struct ipa_rm_resource *peer;
-
- if (!resource || !profile) {
- IPA_RM_ERR("invalid params\n");
- return -EINVAL;
- }
-
- if (profile->max_supported_bandwidth_mbps == resource->max_bw) {
- IPA_RM_DBG("same profile\n");
- return 0;
- }
-
- if ((resource->type == IPA_RM_PRODUCER &&
- (resource->state == IPA_RM_GRANTED ||
- resource->state == IPA_RM_REQUEST_IN_PROGRESS)) ||
- resource->type == IPA_RM_CONSUMER) {
- for (peers_index = 0;
- peers_index < ipa3_rm_peers_list_get_size(
- resource->peers_list);
- peers_index++) {
- peer = ipa3_rm_peers_list_get_resource(peers_index,
- resource->peers_list);
- if (!peer)
- continue;
- peer->needed_bw -= resource->max_bw;
- peer->needed_bw +=
- profile->max_supported_bandwidth_mbps;
- if (peer->state == IPA_RM_GRANTED)
- ipa3_rm_perf_profile_change(peer->name);
- }
- }
-
- resource->max_bw = profile->max_supported_bandwidth_mbps;
- if (resource->state == IPA_RM_GRANTED)
- ipa3_rm_perf_profile_change(resource->name);
-
- return 0;
-}
-
-
-/*
- * ipa3_rm_resource_producer_print_stat() - prints the
- * resource status and all of its dependencies
- *
- * @resource: [in] producer resource
- * @buf: [in] the buffer to print into
- * @size: [in] buffer size
- *
- * Returns: number of bytes used on success, negative on failure
- */
-int ipa3_rm_resource_producer_print_stat(
- struct ipa_rm_resource *resource,
- char *buf,
- int size)
-{
- int i;
- int nbytes;
- int cnt = 0;
- struct ipa_rm_resource *consumer;
-
- if (!buf || size < 0)
- return -EINVAL;
-
- nbytes = scnprintf(buf + cnt, size - cnt, "%s",
- ipa3_rm_resource_str(resource->name));
- cnt += nbytes;
- nbytes = scnprintf(buf + cnt, size - cnt, "[");
- cnt += nbytes;
-
- switch (resource->state) {
- case IPA_RM_RELEASED:
- nbytes = scnprintf(buf + cnt, size - cnt,
- "Released] -> ");
- cnt += nbytes;
- break;
- case IPA_RM_REQUEST_IN_PROGRESS:
- nbytes = scnprintf(buf + cnt, size - cnt,
- "Request In Progress] -> ");
- cnt += nbytes;
- break;
- case IPA_RM_GRANTED:
- nbytes = scnprintf(buf + cnt, size - cnt,
- "Granted] -> ");
- cnt += nbytes;
- break;
- case IPA_RM_RELEASE_IN_PROGRESS:
- nbytes = scnprintf(buf + cnt, size - cnt,
- "Release In Progress] -> ");
- cnt += nbytes;
- break;
- default:
- return -EPERM;
- }
-
- for (i = 0; i < resource->peers_list->max_peers; ++i) {
- consumer =
- ipa3_rm_peers_list_get_resource(
- i,
- resource->peers_list);
- if (consumer) {
- nbytes = scnprintf(buf + cnt, size - cnt, "%s",
- ipa3_rm_resource_str(consumer->name));
- cnt += nbytes;
- nbytes = scnprintf(buf + cnt, size - cnt, "[");
- cnt += nbytes;
-
- switch (consumer->state) {
- case IPA_RM_RELEASED:
- nbytes = scnprintf(buf + cnt, size - cnt,
- "Released], ");
- cnt += nbytes;
- break;
- case IPA_RM_REQUEST_IN_PROGRESS:
- nbytes = scnprintf(buf + cnt, size - cnt,
- "Request In Progress], ");
- cnt += nbytes;
- break;
- case IPA_RM_GRANTED:
- nbytes = scnprintf(buf + cnt, size - cnt,
- "Granted], ");
- cnt += nbytes;
- break;
- case IPA_RM_RELEASE_IN_PROGRESS:
- nbytes = scnprintf(buf + cnt, size - cnt,
- "Release In Progress], ");
- cnt += nbytes;
- break;
- default:
- return -EPERM;
- }
- }
- }
- nbytes = scnprintf(buf + cnt, size - cnt, "\n");
- cnt += nbytes;
-
- return cnt;
-}
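Taken together, the helpers deleted above implement a create -> add_dependency -> request/release lifecycle. A minimal sketch of wiring a hypothetical producer/consumer pair (the example_* stubs stand in for a real client's callbacks):

#include <linux/string.h>
#include "ipa_rm_resource.h"

static void example_notify(void *user_data, enum ipa_rm_event event,
			   unsigned long data) { }
static int example_request(void) { return 0; }
static int example_release(void) { return 0; }

static int example_pair(void)
{
	struct ipa_rm_create_params params;
	struct ipa_rm_resource *prod, *cons;
	int ret;

	memset(&params, 0, sizeof(params));
	params.name = IPA_RM_RESOURCE_USB_PROD;
	params.reg_params.notify_cb = example_notify;
	ret = ipa3_rm_resource_create(&params, &prod);
	if (ret)
		return ret;

	memset(&params, 0, sizeof(params));
	params.name = IPA_RM_RESOURCE_USB_CONS;
	params.request_resource = example_request;
	params.release_resource = example_release;
	ret = ipa3_rm_resource_create(&params, &cons);
	if (ret)
		goto fail;

	/* A granted producer immediately requests the new consumer. */
	return ipa3_rm_resource_add_dependency(prod, cons);

fail:
	ipa3_rm_resource_delete(prod);
	return ret;
}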
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.h
deleted file mode 100644
index 34d228ea3666..000000000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _IPA_RM_RESOURCE_H_
-#define _IPA_RM_RESOURCE_H_
-
-#include <linux/list.h>
-#include <linux/ipa.h>
-#include "ipa_rm_peers_list.h"
-
-/**
- * enum ipa3_rm_resource_state - resource state
- */
-enum ipa3_rm_resource_state {
- IPA_RM_RELEASED,
- IPA_RM_REQUEST_IN_PROGRESS,
- IPA_RM_GRANTED,
- IPA_RM_RELEASE_IN_PROGRESS
-};
-
-/**
- * enum ipa3_rm_resource_type - IPA resource manager resource type
- */
-enum ipa3_rm_resource_type {
- IPA_RM_PRODUCER,
- IPA_RM_CONSUMER
-};
-
-/**
- * struct ipa3_rm_notification_info - notification information
- * of IPA RM client
- * @reg_params: registration parameters
- * @explicit: registered explicitly by ipa3_rm_register()
- * @link: link to the list of all registered clients information
- */
-struct ipa3_rm_notification_info {
- struct ipa_rm_register_params reg_params;
- bool explicit;
- struct list_head link;
-};
-
-/**
- * struct ipa_rm_resource - IPA RM resource
- * @name: name identifying resource
- * @type: type of resource (PRODUCER or CONSUMER)
- * @floor_voltage: minimum voltage level for operation
- * @max_bw: maximum bandwidth required for resource in Mbps
- * @state: state of the resource
- * @peers_list: list of the peers of the resource
- */
-struct ipa_rm_resource {
- enum ipa_rm_resource_name name;
- enum ipa3_rm_resource_type type;
- enum ipa_voltage_level floor_voltage;
- u32 max_bw;
- u32 needed_bw;
- enum ipa3_rm_resource_state state;
- struct ipa3_rm_peers_list *peers_list;
-};
-
-/**
- * struct ipa3_rm_resource_cons - IPA RM consumer
- * @resource: resource
- * @usage_count: number of producers in GRANTED / REQUESTED state
- * using this consumer
- * @request_consumer_in_progress: when set, the consumer is in its
- * request phase
- * @request_resource: function which should be called to request resource
- * from resource manager
- * @release_resource: function which should be called to release resource
- * from resource manager
- * Add new fields after @resource only.
- */
-struct ipa3_rm_resource_cons {
- struct ipa_rm_resource resource;
- int usage_count;
- struct completion request_consumer_in_progress;
- int (*request_resource)(void);
- int (*release_resource)(void);
-};
-
-/**
- * struct ipa3_rm_resource_prod - IPA RM producer
- * @resource: resource
- * @event_listeners: list of clients registered with this producer
- * for notifications of resource state changes
- * @pending_request: number of consumer requests still in progress
- * @pending_release: number of consumer releases still in progress
- * Add new fields after @resource only.
- */
-struct ipa3_rm_resource_prod {
- struct ipa_rm_resource resource;
- struct list_head event_listeners;
- int pending_request;
- int pending_release;
-};
-
-int ipa3_rm_resource_create(
- struct ipa_rm_create_params *create_params,
- struct ipa_rm_resource **resource);
-
-int ipa3_rm_resource_delete(struct ipa_rm_resource *resource);
-
-int ipa3_rm_resource_producer_register(struct ipa3_rm_resource_prod *producer,
- struct ipa_rm_register_params *reg_params,
- bool explicit);
-
-int ipa3_rm_resource_producer_deregister(struct ipa3_rm_resource_prod *producer,
- struct ipa_rm_register_params *reg_params);
-
-int ipa3_rm_resource_add_dependency(struct ipa_rm_resource *resource,
- struct ipa_rm_resource *depends_on);
-
-int ipa3_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
- struct ipa_rm_resource *depends_on);
-
-int ipa3_rm_resource_producer_request(struct ipa3_rm_resource_prod *producer);
-
-int ipa3_rm_resource_producer_release(struct ipa3_rm_resource_prod *producer);
-
-int ipa3_rm_resource_consumer_request(struct ipa3_rm_resource_cons *consumer,
- u32 needed_bw,
- bool inc_usage_count,
- bool wake_client);
-
-int ipa3_rm_resource_consumer_release(struct ipa3_rm_resource_cons *consumer,
- u32 needed_bw,
- bool dec_usage_count);
-
-int ipa3_rm_resource_set_perf_profile(struct ipa_rm_resource *resource,
- struct ipa_rm_perf_profile *profile);
-
-void ipa3_rm_resource_consumer_handle_cb(struct ipa3_rm_resource_cons *consumer,
- enum ipa_rm_event event);
-
-void ipa3_rm_resource_producer_notify_clients(
- struct ipa3_rm_resource_prod *producer,
- enum ipa_rm_event event,
- bool notify_registered_only);
-
-int ipa3_rm_resource_producer_print_stat(
- struct ipa_rm_resource *resource,
- char *buf,
- int size);
-
-int ipa3_rm_resource_consumer_request_work(struct ipa3_rm_resource_cons
- *consumer,
- enum ipa3_rm_resource_state prev_state,
- u32 needed_bw,
- bool notify_completion);
-
-int ipa3_rm_resource_consumer_release_work(
- struct ipa3_rm_resource_cons *consumer,
- enum ipa3_rm_resource_state prev_state,
- bool notify_completion);
-
-#endif /* _IPA_RM_RESOURCE_H_ */
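For reference, the request/release paths walk the four states of enum ipa3_rm_resource_state roughly as follows (a summary, not part of the original header):

/*
 * RELEASED --request--> REQUEST_IN_PROGRESS --all peers granted--> GRANTED
 * GRANTED --release--> RELEASE_IN_PROGRESS --all peers released--> RELEASED
 */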
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 1e03e6497ad6..7bc11a339633 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -383,7 +383,7 @@ int ipa3_uc_panic_notifier(struct notifier_block *this,
unsigned long event, void *ptr)
{
int result = 0;
- struct ipa3_active_client_logging_info log_info;
+ struct ipa_active_client_logging_info log_info;
IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 1c4f812bc40f..5ea39b732ee6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -20,6 +20,7 @@
#include <linux/elf.h>
#include "ipa_i.h"
#include "ipahal/ipahal.h"
+#include "../ipa_rm_i.h"
#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
@@ -608,7 +609,7 @@ int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
/* before gating IPA clocks do TAG process */
ipa3_ctx->tag_process_before_gating = true;
- IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa3_rm_resource_str(resource));
+ IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));
return 0;
}
@@ -631,7 +632,7 @@ int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
struct ipa_ep_cfg_ctrl suspend;
int ipa_ep_idx;
unsigned long flags;
- struct ipa3_active_client_logging_info log_info;
+ struct ipa_active_client_logging_info log_info;
if (ipa3_active_clients_trylock(&flags) == 0)
return -EPERM;
@@ -671,7 +672,7 @@ int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
if (res == 0) {
IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
- ipa3_rm_resource_str(resource));
+ ipa_rm_resource_str(resource));
ipa3_active_clients_log_dec(&log_info, true);
ipa3_ctx->ipa3_active_clients.cnt--;
IPADBG("active clients = %d\n",
@@ -4519,24 +4520,6 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
- api_ctrl->ipa_rm_create_resource = ipa3_rm_create_resource;
- api_ctrl->ipa_rm_delete_resource = ipa3_rm_delete_resource;
- api_ctrl->ipa_rm_register = ipa3_rm_register;
- api_ctrl->ipa_rm_deregister = ipa3_rm_deregister;
- api_ctrl->ipa_rm_set_perf_profile = ipa3_rm_set_perf_profile;
- api_ctrl->ipa_rm_add_dependency = ipa3_rm_add_dependency;
- api_ctrl->ipa_rm_delete_dependency = ipa3_rm_delete_dependency;
- api_ctrl->ipa_rm_request_resource = ipa3_rm_request_resource;
- api_ctrl->ipa_rm_release_resource = ipa3_rm_release_resource;
- api_ctrl->ipa_rm_notify_completion = ipa3_rm_notify_completion;
- api_ctrl->ipa_rm_inactivity_timer_init =
- ipa3_rm_inactivity_timer_init;
- api_ctrl->ipa_rm_inactivity_timer_destroy =
- ipa3_rm_inactivity_timer_destroy;
- api_ctrl->ipa_rm_inactivity_timer_request_resource =
- ipa3_rm_inactivity_timer_request_resource;
- api_ctrl->ipa_rm_inactivity_timer_release_resource =
- ipa3_rm_inactivity_timer_release_resource;
api_ctrl->teth_bridge_init = ipa3_teth_bridge_init;
api_ctrl->teth_bridge_disconnect = ipa3_teth_bridge_disconnect;
api_ctrl->teth_bridge_connect = ipa3_teth_bridge_connect;
@@ -4577,13 +4560,22 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_smmu_domain = ipa3_get_smmu_domain;
api_ctrl->ipa_disable_apps_wan_cons_deaggr =
ipa3_disable_apps_wan_cons_deaggr;
- api_ctrl->ipa_rm_add_dependency_sync = ipa3_rm_add_dependency_sync;
api_ctrl->ipa_get_dma_dev = ipa3_get_dma_dev;
api_ctrl->ipa_release_wdi_mapping = ipa3_release_wdi_mapping;
api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping;
api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb;
+ api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks;
+ api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks;
+ api_ctrl->ipa_inc_client_enable_clks_no_block =
+ ipa3_inc_client_enable_clks_no_block;
+ api_ctrl->ipa_suspend_resource_no_block =
+ ipa3_suspend_resource_no_block;
+ api_ctrl->ipa_resume_resource = ipa3_resume_resource;
+ api_ctrl->ipa_suspend_resource_sync = ipa3_suspend_resource_sync;
+ api_ctrl->ipa_set_required_perf_profile =
+ ipa3_set_required_perf_profile;
return 0;
}
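The ipa3_bind_api_controller() changes above drop the RM entry points (now served by the common ipa_rm core) and instead expose the v3 clock and resource-suspend hooks through the generic API controller table. A hedged sketch of the wrapper-side dispatch this enables; the ipa_api_ctrl name is an assumption:

int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource)
{
	/* Dispatch through the table bound by ipa3_bind_api_controller(). */
	if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_suspend_resource_sync)
		return -EPERM;
	return ipa_api_ctrl->ipa_suspend_resource_sync(resource);
}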
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 7e7848dea52c..8a34f006d3ee 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1086,7 +1086,7 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
send:
/* IPA_RM checking start */
- ret = ipa3_rm_inactivity_timer_request_resource(
+ ret = ipa_rm_inactivity_timer_request_resource(
IPA_RM_RESOURCE_WWAN_0_PROD);
if (ret == -EINPROGRESS) {
netif_stop_queue(dev);
@@ -1119,7 +1119,7 @@ send:
dev->stats.tx_bytes += skb->len;
ret = NETDEV_TX_OK;
out:
- ipa3_rm_inactivity_timer_release_resource(
+ ipa_rm_inactivity_timer_release_resource(
IPA_RM_RESOURCE_WWAN_0_PROD);
return ret;
}
@@ -1171,7 +1171,7 @@ static void apps_ipa_tx_complete_notify(void *priv,
}
__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
dev_kfree_skb_any(skb);
- ipa3_rm_inactivity_timer_release_resource(
+ ipa_rm_inactivity_timer_release_resource(
IPA_RM_RESOURCE_WWAN_0_PROD);
}
@@ -1691,9 +1691,9 @@ static void ipa3_q6_prod_rm_request_resource(struct work_struct *work)
{
int ret = 0;
- ret = ipa3_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
+ ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
if (ret < 0 && ret != -EINPROGRESS) {
- IPAWANERR("%s: ipa3_rm_request_resource failed %d\n", __func__,
+ IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
ret);
return;
}
@@ -1710,9 +1710,9 @@ static void ipa3_q6_prod_rm_release_resource(struct work_struct *work)
{
int ret = 0;
- ret = ipa3_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
+ ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
if (ret < 0 && ret != -EINPROGRESS) {
- IPAWANERR("%s: ipa3_rm_release_resource failed %d\n", __func__,
+ IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
ret);
return;
}
@@ -1756,44 +1756,44 @@ static int ipa3_q6_initialize_rm(void)
memset(&create_params, 0, sizeof(create_params));
create_params.name = IPA_RM_RESOURCE_Q6_PROD;
create_params.reg_params.notify_cb = &ipa3_q6_rm_notify_cb;
- result = ipa3_rm_create_resource(&create_params);
+ result = ipa_rm_create_resource(&create_params);
if (result)
goto create_rsrc_err1;
memset(&create_params, 0, sizeof(create_params));
create_params.name = IPA_RM_RESOURCE_Q6_CONS;
create_params.release_resource = &ipa3_q6_rm_release_resource;
create_params.request_resource = &ipa3_q6_rm_request_resource;
- result = ipa3_rm_create_resource(&create_params);
+ result = ipa_rm_create_resource(&create_params);
if (result)
goto create_rsrc_err2;
/* add dependency*/
- result = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (result)
goto add_dpnd_err;
/* setup Performance profile */
memset(&profile, 0, sizeof(profile));
profile.max_supported_bandwidth_mbps = 100;
- result = ipa3_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+ result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
&profile);
if (result)
goto set_perf_err;
- result = ipa3_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
+ result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
&profile);
if (result)
goto set_perf_err;
return result;
set_perf_err:
- ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_APPS_CONS);
add_dpnd_err:
- result = ipa3_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+ result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
if (result < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_CONS, result);
create_rsrc_err2:
- result = ipa3_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+ result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
if (result < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, result);
@@ -1806,17 +1806,17 @@ void ipa3_q6_deinitialize_rm(void)
{
int ret;
- ret = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (ret < 0)
IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
ret);
- ret = ipa3_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_CONS, ret);
- ret = ipa3_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
@@ -2056,13 +2056,13 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
ipa_rm_params.reg_params.user_data = dev;
ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify;
- ret = ipa3_rm_create_resource(&ipa_rm_params);
+ ret = ipa_rm_create_resource(&ipa_rm_params);
if (ret) {
pr_err("%s: unable to create resourse %d in IPA RM\n",
__func__, IPA_RM_RESOURCE_WWAN_0_PROD);
goto create_rsrc_err;
}
- ret = ipa3_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
+ ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_INACTIVITY_TIMER);
if (ret) {
pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
@@ -2070,14 +2070,14 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
goto timer_init_err;
}
/* add dependency */
- ret = ipa3_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (ret)
goto add_dpnd_err;
/* setup Performance profile */
memset(&profile, 0, sizeof(profile));
profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
- ret = ipa3_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
+ ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
&profile);
if (ret)
goto set_perf_err;
@@ -2108,20 +2108,20 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
config_err:
unregister_netdev(dev);
set_perf_err:
- ret = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (ret)
IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
ret);
add_dpnd_err:
- ret = ipa3_rm_inactivity_timer_destroy(
+ ret = ipa_rm_inactivity_timer_destroy(
IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
if (ret)
- IPAWANERR("Error ipa3_rm_inactivity_timer_destroy %d, ret=%d\n",
+ IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
timer_init_err:
- ret = ipa3_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
if (ret)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
@@ -2155,18 +2155,18 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
unregister_netdev(IPA_NETDEV());
- ret = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (ret < 0)
IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
ret);
- ret = ipa3_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
+ ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
if (ret < 0)
IPAWANERR(
- "Error ipa3_rm_inactivity_timer_destroy resource %d, ret=%d\n",
+ "Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
- ret = ipa3_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
@@ -2229,7 +2229,7 @@ static int rmnet_ipa_ap_suspend(struct device *dev)
/* Make sure that there is no Tx operation ongoing */
netif_tx_lock_bh(netdev);
- ipa3_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
netif_tx_unlock_bh(netdev);
IPAWANDBG("Exit\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
index b629ec740b1e..9aa0ff3d4445 100644
--- a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -120,9 +120,9 @@ int ipa3_teth_bridge_init(struct teth_bridge_init_params *params)
int ipa3_teth_bridge_disconnect(enum ipa_client_type client)
{
TETH_DBG_FUNC_ENTRY();
- ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
- ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_CONS);
TETH_DBG_FUNC_EXIT();
@@ -148,10 +148,10 @@ int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
* in order to make sure the IPA clocks are up before we continue
* and notify the USB driver it may continue.
*/
- res = ipa3_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD,
+ res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res < 0) {
- TETH_ERR("ipa3_rm_add_dependency() failed.\n");
+ TETH_ERR("ipa_rm_add_dependency() failed.\n");
goto bail;
}
@@ -160,12 +160,12 @@ int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
* bridge is connected), the clocks are already up so the call doesn't
* need to block.
*/
- res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_CONS);
if (res < 0 && res != -EINPROGRESS) {
- ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
- TETH_ERR("ipa3_rm_add_dependency() failed.\n");
+ TETH_ERR("ipa_rm_add_dependency() failed.\n");
goto bail;
}
diff --git a/drivers/platform/msm/mhi_dev/Makefile b/drivers/platform/msm/mhi_dev/Makefile
new file mode 100644
index 000000000000..c1969e20426d
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/Makefile
@@ -0,0 +1,6 @@
+# Makefile for MHI driver
+obj-y += mhi_mmio.o
+obj-y += mhi.o
+obj-y += mhi_ring.o
+obj-y += mhi_uci.o
+obj-y += mhi_sm.o
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
new file mode 100644
index 000000000000..142263be23aa
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -0,0 +1,1952 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/msm_ep_pcie.h>
+#include <linux/ipa.h>
+#include <linux/vmalloc.h>
+
+#include "mhi.h"
+#include "mhi_hwio.h"
+#include "mhi_sm.h"
+
+/* Wait time on the device for Host to set M0 state */
+#define MHI_M0_WAIT_MIN_USLEEP 20000000
+#define MHI_M0_WAIT_MAX_USLEEP 25000000
+#define MHI_DEV_M0_MAX_CNT 30
+/* Wait time before suspend/resume is complete */
+#define MHI_SUSPEND_WAIT_MIN 3100
+#define MHI_SUSPEND_WAIT_MAX 3200
+#define MHI_SUSPEND_WAIT_TIMEOUT 500
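+/* The suspend/resume wait is polled: up to MHI_SUSPEND_WAIT_TIMEOUT
+ * iterations of the 3.1-3.2 ms sleep above (roughly 1.6 s in total)
+ */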
+#define MHI_MASK_CH_EV_LEN 32
+#define MHI_RING_CMD_ID 0
+#define MHI_RING_PRIMARY_EVT_ID 1
+#define MHI_1K_SIZE 0x1000
+/* Updated Specification for event start is NER - 2 and end - NER -1 */
+#define MHI_HW_ACC_EVT_RING_START 2
+#define MHI_HW_ACC_EVT_RING_END 1
+
+#define MHI_HOST_REGION_NUM 2
+
+#define MHI_MMIO_CTRL_INT_STATUS_A7_MSK 0x1
+#define MHI_MMIO_CTRL_CRDB_STATUS_MSK 0x2
+
+#define HOST_ADDR(lsb, msb) ((lsb) | ((uint64_t)(msb) << 32))
+#define HOST_ADDR_LSB(addr) (addr & 0xFFFFFFFF)
+#define HOST_ADDR_MSB(addr) ((addr >> 32) & 0xFFFFFFFF)
+
+#define MHI_IPC_LOG_PAGES (100)
+enum mhi_msg_level mhi_msg_lvl = MHI_MSG_ERROR;
+enum mhi_msg_level mhi_ipc_msg_lvl = MHI_MSG_VERBOSE;
+void *mhi_ipc_log;
+
+static struct mhi_dev *mhi_ctx;
+static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
+ unsigned long data);
+static void mhi_ring_init_cb(void *user_data);
+
+void mhi_dev_read_from_host(struct mhi_addr *host, dma_addr_t dev, size_t size)
+{
+ int rc = 0;
+ uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
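+	/* bit 40 is asserted on the host address so that the DMA engine
+	 * routes the access over PCIe to the host (cf. assert_bit40
+	 * passed to IPA in mhi_hwc_init())
+	 */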
+ host_addr_pa = ((u64) host->host_pa) | bit_40;
+
+	mhi_log(MHI_MSG_ERROR, "device 0x%llx <-- host 0x%llx, size %zu\n",
+		(uint64_t) dev, host_addr_pa, size);
+
+ rc = ipa_dma_sync_memcpy((u64) dev, host_addr_pa, (int) size);
+ if (rc)
+ pr_err("error while reading from host:%d\n", rc);
+}
+EXPORT_SYMBOL(mhi_dev_read_from_host);
+
+void mhi_dev_write_to_host(struct mhi_addr *host, void *dev, size_t size,
+ struct mhi_dev *mhi)
+{
+ int rc = 0;
+ uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
+ if (!mhi) {
+ pr_err("invalid MHI ctx\n");
+ return;
+ }
+
+ host_addr_pa = ((u64) host->host_pa) | bit_40;
+ /* Copy the device content to a local device physical address */
+ memcpy(mhi->dma_cache, dev, size);
+ mhi_log(MHI_MSG_ERROR, "device 0x%llx --> host 0x%llx, size %d\n",
+ (uint64_t) mhi->cache_dma_handle, host_addr_pa, (int) size);
+
+ rc = ipa_dma_sync_memcpy(host_addr_pa, (u64) mhi->cache_dma_handle,
+ (int) size);
+ if (rc)
+		pr_err("error while writing to host:%d\n", rc);
+}
+EXPORT_SYMBOL(mhi_dev_write_to_host);
+
+int mhi_transfer_host_to_device(void *dev, uint64_t host_pa, uint32_t len,
+ struct mhi_dev *mhi)
+{
+ int rc = 0;
+ uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
+ if (!mhi) {
+ pr_err("Invalid mhi device\n");
+ return -EINVAL;
+ }
+
+ if (!dev) {
+ pr_err("Invalid virt device\n");
+ return -EINVAL;
+ }
+
+ if (!host_pa) {
+ pr_err("Invalid host pa device\n");
+ return -EINVAL;
+ }
+
+ host_addr_pa = host_pa | bit_40;
+ mhi_log(MHI_MSG_ERROR, "device 0x%llx <-- host 0x%llx, size %d\n",
+ (uint64_t) mhi->read_dma_handle, host_addr_pa, (int) len);
+ rc = ipa_dma_sync_memcpy((u64) mhi->read_dma_handle,
+ host_addr_pa, (int) len);
+ if (rc) {
+ pr_err("error while reading from host:%d\n", rc);
+ return rc;
+ }
+
+ memcpy(dev, mhi->read_handle, len);
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_transfer_host_to_device);
+
+int mhi_transfer_device_to_host(uint64_t host_addr, void *dev, uint32_t len,
+ struct mhi_dev *mhi)
+{
+ int rc = 0;
+ uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
+ if (!mhi || !dev || !host_addr) {
+		pr_err("%s: Invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ host_addr_pa = host_addr | bit_40;
+ memcpy(mhi->write_handle, dev, len);
+
+ mhi_log(MHI_MSG_ERROR, "device 0x%llx ---> host 0x%llx, size %d\n",
+ (uint64_t) mhi->write_dma_handle, host_addr_pa, (int) len);
+ rc = ipa_dma_sync_memcpy(host_addr_pa,
+ (u64) mhi->write_dma_handle,
+ (int) len);
+ if (rc)
+		pr_err("error while writing to host:%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_transfer_device_to_host);
+
+int mhi_dev_is_list_empty(void)
+{
+	/* true only when neither event nor process rings are pending */
+	return list_empty(&mhi_ctx->event_ring_list) &&
+		list_empty(&mhi_ctx->process_ring_list);
+}
+EXPORT_SYMBOL(mhi_dev_is_list_empty);
+
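+/*
+ * Per the updated spec (see MHI_HW_ACC_EVT_RING_START/END above), the
+ * hardware-accelerated event rings are the last two: NER - 2 and NER - 1.
+ */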
+static void mhi_dev_get_erdb_db_cfg(struct mhi_dev *mhi,
+ struct ep_pcie_db_config *erdb_cfg)
+{
+ switch (mhi->cfg.event_rings) {
+ case NUM_CHANNELS:
+ erdb_cfg->base = HW_CHANNEL_BASE;
+ erdb_cfg->end = HW_CHANNEL_END;
+ break;
+ default:
+ erdb_cfg->base = mhi->cfg.event_rings -
+ MHI_HW_ACC_EVT_RING_START;
+ erdb_cfg->end = mhi->cfg.event_rings -
+ MHI_HW_ACC_EVT_RING_END;
+ break;
+ }
+}
+
+int mhi_pcie_config_db_routing(struct mhi_dev *mhi)
+{
+ int rc = 0;
+ struct ep_pcie_db_config chdb_cfg, erdb_cfg;
+
+ if (!mhi) {
+ pr_err("Invalid MHI context\n");
+ return -EINVAL;
+ }
+
+ /* Configure Doorbell routing */
+ chdb_cfg.base = HW_CHANNEL_BASE;
+ chdb_cfg.end = HW_CHANNEL_END;
+ chdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_crdb;
+
+ mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);
+
+ mhi_log(MHI_MSG_ERROR,
+ "Event rings 0x%x => er_base 0x%x, er_end %d\n",
+ mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);
+ erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
+ ep_pcie_config_db_routing(mhi_ctx->phandle, chdb_cfg, erdb_cfg);
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_pcie_config_db_routing);
+
+static int mhi_hwc_init(struct mhi_dev *mhi)
+{
+ int rc = 0;
+ struct ep_pcie_msi_config cfg;
+ struct ipa_mhi_init_params ipa_init_params;
+ struct ep_pcie_db_config erdb_cfg;
+
+ /* Call IPA HW_ACC Init with MSI Address and db routing info */
+ rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
+ if (rc) {
+		pr_err("Error retrieving PCIe MSI config\n");
+ return rc;
+ }
+
+ rc = mhi_pcie_config_db_routing(mhi);
+ if (rc) {
+ pr_err("Error configuring DB routing\n");
+ return rc;
+ }
+
+ mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);
+ mhi_log(MHI_MSG_ERROR,
+ "Event rings 0x%x => er_base 0x%x, er_end %d\n",
+ mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);
+
+ erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
+ memset(&ipa_init_params, 0, sizeof(ipa_init_params));
+ ipa_init_params.msi.addr_hi = cfg.upper;
+ ipa_init_params.msi.addr_low = cfg.lower;
+ ipa_init_params.msi.data = cfg.data;
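+	/* mask covers all cfg.msg_num MSI vectors allocated to this EP */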
+ ipa_init_params.msi.mask = ((1 << cfg.msg_num) - 1);
+ ipa_init_params.first_er_idx = erdb_cfg.base;
+ ipa_init_params.first_ch_idx = HW_CHANNEL_BASE;
+ ipa_init_params.mmio_addr = ((uint32_t) mhi_ctx->mmio_base_pa_addr);
+ ipa_init_params.assert_bit40 = true;
+
+ mhi_log(MHI_MSG_ERROR,
+ "MMIO Addr 0x%x, MSI config: U:0x%x L: 0x%x D: 0x%x\n",
+ ipa_init_params.mmio_addr, cfg.upper, cfg.lower, cfg.data);
+ ipa_init_params.notify = mhi_hwc_cb;
+ ipa_init_params.priv = mhi;
+
+ rc = ipa_mhi_init(&ipa_init_params);
+ if (rc) {
+ pr_err("Error initializing IPA\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+static int mhi_hwc_start(struct mhi_dev *mhi)
+{
+ int rc = 0;
+ struct ipa_mhi_start_params ipa_start_params;
+
+ memset(&ipa_start_params, 0, sizeof(ipa_start_params));
+
+ ipa_start_params.channel_context_array_addr =
+ mhi->ch_ctx_shadow.host_pa;
+ ipa_start_params.event_context_array_addr =
+ mhi->ev_ctx_shadow.host_pa;
+
+ rc = ipa_mhi_start(&ipa_start_params);
+ if (rc)
+ pr_err("Error starting IPA (rc = 0x%X)\n", rc);
+
+ return rc;
+}
+
+static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
+ unsigned long data)
+{
+ int rc = 0;
+
+ switch (event) {
+ case IPA_MHI_EVENT_READY:
+ mhi_log(MHI_MSG_ERROR,
+ "HW Channel uC is ready event=0x%X\n", event);
+ rc = mhi_hwc_start(mhi_ctx);
+ if (rc) {
+ pr_err("hwc_init start failed with %d\n", rc);
+ return;
+ }
+
+ rc = mhi_dev_mmio_enable_chdb_interrupts(mhi_ctx);
+ if (rc) {
+ pr_err("Failed to enable channel db\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_enable_ctrl_interrupt(mhi_ctx);
+ if (rc) {
+ pr_err("Failed to enable control interrupt\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_enable_cmdb_interrupt(mhi_ctx);
+
+ if (rc) {
+ pr_err("Failed to enable command db\n");
+ return;
+ }
+ break;
+ case IPA_MHI_EVENT_DATA_AVAILABLE:
+ rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
+ if (rc) {
+ pr_err("Event HW_ACC_WAKEUP failed with %d\n", rc);
+ return;
+ }
+ break;
+ default:
+ pr_err("HW Channel uC unknown event 0x%X\n", event);
+ break;
+ }
+}
+
+static int mhi_hwc_chcmd(struct mhi_dev *mhi, uint chid,
+ enum mhi_dev_ring_element_type_id type)
+{
+ int rc = 0;
+ struct ipa_mhi_connect_params connect_params;
+
+ memset(&connect_params, 0, sizeof(connect_params));
+
+ switch (type) {
+ case MHI_DEV_RING_EL_STOP:
+ rc = ipa_mhi_disconnect_pipe(
+ mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]);
+ if (rc)
+			pr_err("Stopping HW Channel %d failed 0x%X\n",
+ chid, rc);
+ break;
+ case MHI_DEV_RING_EL_START:
+ connect_params.channel_id = chid;
+ connect_params.sys.skip_ep_cfg = true;
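+		/* even HW channel IDs map to the IPA producer pipe,
+		 * odd IDs to the consumer pipe
+		 */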
+ if ((chid % 2) == 0x0)
+ connect_params.sys.client = IPA_CLIENT_MHI_PROD;
+ else
+ connect_params.sys.client = IPA_CLIENT_MHI_CONS;
+
+ rc = ipa_mhi_connect_pipe(&connect_params,
+ &mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]);
+ if (rc)
+			pr_err("HW Channel %d start failed 0x%X\n",
+ chid, rc);
+ break;
+ case MHI_DEV_RING_EL_INVALID:
+ default:
+ pr_err("Invalid Ring Element type = 0x%X\n", type);
+ break;
+ }
+
+ return rc;
+}
+
+static void mhi_dev_core_ack_ctrl_interrupts(struct mhi_dev *dev,
+ uint32_t *int_value)
+{
+ int rc = 0;
+
+ rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, int_value);
+ if (rc) {
+ pr_err("Failed to read A7 status\n");
+ return;
+ }
+
+	rc = mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7, *int_value);
+ if (rc) {
+ pr_err("Failed to clear A7 status\n");
+ return;
+ }
+}
+
+static void mhi_dev_fetch_ch_ctx(struct mhi_dev *mhi, uint32_t ch_id)
+{
+ struct mhi_addr addr;
+
+ addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+ addr.size = sizeof(struct mhi_dev_ch_ctx);
+ /* Fetch the channel ctx (*dst, *src, size) */
+ mhi_dev_read_from_host(&addr, mhi->ch_ctx_cache_dma_handle +
+ (sizeof(struct mhi_dev_ch_ctx) * ch_id),
+ sizeof(struct mhi_dev_ch_ctx));
+}
+
+int mhi_dev_syserr(struct mhi_dev *mhi)
+{
+ if (!mhi) {
+ pr_err("%s: Invalid MHI ctx\n", __func__);
+ return -EINVAL;
+ }
+
+ mhi_dev_dump_mmio(mhi);
+ pr_err("MHI dev sys error\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_syserr);
+
+int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring,
+ union mhi_dev_ring_element_type *el)
+{
+ int rc = 0;
+ uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring;
+ struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx];
+ union mhi_dev_ring_ctx *ctx;
+ struct ep_pcie_msi_config cfg;
+ struct mhi_addr msi_addr;
+ uint32_t msi = 0;
+ struct mhi_addr host_rp_addr;
+
+ rc = ep_pcie_get_msi_config(mhi->phandle,
+ &cfg);
+ if (rc) {
+		pr_err("Error retrieving PCIe MSI config\n");
+ return rc;
+ }
+
+ if (evnt_ring_idx > mhi->cfg.event_rings) {
+ pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx);
+ return -EINVAL;
+ }
+
+ if (mhi_ring_get_state(ring) == RING_STATE_UINT) {
+ ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring];
+ rc = mhi_ring_start(ring, ctx, mhi);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "error starting event ring %d\n", evnt_ring);
+ return rc;
+ }
+ }
+
+ mutex_lock(&mhi->mhi_event_lock);
+ /* add the ring element */
+ mhi_dev_add_element(ring, el);
+
+ ring->ring_ctx_shadow->ev.rp = (ring->rd_offset *
+ sizeof(union mhi_dev_ring_element_type)) +
+ ring->ring_ctx->generic.rbase;
+
+ mhi_log(MHI_MSG_ERROR, "ev.rp = %llx for %lld\n",
+ ring->ring_ctx_shadow->ev.rp, evnt_ring_idx);
+
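+	/* host address of this ring's rp: the ring's entry in the host
+	 * event context array plus the offset of ev.rp within the context
+	 */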
+ host_rp_addr.host_pa = (mhi->ev_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ev_ctx) *
+ evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp -
+ (uint32_t) ring->ring_ctx;
+ mhi_dev_write_to_host(&host_rp_addr, &ring->ring_ctx_shadow->ev.rp,
+ sizeof(uint64_t),
+ mhi);
+
+ /*
+ * rp update in host memory should be flushed
+ * before sending a MSI to the host
+ */
+ wmb();
+
+ mutex_unlock(&mhi->mhi_event_lock);
+ mhi_log(MHI_MSG_ERROR, "event sent:\n");
+ mhi_log(MHI_MSG_ERROR, "evnt ptr : 0x%llx\n", el->evt_tr_comp.ptr);
+ mhi_log(MHI_MSG_ERROR, "evnt len : 0x%x\n", el->evt_tr_comp.len);
+ mhi_log(MHI_MSG_ERROR, "evnt code :0x%x\n", el->evt_tr_comp.code);
+ mhi_log(MHI_MSG_ERROR, "evnt type :0x%x\n", el->evt_tr_comp.type);
+ mhi_log(MHI_MSG_ERROR, "evnt chid :0x%x\n", el->evt_tr_comp.chid);
+
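+	/* compose the MSI target address from the host-provided upper and
+	 * lower words and offset the MSI data by this device's vector
+	 */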
+ msi_addr.host_pa = (uint64_t)((uint64_t)cfg.upper << 32) |
+ (uint64_t)cfg.lower;
+ msi = cfg.data + mhi_ctx->mhi_ep_msi_num;
+ mhi_log(MHI_MSG_ERROR, "Sending MSI %d to 0x%llx as data = 0x%x\n",
+ mhi_ctx->mhi_ep_msi_num, msi_addr.host_pa, msi);
+ mhi_dev_write_to_host(&msi_addr, &msi, 4, mhi);
+
+ return rc;
+}
+
+static int mhi_dev_send_completion_event(struct mhi_dev_channel *ch,
+ uint32_t rd_ofst, uint32_t len,
+ enum mhi_dev_cmd_completion_code code)
+{
+ int rc = 0;
+ union mhi_dev_ring_element_type compl_event;
+ struct mhi_dev *mhi = ch->ring->mhi_dev;
+
+ compl_event.evt_tr_comp.chid = ch->ch_id;
+ compl_event.evt_tr_comp.type =
+ MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
+ compl_event.evt_tr_comp.len = len;
+ compl_event.evt_tr_comp.code = code;
+ compl_event.evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
+ rd_ofst * sizeof(struct mhi_dev_transfer_ring_element);
+
+ rc = mhi_dev_send_event(mhi,
+ mhi->ch_ctx_cache[ch->ch_id].err_indx, &compl_event);
+
+ return rc;
+}
+
+int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
+ enum mhi_dev_state state)
+{
+ union mhi_dev_ring_element_type event;
+ int rc = 0;
+
+ event.evt_state_change.type = MHI_DEV_RING_EL_MHI_STATE_CHG;
+ event.evt_state_change.mhistate = state;
+
+ rc = mhi_dev_send_event(mhi, 0, &event);
+ if (rc) {
+ pr_err("Sending state change event failed\n");
+ return rc;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_send_state_change_event);
+
+int mhi_dev_send_ee_event(struct mhi_dev *mhi, enum mhi_dev_execenv exec_env)
+{
+ union mhi_dev_ring_element_type event;
+ int rc = 0;
+
+ event.evt_ee_state.type = MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY;
+ event.evt_ee_state.execenv = exec_env;
+
+ rc = mhi_dev_send_event(mhi, 0, &event);
+ if (rc) {
+ pr_err("Sending EE change event failed\n");
+ return rc;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_send_ee_event);
+
+int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi)
+{
+ int rc = 0;
+
+ /*
+	 * Expected usage: on HW ACC traffic the IPA uC notifies
+ * Q6 -> IPA A7 -> MHI core -> MHI SM
+ */
+ rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
+ if (rc) {
+ pr_err("error sending SM event\n");
+ return rc;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_trigger_hw_acc_wakeup);
+
+static int mhi_dev_send_cmd_comp_event(struct mhi_dev *mhi)
+{
+ int rc = 0;
+ union mhi_dev_ring_element_type event;
+
+ /* send the command completion event to the host */
+ event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase
+ + (mhi->ring[MHI_RING_CMD_ID].rd_offset *
+ (sizeof(union mhi_dev_ring_element_type)));
+	mhi_log(MHI_MSG_ERROR, "evt cmd comp ptr: 0x%llx\n",
+			event.evt_cmd_comp.ptr);
+ event.evt_cmd_comp.type = MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
+ event.evt_cmd_comp.code = MHI_CMD_COMPL_CODE_SUCCESS;
+
+ rc = mhi_dev_send_event(mhi, 0, &event);
+ if (rc)
+		pr_err("sending command completion event failed\n");
+
+ return rc;
+}
+
+static int mhi_dev_process_stop_cmd(struct mhi_dev_ring *ring, uint32_t ch_id,
+ struct mhi_dev *mhi)
+{
+ int rc = 0;
+ struct mhi_addr host_addr;
+
+ if (ring->rd_offset != ring->wr_offset &&
+ mhi->ch_ctx_cache[ch_id].ch_type ==
+ MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL) {
+ mhi_log(MHI_MSG_INFO, "Pending transaction to be processed\n");
+ return 0;
+ } else if (mhi->ch_ctx_cache[ch_id].ch_type ==
+ MHI_DEV_CH_TYPE_INBOUND_CHANNEL &&
+ mhi->ch[ch_id].wr_request_active) {
+ return 0;
+ }
+
+ /* set the channel to stop */
+ mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP;
+
+ host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+ /* update the channel state in the host */
+ mhi_dev_write_to_host(&host_addr, &mhi->ch_ctx_cache[ch_id].ch_state,
+ sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+ /* send the completion event to the host */
+ rc = mhi_dev_send_cmd_comp_event(mhi);
+ if (rc)
+ pr_err("Error sending command completion event\n");
+
+ return rc;
+}
+
+static void mhi_dev_process_cmd_ring(struct mhi_dev *mhi,
+ union mhi_dev_ring_element_type *el, void *ctx)
+{
+ int rc = 0;
+ uint32_t ch_id = 0;
+ union mhi_dev_ring_element_type event;
+ struct mhi_addr host_addr;
+
+	ch_id = el->generic.chid;
+	mhi_log(MHI_MSG_ERROR, "for channel:%d and cmd:%d\n",
+		ch_id, el->generic.type);
+
+ switch (el->generic.type) {
+ case MHI_DEV_RING_EL_START:
+		mhi_log(MHI_MSG_ERROR, "received start cmd for channel %d\n",
+ ch_id);
+ if (ch_id >= (HW_CHANNEL_BASE)) {
+ rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
+ if (rc) {
+ pr_err("Error with HW channel cmd :%d\n", rc);
+ return;
+ }
+ goto send_start_completion_event;
+ }
+
+ /* fetch the channel context from host */
+ mhi_dev_fetch_ch_ctx(mhi, ch_id);
+
+ /* Initialize and configure the corresponding channel ring */
+ rc = mhi_ring_start(&mhi->ring[mhi->ch_ring_start + ch_id],
+ (union mhi_dev_ring_ctx *)&mhi->ch_ctx_cache[ch_id],
+ mhi);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "start ring failed for ch %d\n", ch_id);
+ return;
+ }
+
+ mhi->ring[mhi->ch_ring_start + ch_id].state =
+ RING_STATE_PENDING;
+
+ /* set the channel to running */
+ mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
+ mhi->ch[ch_id].ch_id = ch_id;
+ mhi->ch[ch_id].ring = &mhi->ring[mhi->ch_ring_start + ch_id];
+ mhi->ch[ch_id].ch_type = mhi->ch_ctx_cache[ch_id].ch_type;
+
+ /* enable DB for event ring */
+ rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch_id);
+ if (rc) {
+ pr_err("Failed to enable channel db\n");
+ return;
+ }
+
+ host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+ mhi_dev_write_to_host(&host_addr,
+ &mhi->ch_ctx_cache[ch_id].ch_state,
+ sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+send_start_completion_event:
+ rc = mhi_dev_send_cmd_comp_event(mhi);
+ if (rc)
+ pr_err("Error sending command completion event\n");
+
+ break;
+ case MHI_DEV_RING_EL_STOP:
+ if (ch_id >= HW_CHANNEL_BASE) {
+ rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "send channel stop cmd event failed\n");
+ return;
+ }
+
+ /* send the completion event to the host */
+ event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase +
+ (mhi->ring[MHI_RING_CMD_ID].rd_offset *
+ (sizeof(union mhi_dev_ring_element_type)));
+ event.evt_cmd_comp.type =
+ MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
+ if (rc == 0)
+ event.evt_cmd_comp.code =
+ MHI_CMD_COMPL_CODE_SUCCESS;
+ else
+ event.evt_cmd_comp.code =
+ MHI_CMD_COMPL_CODE_UNDEFINED;
+
+ rc = mhi_dev_send_event(mhi, 0, &event);
+ if (rc) {
+ pr_err("stop event send failed\n");
+ return;
+ }
+ } else {
+ /*
+ * Check if there are any pending transactions for the
+ * ring associated with the channel. If no, proceed to
+ * write disable the channel state else send stop
+ * channel command to check if one can suspend the
+ * command.
+ */
+ mhi->ch[ch_id].state = MHI_DEV_CH_PENDING_STOP;
+ rc = mhi_dev_process_stop_cmd(
+ &mhi->ring[mhi->ch_ring_start + ch_id],
+ ch_id, mhi);
+ if (rc) {
+ pr_err("stop event send failed\n");
+ return;
+ }
+ }
+ break;
+ case MHI_DEV_RING_EL_RESET:
+ /* hard stop and set the channel to stop */
+ mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP;
+ host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+
+ /* update the channel state in the host */
+ mhi_dev_write_to_host(&host_addr,
+ &mhi->ch_ctx_cache[ch_id].ch_state,
+ sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+ /* send the completion event to the host */
+ rc = mhi_dev_send_cmd_comp_event(mhi);
+ if (rc)
+ pr_err("Error sending command completion event\n");
+ break;
+ default:
+ pr_err("%s: Invalid command:%d\n", __func__, el->generic.type);
+ break;
+ }
+}
+
+static void mhi_dev_process_tre_ring(struct mhi_dev *mhi,
+ union mhi_dev_ring_element_type *el, void *ctx)
+{
+ struct mhi_dev_ring *ring = (struct mhi_dev_ring *)ctx;
+ struct mhi_dev_channel *ch;
+ struct mhi_dev_client_cb_reason reason;
+
+ if (ring->id < mhi->ch_ring_start) {
+ mhi_log(MHI_MSG_ERROR,
+ "invalid channel ring id (%d), should be < %d\n",
+ ring->id, mhi->ch_ring_start);
+ return;
+ }
+
+ ch = &mhi->ch[ring->id - mhi->ch_ring_start];
+ reason.ch_id = ch->ch_id;
+ reason.reason = MHI_DEV_TRE_AVAILABLE;
+
+	/*
+	 * Invoke a callback to let the client know its data is ready.
+	 * Copy this event to the client's context so that it can be
+	 * sent out once the client has fetched the data. Update the rp
+	 * before sending the data as part of the event completion.
+	 */
+ if (ch->active_client && ch->active_client->event_trigger != NULL)
+ ch->active_client->event_trigger(&reason);
+}
+
+static void mhi_dev_process_ring_pending(struct work_struct *work)
+{
+ struct mhi_dev *mhi = container_of(work,
+ struct mhi_dev, pending_work);
+ struct list_head *cp, *q;
+ struct mhi_dev_ring *ring;
+ struct mhi_dev_channel *ch;
+ int rc = 0;
+
+ mutex_lock(&mhi_ctx->mhi_lock);
+ rc = mhi_dev_process_ring(&mhi->ring[mhi->cmd_ring_idx]);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR, "error processing command ring\n");
+ goto exit;
+ }
+
+ list_for_each_safe(cp, q, &mhi->process_ring_list) {
+ ring = list_entry(cp, struct mhi_dev_ring, list);
+ list_del(cp);
+ mhi_log(MHI_MSG_ERROR, "processing ring %d\n", ring->id);
+ rc = mhi_dev_process_ring(ring);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "error processing ring %d\n", ring->id);
+ goto exit;
+ }
+
+ if (ring->id < mhi->ch_ring_start) {
+ mhi_log(MHI_MSG_ERROR,
+ "ring (%d) is not a channel ring\n", ring->id);
+ goto exit;
+ }
+
+ ch = &mhi->ch[ring->id - mhi->ch_ring_start];
+ rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch->ch_id);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "error enabling chdb interrupt for %d\n", ch->ch_id);
+ goto exit;
+ }
+ }
+
+exit:
+ mutex_unlock(&mhi_ctx->mhi_lock);
+}
+
+static int mhi_dev_get_event_notify(enum mhi_dev_state state,
+ enum mhi_dev_event *event)
+{
+ int rc = 0;
+
+ switch (state) {
+ case MHI_DEV_M0_STATE:
+ *event = MHI_DEV_EVENT_M0_STATE;
+ break;
+ case MHI_DEV_M1_STATE:
+ *event = MHI_DEV_EVENT_M1_STATE;
+ break;
+ case MHI_DEV_M2_STATE:
+ *event = MHI_DEV_EVENT_M2_STATE;
+ break;
+ case MHI_DEV_M3_STATE:
+ *event = MHI_DEV_EVENT_M3_STATE;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static void mhi_dev_queue_channel_db(struct mhi_dev *mhi,
+ uint32_t chintr_value, uint32_t ch_num)
+{
+ struct mhi_dev_ring *ring;
+ int rc = 0;
+
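+	/* walk the 32-bit doorbell status word; each set bit is a channel
+	 * with a pending doorbell
+	 */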
+ for (; chintr_value; ch_num++, chintr_value >>= 1) {
+ if (chintr_value & 1) {
+ ring = &mhi->ring[ch_num + mhi->ch_ring_start];
+ if (ring->state == RING_STATE_UINT) {
+ pr_err("Channel not opened for %d\n", ch_num);
+ break;
+ }
+ mhi_ring_set_state(ring, RING_STATE_PENDING);
+ list_add(&ring->list, &mhi->process_ring_list);
+ rc = mhi_dev_mmio_disable_chdb_a7(mhi, ch_num);
+ if (rc) {
+ pr_err("Error disabling chdb\n");
+ return;
+ }
+ queue_work(mhi->pending_ring_wq, &mhi->pending_work);
+ }
+ }
+}
+
+static void mhi_dev_check_channel_interrupt(struct mhi_dev *mhi)
+{
+ int i, rc = 0;
+ uint32_t chintr_value = 0, ch_num = 0;
+
+ rc = mhi_dev_mmio_read_chdb_status_interrupts(mhi);
+ if (rc) {
+		pr_err("Failed to read channel db status\n");
+ return;
+ }
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ ch_num = i * MHI_MASK_CH_EV_LEN;
+ chintr_value = mhi->chdb[i].status;
+ if (chintr_value) {
+ mhi_log(MHI_MSG_ERROR,
+ "processing id: %d, ch interrupt 0x%x\n",
+ i, chintr_value);
+ mhi_dev_queue_channel_db(mhi, chintr_value, ch_num);
+ rc = mhi_dev_mmio_write(mhi, MHI_CHDB_INT_CLEAR_A7_n(i),
+ mhi->chdb[i].status);
+ if (rc) {
+ pr_err("Error writing interrupt clear for A7\n");
+ return;
+ }
+ }
+ }
+}
+
+static void mhi_dev_scheduler(struct work_struct *work)
+{
+ struct mhi_dev *mhi = container_of(work,
+ struct mhi_dev, chdb_ctrl_work);
+ int rc = 0;
+ uint32_t int_value = 0;
+ struct mhi_dev_ring *ring;
+ enum mhi_dev_state state;
+ enum mhi_dev_event event = 0;
+
+ mutex_lock(&mhi_ctx->mhi_lock);
+ /* Check for interrupts */
+ mhi_dev_core_ack_ctrl_interrupts(mhi, &int_value);
+
+ if (int_value & MHI_MMIO_CTRL_INT_STATUS_A7_MSK) {
+ mhi_log(MHI_MSG_ERROR,
+ "processing ctrl interrupt with %d\n", int_value);
+ rc = mhi_dev_mmio_get_mhi_state(mhi, &state);
+ if (rc) {
+ pr_err("%s: get mhi state failed\n", __func__);
+ mutex_unlock(&mhi_ctx->mhi_lock);
+ return;
+ }
+
+ rc = mhi_dev_get_event_notify(state, &event);
+ if (rc) {
+ pr_err("unsupported state :%d\n", state);
+ mutex_unlock(&mhi_ctx->mhi_lock);
+ return;
+ }
+
+ rc = mhi_dev_notify_sm_event(event);
+ if (rc) {
+ pr_err("error sending SM event\n");
+ mutex_unlock(&mhi_ctx->mhi_lock);
+ return;
+ }
+ }
+
+ if (int_value & MHI_MMIO_CTRL_CRDB_STATUS_MSK) {
+ mhi_log(MHI_MSG_ERROR,
+ "processing cmd db interrupt with %d\n", int_value);
+ ring = &mhi->ring[MHI_RING_CMD_ID];
+ ring->state = RING_STATE_PENDING;
+ queue_work(mhi->pending_ring_wq, &mhi->pending_work);
+ }
+
+ /* get the specific channel interrupts */
+ mhi_dev_check_channel_interrupt(mhi);
+
+ mutex_unlock(&mhi_ctx->mhi_lock);
+ ep_pcie_mask_irq_event(mhi->phandle,
+ EP_PCIE_INT_EVT_MHI_A7, true);
+}
+
+void mhi_dev_notify_a7_event(struct mhi_dev *mhi)
+{
+ schedule_work(&mhi->chdb_ctrl_work);
+ mhi_log(MHI_MSG_ERROR, "mhi irq triggered\n");
+}
+EXPORT_SYMBOL(mhi_dev_notify_a7_event);
+
+int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi)
+{
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_config_outbound_iatu);
+
+static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi)
+{
+ int rc = 0;
+ struct platform_device *pdev;
+ uint64_t addr1 = 0;
+
+ pdev = mhi->pdev;
+
+ /* Get host memory region configuration */
+ mhi_dev_get_mhi_addr(mhi);
+
+ mhi->ctrl_base.host_pa = HOST_ADDR(mhi->host_addr.ctrl_base_lsb,
+ mhi->host_addr.ctrl_base_msb);
+ mhi->data_base.host_pa = HOST_ADDR(mhi->host_addr.data_base_lsb,
+ mhi->host_addr.data_base_msb);
+
+ addr1 = HOST_ADDR(mhi->host_addr.ctrl_limit_lsb,
+ mhi->host_addr.ctrl_limit_msb);
+ mhi->ctrl_base.size = addr1 - mhi->ctrl_base.host_pa;
+ addr1 = HOST_ADDR(mhi->host_addr.data_limit_lsb,
+ mhi->host_addr.data_limit_msb);
+ mhi->data_base.size = addr1 - mhi->data_base.host_pa;
+
+ /* Get Channel, event and command context base pointer */
+ rc = mhi_dev_mmio_get_chc_base(mhi);
+ if (rc) {
+ pr_err("Fetching channel context failed\n");
+ return rc;
+ }
+
+ rc = mhi_dev_mmio_get_erc_base(mhi);
+ if (rc) {
+ pr_err("Fetching event ring context failed\n");
+ return rc;
+ }
+
+ rc = mhi_dev_mmio_get_crc_base(mhi);
+ if (rc) {
+ pr_err("Fetching command ring context failed\n");
+ return rc;
+ }
+
+ rc = mhi_dev_update_ner(mhi);
+ if (rc) {
+ pr_err("Fetching NER failed\n");
+ return rc;
+ }
+
+ mhi->cmd_ctx_shadow.size = sizeof(struct mhi_dev_cmd_ctx);
+ mhi->ev_ctx_shadow.size = sizeof(struct mhi_dev_ev_ctx) *
+ mhi->cfg.event_rings;
+ mhi->ch_ctx_shadow.size = sizeof(struct mhi_dev_ch_ctx) *
+ mhi->cfg.channels;
+
+ mhi->cmd_ctx_cache = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct mhi_dev_cmd_ctx),
+ &mhi->cmd_ctx_cache_dma_handle,
+ GFP_KERNEL);
+ if (!mhi->cmd_ctx_cache) {
+ pr_err("no memory while allocating cmd ctx\n");
+ return -ENOMEM;
+ }
+ memset(mhi->cmd_ctx_cache, 0, sizeof(struct mhi_dev_cmd_ctx));
+
+ mhi->ev_ctx_cache = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct mhi_dev_ev_ctx) *
+ mhi->cfg.event_rings,
+ &mhi->ev_ctx_cache_dma_handle,
+ GFP_KERNEL);
+ if (!mhi->ev_ctx_cache)
+ return -ENOMEM;
+
+ mhi->ch_ctx_cache = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct mhi_dev_ch_ctx) *
+ mhi->cfg.channels,
+ &mhi->ch_ctx_cache_dma_handle,
+ GFP_KERNEL);
+ if (!mhi_ctx->ch_ctx_cache)
+ return -ENOMEM;
+
+ /* Cache the command and event context */
+ mhi_dev_read_from_host(&mhi->cmd_ctx_shadow,
+ mhi->cmd_ctx_cache_dma_handle,
+ mhi->cmd_ctx_shadow.size);
+
+ mhi_dev_read_from_host(&mhi->ev_ctx_shadow,
+ mhi->ev_ctx_cache_dma_handle,
+ mhi->ev_ctx_shadow.size);
+
+ mhi_log(MHI_MSG_ERROR,
+ "cmd ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
+ mhi->cmd_ctx_cache->rbase,
+ mhi->cmd_ctx_cache->rp,
+ mhi->cmd_ctx_cache->wp);
+ mhi_log(MHI_MSG_ERROR,
+ "ev ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
+ mhi_ctx->ev_ctx_cache->rbase,
+ mhi->ev_ctx_cache->rp,
+ mhi->ev_ctx_cache->wp);
+
+ rc = mhi_ring_start(&mhi->ring[0],
+ (union mhi_dev_ring_ctx *)mhi->cmd_ctx_cache, mhi);
+ if (rc) {
+ pr_err("error in ring start\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+int mhi_dev_suspend(struct mhi_dev *mhi)
+{
+ int ch_id = 0, rc = 0;
+ struct mhi_addr host_addr;
+
+ mutex_lock(&mhi_ctx->mhi_write_test);
+ atomic_set(&mhi->is_suspended, 1);
+
+ for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
+ if (mhi->ch_ctx_cache[ch_id].ch_state !=
+ MHI_DEV_CH_STATE_RUNNING)
+ continue;
+
+ mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_SUSPENDED;
+
+ host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+
+ /* update the channel state in the host */
+ mhi_dev_write_to_host(&host_addr,
+ &mhi->ch_ctx_cache[ch_id].ch_state,
+ sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+ }
+
+ rc = ipa_dma_disable();
+ if (rc)
+ pr_err("Disable IPA failed\n");
+
+ mutex_unlock(&mhi_ctx->mhi_write_test);
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_suspend);
+
+int mhi_dev_resume(struct mhi_dev *mhi)
+{
+ int ch_id = 0, rc = 0;
+ struct mhi_addr host_addr;
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ pr_err("IPA enable failed\n");
+ return rc;
+ }
+
+ for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
+ if (mhi->ch_ctx_cache[ch_id].ch_state !=
+ MHI_DEV_CH_STATE_SUSPENDED)
+ continue;
+
+ mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
+ host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+
+ /* update the channel state in the host */
+ mhi_dev_write_to_host(&host_addr,
+ &mhi->ch_ctx_cache[ch_id].ch_state,
+ sizeof(enum mhi_dev_ch_ctx_state), mhi);
+ }
+
+ atomic_set(&mhi->is_suspended, 0);
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_resume);
+
+static int mhi_dev_ring_init(struct mhi_dev *dev)
+{
+ int i = 0;
+
+	mhi_log(MHI_MSG_INFO, "initializing all rings\n");
+ dev->cmd_ring_idx = 0;
+ dev->ev_ring_start = 1;
+ dev->ch_ring_start = dev->ev_ring_start + dev->cfg.event_rings;
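+	/* ring layout: index 0 is the command ring, the next
+	 * cfg.event_rings entries are event rings, then one ring
+	 * per channel
+	 */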
+
+ /* Initialize CMD ring */
+ mhi_ring_init(&dev->ring[dev->cmd_ring_idx],
+ RING_TYPE_CMD, dev->cmd_ring_idx);
+
+ mhi_ring_set_cb(&dev->ring[dev->cmd_ring_idx],
+ mhi_dev_process_cmd_ring);
+
+ /* Initialize Event ring */
+ for (i = dev->ev_ring_start; i < (dev->cfg.event_rings
+ + dev->ev_ring_start); i++)
+ mhi_ring_init(&dev->ring[i], RING_TYPE_ER, i);
+
+ /* Initialize CH */
+ for (i = dev->ch_ring_start; i < (dev->cfg.channels
+ + dev->ch_ring_start); i++) {
+ mhi_ring_init(&dev->ring[i], RING_TYPE_CH, i);
+ mhi_ring_set_cb(&dev->ring[i], mhi_dev_process_tre_ring);
+ }
+
+ return 0;
+}
+
+int mhi_dev_open_channel(uint32_t chan_id,
+ struct mhi_dev_client **handle_client,
+ void (*mhi_dev_client_cb_reason)
+ (struct mhi_dev_client_cb_reason *cb))
+{
+ int rc = 0;
+ struct mhi_dev_channel *ch;
+ struct platform_device *pdev;
+
+ pdev = mhi_ctx->pdev;
+ ch = &mhi_ctx->ch[chan_id];
+
+ mutex_lock(&ch->ch_lock);
+
+ if (ch->active_client) {
+ mhi_log(MHI_MSG_ERROR,
+ "Channel (%d) already opened by client\n", chan_id);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ /* Initialize the channel, client and state information */
+ *handle_client = kzalloc(sizeof(struct mhi_dev_client), GFP_KERNEL);
+ if (!(*handle_client)) {
+		dev_err(&pdev->dev, "cannot allocate mhi_dev memory\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ ch->active_client = (*handle_client);
+ (*handle_client)->channel = ch;
+ (*handle_client)->event_trigger = mhi_dev_client_cb_reason;
+
+ if (ch->state == MHI_DEV_CH_UNINT) {
+ ch->ring = &mhi_ctx->ring[chan_id + mhi_ctx->ch_ring_start];
+ ch->state = MHI_DEV_CH_PENDING_START;
+ } else if (ch->state == MHI_DEV_CH_CLOSED)
+ ch->state = MHI_DEV_CH_STARTED;
+ else if (ch->state == MHI_DEV_CH_STOPPED)
+ ch->state = MHI_DEV_CH_PENDING_START;
+
+exit:
+ mutex_unlock(&ch->ch_lock);
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_open_channel);
+
+int mhi_dev_channel_isempty(struct mhi_dev_client *handle)
+{
+ struct mhi_dev_channel *ch;
+ int rc;
+
+ ch = handle->channel;
+
+ rc = ch->ring->rd_offset == ch->ring->wr_offset;
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_channel_isempty);
+
+int mhi_dev_close_channel(struct mhi_dev_client *handle)
+{
+ struct mhi_dev_channel *ch;
+ int rc = 0;
+
+ ch = handle->channel;
+
+ mutex_lock(&ch->ch_lock);
+ if (ch->state != MHI_DEV_CH_PENDING_START) {
+ if (ch->ch_type == MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL &&
+ !mhi_dev_channel_isempty(handle)) {
+ mhi_log(MHI_MSG_ERROR,
+ "Trying to close an active channel (%d)\n",
+ ch->ch_id);
+ mutex_unlock(&ch->ch_lock);
+ rc = -EAGAIN;
+ goto exit;
+ } else if (ch->tre_loc) {
+ mhi_log(MHI_MSG_ERROR,
+ "Trying to close channel (%d) when a TRE is active",
+ ch->ch_id);
+ mutex_unlock(&ch->ch_lock);
+ rc = -EAGAIN;
+ goto exit;
+ }
+ }
+
+ ch->state = MHI_DEV_CH_CLOSED;
+ ch->active_client = NULL;
+ kfree(handle);
+exit:
+ mutex_unlock(&ch->ch_lock);
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_close_channel);
+
+static int mhi_dev_check_tre_bytes_left(struct mhi_dev_channel *ch,
+ struct mhi_dev_ring *ring, union mhi_dev_ring_element_type *el,
+ uint32_t *chain)
+{
+ uint32_t td_done = 0;
+
+ /*
+ * A full TRE worth of data was consumed.
+ * Check if we are at a TD boundary.
+ */
+ if (ch->tre_bytes_left == 0) {
+ if (el->tre.chain) {
+ if (el->tre.ieob)
+ mhi_dev_send_completion_event(ch,
+ ring->rd_offset, el->tre.len,
+ MHI_CMD_COMPL_CODE_EOB);
+ *chain = 1;
+ } else {
+ if (el->tre.ieot)
+ mhi_dev_send_completion_event(
+ ch, ring->rd_offset, el->tre.len,
+ MHI_CMD_COMPL_CODE_EOT);
+ td_done = 1;
+ *chain = 0;
+ }
+ mhi_dev_ring_inc_index(ring, ring->rd_offset);
+ ch->tre_bytes_left = 0;
+ ch->tre_loc = 0;
+ }
+
+ return td_done;
+}
+
+int mhi_dev_read_channel(struct mhi_dev_client *handle_client,
+ void *buf, uint32_t buf_size, uint32_t *chain)
+{
+ struct mhi_dev_channel *ch;
+ struct mhi_dev_ring *ring;
+ union mhi_dev_ring_element_type *el;
+ uint32_t ch_id;
+ size_t bytes_to_read, addr_offset;
+ uint64_t read_from_loc;
+ ssize_t bytes_read = 0;
+ uint32_t write_to_loc = 0;
+ size_t usr_buf_remaining = buf_size;
+ int td_done = 0, rc = 0;
+
+ if (!handle_client) {
+ mhi_log(MHI_MSG_ERROR, "invalid client handle\n");
+ return -ENXIO;
+ }
+
+ ch = handle_client->channel;
+ ring = ch->ring;
+ ch_id = ch->ch_id;
+ *chain = 0;
+
+ mutex_lock(&ch->ch_lock);
+
+ do {
+ el = &ring->ring_cache[ring->rd_offset];
+ if (ch->tre_loc) {
+ bytes_to_read = min(usr_buf_remaining,
+ ch->tre_bytes_left);
+ *chain = 1;
+ mhi_log(MHI_MSG_ERROR,
+ "remaining buffered data size %d\n",
+ (int) ch->tre_bytes_left);
+ } else {
+ if (ring->rd_offset == ring->wr_offset) {
+ mhi_log(MHI_MSG_ERROR,
+ "nothing to read, returning\n");
+ bytes_read = 0;
+ goto exit;
+ }
+
+ if (ch->state == MHI_DEV_CH_STOPPED) {
+ mhi_log(MHI_MSG_ERROR,
+ "channel (%d) already stopped\n",
+ ch_id);
+ bytes_read = -1;
+ goto exit;
+ }
+
+ ch->tre_loc = el->tre.data_buf_ptr;
+ ch->tre_size = el->tre.len;
+ ch->tre_bytes_left = ch->tre_size;
+
+ mhi_log(MHI_MSG_ERROR,
+ "user_buf_remaining %d, ch->tre_size %d\n",
+ usr_buf_remaining, ch->tre_size);
+ bytes_to_read = min(usr_buf_remaining, ch->tre_size);
+ }
+
+ addr_offset = ch->tre_size - ch->tre_bytes_left;
+ read_from_loc = ch->tre_loc + addr_offset;
+ write_to_loc = (uint32_t) buf + (buf_size - usr_buf_remaining);
+
+ mhi_log(MHI_MSG_ERROR, "reading %d bytes from chan %d\n",
+ bytes_to_read, ch_id);
+
+ mhi_transfer_host_to_device((void *) write_to_loc,
+ read_from_loc, bytes_to_read, mhi_ctx);
+
+ bytes_read += bytes_to_read;
+ ch->tre_bytes_left -= bytes_to_read;
+ usr_buf_remaining -= bytes_to_read;
+ td_done = mhi_dev_check_tre_bytes_left(ch, ring, el, chain);
+ } while (usr_buf_remaining && !td_done);
+
+ if (td_done && ch->state == MHI_DEV_CH_PENDING_STOP) {
+ ch->state = MHI_DEV_CH_STOPPED;
+ rc = mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error while stopping channel (%d)\n", ch_id);
+ bytes_read = -1;
+ }
+ }
+exit:
+ mutex_unlock(&ch->ch_lock);
+ return bytes_read;
+}
+EXPORT_SYMBOL(mhi_dev_read_channel);
+
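+/*
+ * Advance the ring read offset past the remainder of the current transfer
+ * descriptor; a TD ends at the first TRE whose chain bit is clear.
+ */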
+static void skip_to_next_td(struct mhi_dev_channel *ch)
+{
+ struct mhi_dev_ring *ring = ch->ring;
+ union mhi_dev_ring_element_type *el;
+ uint32_t td_boundary_reached = 0;
+
+ ch->skip_td = 1;
+ el = &ring->ring_cache[ring->rd_offset];
+ while (ring->rd_offset != ring->wr_offset) {
+ if (td_boundary_reached) {
+ ch->skip_td = 0;
+ break;
+ }
+ if (!el->tre.chain)
+ td_boundary_reached = 1;
+ mhi_dev_ring_inc_index(ring, ring->rd_offset);
+ el = &ring->ring_cache[ring->rd_offset];
+ }
+}
+
+int mhi_dev_write_channel(struct mhi_dev_client *handle_client,
+ void *buf, size_t buf_size)
+{
+ struct mhi_dev_channel *ch;
+ struct mhi_dev_ring *ring;
+ union mhi_dev_ring_element_type *el;
+ enum mhi_dev_cmd_completion_code code = MHI_CMD_COMPL_CODE_INVALID;
+ int rc = 0;
+ uint64_t ch_id, skip_tres = 0, write_to_loc;
+ uint32_t read_from_loc;
+ size_t usr_buf_remaining = buf_size;
+ size_t usr_buf_offset = 0;
+ size_t bytes_to_write = 0;
+ size_t bytes_written = 0;
+ uint32_t tre_len = 0, suspend_wait_timeout = 0;
+
+ if (!handle_client) {
+ pr_err("%s: invalid client handle\n", __func__);
+ return -ENXIO;
+ }
+
+ if (!buf) {
+ pr_err("%s: invalid buffer to write data\n", __func__);
+ return -ENXIO;
+ }
+
+ mutex_lock(&mhi_ctx->mhi_write_test);
+
+ if (atomic_read(&mhi_ctx->is_suspended)) {
+ /*
+ * Expected usage is when there is a write
+ * to the MHI core -> notify SM.
+ */
+ rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_CORE_WAKEUP);
+ if (rc) {
+ pr_err("error sending core wakeup event\n");
+ mutex_unlock(&mhi_ctx->mhi_write_test);
+ return rc;
+ }
+ }
+
+ atomic_inc(&mhi_ctx->write_active);
+ while (atomic_read(&mhi_ctx->is_suspended) &&
+ suspend_wait_timeout < MHI_SUSPEND_WAIT_TIMEOUT) {
+ /* wait for the suspend to finish */
+ usleep_range(MHI_SUSPEND_WAIT_MIN, MHI_SUSPEND_WAIT_MAX);
+ suspend_wait_timeout++;
+ }
+
+ ch = handle_client->channel;
+ ch->wr_request_active = true;
+
+ ring = ch->ring;
+ ch_id = ch->ch_id;
+
+ mutex_lock(&ch->ch_lock);
+
+ if (ch->state == MHI_DEV_CH_STOPPED) {
+ mhi_log(MHI_MSG_ERROR,
+ "channel (%lld) already stopped\n", ch_id);
+ bytes_written = -1;
+ goto exit;
+ }
+
+ if (ch->state == MHI_DEV_CH_PENDING_STOP) {
+ if (mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx) < 0)
+ bytes_written = -1;
+ goto exit;
+ }
+
+ if (ch->skip_td)
+ skip_to_next_td(ch);
+
+ do {
+ if (ring->rd_offset == ring->wr_offset) {
+ mhi_log(MHI_MSG_INFO, "No TREs available\n");
+ break;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ tre_len = el->tre.len;
+
+ bytes_to_write = min(usr_buf_remaining, tre_len);
+		usr_buf_offset = buf_size - usr_buf_remaining;
+ read_from_loc = (uint32_t) buf + usr_buf_offset;
+ write_to_loc = el->tre.data_buf_ptr;
+ mhi_transfer_device_to_host(write_to_loc,
+ (void *) read_from_loc,
+ bytes_to_write, mhi_ctx);
+ bytes_written += bytes_to_write;
+ usr_buf_remaining -= bytes_to_write;
+
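+		/* completion code: OVERFLOW if the TD ends before the user
+		 * buffer drains, EOB at an in-TD interrupt boundary, EOT once
+		 * the buffer is fully written; a chained TRE at EOT means the
+		 * rest of the TD must be skipped
+		 */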
+ if (usr_buf_remaining) {
+ if (!el->tre.chain)
+ code = MHI_CMD_COMPL_CODE_OVERFLOW;
+ else if (el->tre.ieob)
+ code = MHI_CMD_COMPL_CODE_EOB;
+ } else {
+ if (el->tre.chain)
+ skip_tres = 1;
+ code = MHI_CMD_COMPL_CODE_EOT;
+ }
+
+ if (mhi_dev_send_completion_event(ch,
+ ring->rd_offset, bytes_to_write, code) < 0) {
+ mhi_log(MHI_MSG_ERROR,
+ "error sending completion event ch_id:%lld\n",
+ ch_id);
+ }
+
+ if (ch->state == MHI_DEV_CH_PENDING_STOP)
+ break;
+
+ mhi_dev_ring_inc_index(ring, ring->rd_offset);
+ } while (!skip_tres && usr_buf_remaining);
+
+ if (skip_tres)
+ skip_to_next_td(ch);
+
+ if (ch->state == MHI_DEV_CH_PENDING_STOP) {
+ rc = mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "channel (%lld) stop failed\n", ch_id);
+ }
+ }
+exit:
+ mutex_unlock(&ch->ch_lock);
+ atomic_dec(&mhi_ctx->write_active);
+ mutex_unlock(&mhi_ctx->mhi_write_test);
+ return bytes_written;
+}
+EXPORT_SYMBOL(mhi_dev_write_channel);
+
+static void mhi_dev_enable(struct work_struct *work)
+{
+ int rc = 0;
+ struct ep_pcie_msi_config msi_cfg;
+ struct mhi_dev *mhi = container_of(work,
+ struct mhi_dev, ring_init_cb_work);
+
+ enum mhi_dev_state state;
+ uint32_t max_cnt = 0;
+
+ rc = ipa_dma_init();
+ if (rc) {
+ pr_err("ipa dma init failed\n");
+ return;
+ }
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ pr_err("ipa enable failed\n");
+ return;
+ }
+
+ rc = mhi_dev_ring_init(mhi);
+ if (rc) {
+ pr_err("MHI dev ring init failed\n");
+ return;
+ }
+
+ /* Invoke MHI SM when device is in RESET state */
+ mhi_dev_sm_init(mhi);
+
+ /* set the env before setting the ready bit */
+ rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE);
+ if (rc) {
+ pr_err("%s: env setting failed\n", __func__);
+ return;
+ }
+ mhi_uci_init();
+
+ /* All set...let's notify the host */
+ mhi_dev_sm_set_ready();
+
+ rc = ep_pcie_get_msi_config(mhi->phandle, &msi_cfg);
+ if (rc)
+		pr_warn("MHI: error getting MSI config\n");
+
+ rc = mhi_dev_mmio_get_mhi_state(mhi, &state);
+ if (rc) {
+ pr_err("%s: get mhi state failed\n", __func__);
+ return;
+ }
+
+ while (state != MHI_DEV_M0_STATE && max_cnt < MHI_DEV_M0_MAX_CNT) {
+ /* Wait for Host to set the M0 state */
+ usleep_range(MHI_M0_WAIT_MIN_USLEEP, MHI_M0_WAIT_MAX_USLEEP);
+ rc = mhi_dev_mmio_get_mhi_state(mhi, &state);
+ if (rc) {
+ pr_err("%s: get mhi state failed\n", __func__);
+ return;
+ }
+ max_cnt++;
+ }
+
+ mhi_log(MHI_MSG_INFO, "state:%d\n", state);
+
+ if (state == MHI_DEV_M0_STATE) {
+ rc = mhi_dev_cache_host_cfg(mhi);
+ if (rc) {
+ pr_err("Failed to cache the host config\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE);
+ if (rc) {
+ pr_err("%s: env setting failed\n", __func__);
+ return;
+ }
+ } else {
+ pr_err("MHI device failed to enter M0\n");
+ return;
+ }
+
+ rc = mhi_hwc_init(mhi_ctx);
+ if (rc) {
+ pr_err("error during hwc_init\n");
+ return;
+ }
+}
+
+static void mhi_ring_init_cb(void *data)
+{
+ struct mhi_dev *mhi = data;
+
+ if (!mhi) {
+ pr_err("Invalid MHI ctx\n");
+ return;
+ }
+
+ queue_work(mhi->ring_init_wq, &mhi->ring_init_cb_work);
+}
+
+static int get_device_tree_data(struct platform_device *pdev)
+{
+ struct mhi_dev *mhi;
+ int rc = 0;
+ struct resource *res_mem = NULL;
+
+ mhi = devm_kzalloc(&pdev->dev,
+ sizeof(struct mhi_dev), GFP_KERNEL);
+ if (!mhi)
+ return -ENOMEM;
+
+ mhi->pdev = pdev;
+ mhi->dev = &pdev->dev;
+ res_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "mhi_mmio_base");
+ if (!res_mem) {
+ rc = -EINVAL;
+ pr_err("Request MHI MMIO physical memory region failed\n");
+ return rc;
+ }
+
+ mhi->mmio_base_pa_addr = res_mem->start;
+ mhi->mmio_base_addr = ioremap_nocache(res_mem->start, MHI_1K_SIZE);
+ if (!mhi->mmio_base_addr) {
+ pr_err("Failed to IO map MMIO registers.\n");
+ rc = -EINVAL;
+ return rc;
+ }
+
+ res_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ipa_uc_mbox_crdb");
+ if (!res_mem) {
+ rc = -EINVAL;
+ pr_err("Request IPA_UC_MBOX CRDB physical region failed\n");
+ return rc;
+ }
+
+ mhi->ipa_uc_mbox_crdb = res_mem->start;
+
+ res_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ipa_uc_mbox_erdb");
+ if (!res_mem) {
+ rc = -EINVAL;
+ pr_err("Request IPA_UC_MBOX ERDB physical region failed\n");
+ return rc;
+ }
+
+ mhi->ipa_uc_mbox_erdb = res_mem->start;
+
+ mhi_ctx = mhi;
+
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,mhi-ifc-id",
+ &mhi_ctx->ifc_id);
+
+ if (rc) {
+ pr_err("qcom,mhi-ifc-id does not exist.\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,mhi-ep-msi",
+ &mhi_ctx->mhi_ep_msi_num);
+ if (rc) {
+ pr_err("qcom,mhi-ep-msi does not exist.\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,mhi-version",
+ &mhi_ctx->mhi_version);
+ if (rc) {
+ pr_err("qcom,mhi-version does not exist.\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int mhi_init(struct mhi_dev *mhi)
+{
+ int rc = 0, i = 0;
+ struct platform_device *pdev = mhi->pdev;
+
+ rc = mhi_dev_mmio_init(mhi);
+ if (rc) {
+ pr_err("Failed to update the MMIO init\n");
+ return rc;
+ }
+
+ mhi->ring = devm_kzalloc(&pdev->dev,
+ (sizeof(struct mhi_dev_ring) *
+ (mhi->cfg.channels + mhi->cfg.event_rings + 1)),
+ GFP_KERNEL);
+ if (!mhi->ring)
+ return -ENOMEM;
+
+ mhi->ch = devm_kzalloc(&pdev->dev,
+ (sizeof(struct mhi_dev_channel) *
+ (mhi->cfg.channels)), GFP_KERNEL);
+ if (!mhi->ch)
+ return -ENOMEM;
+
+ for (i = 0; i < mhi->cfg.channels; i++)
+ mutex_init(&mhi->ch[i].ch_lock);
+
+ mhi->mmio_backup = devm_kzalloc(&pdev->dev, MHI_DEV_MMIO_RANGE,
+ GFP_KERNEL);
+ if (!mhi->mmio_backup)
+ return -ENOMEM;
+
+ mhi_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES, "mhi", 0);
+ if (mhi_ipc_log == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to create IPC logging context\n");
+ }
+
+ return 0;
+}
+
+static int mhi_dev_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+
+ if (pdev->dev.of_node) {
+ rc = get_device_tree_data(pdev);
+ if (rc) {
+ pr_err("Error reading MHI Dev DT\n");
+ return rc;
+ }
+ }
+
+ mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id);
+ if (!mhi_ctx->phandle) {
+ pr_err("PCIe driver is not ready yet.\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (ep_pcie_get_linkstatus(mhi_ctx->phandle) != EP_PCIE_LINK_ENABLED) {
+ pr_err("PCIe link is not ready to use.\n");
+ return -EPROBE_DEFER;
+ }
+
+ INIT_WORK(&mhi_ctx->chdb_ctrl_work, mhi_dev_scheduler);
+
+ mhi_ctx->pending_ring_wq = alloc_workqueue("mhi_pending_wq",
+ WQ_HIGHPRI, 0);
+ if (!mhi_ctx->pending_ring_wq) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ INIT_WORK(&mhi_ctx->pending_work, mhi_dev_process_ring_pending);
+
+ INIT_WORK(&mhi_ctx->ring_init_cb_work, mhi_dev_enable);
+
+ mhi_ctx->ring_init_wq = alloc_workqueue("mhi_ring_init_cb_wq",
+ WQ_HIGHPRI, 0);
+ if (!mhi_ctx->ring_init_wq) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&mhi_ctx->event_ring_list);
+ INIT_LIST_HEAD(&mhi_ctx->process_ring_list);
+ mutex_init(&mhi_ctx->mhi_lock);
+ mutex_init(&mhi_ctx->mhi_event_lock);
+ mutex_init(&mhi_ctx->mhi_write_test);
+
+ rc = mhi_init(mhi_ctx);
+ if (rc)
+ return rc;
+
+ mhi_ctx->dma_cache = dma_alloc_coherent(&pdev->dev,
+ (TRB_MAX_DATA_SIZE * 4),
+ &mhi_ctx->cache_dma_handle, GFP_KERNEL);
+ if (!mhi_ctx->dma_cache)
+ return -ENOMEM;
+
+ mhi_ctx->read_handle = dma_alloc_coherent(&pdev->dev,
+ (TRB_MAX_DATA_SIZE * 4),
+ &mhi_ctx->read_dma_handle,
+ GFP_KERNEL);
+ if (!mhi_ctx->read_handle)
+ return -ENOMEM;
+
+ mhi_ctx->write_handle = dma_alloc_coherent(&pdev->dev,
+ (TRB_MAX_DATA_SIZE * 24),
+ &mhi_ctx->write_dma_handle,
+ GFP_KERNEL);
+ if (!mhi_ctx->write_handle)
+ return -ENOMEM;
+
+ rc = mhi_dev_mmio_write(mhi_ctx, MHIVER, mhi_ctx->mhi_version);
+ if (rc) {
+ pr_err("Failed to update the MHI version\n");
+ return rc;
+ }
+
+ mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT |
+ EP_PCIE_EVENT_PM_D3_COLD |
+ EP_PCIE_EVENT_PM_D0 |
+ EP_PCIE_EVENT_PM_RST_DEAST |
+ EP_PCIE_EVENT_MHI_A7 |
+ EP_PCIE_EVENT_LINKDOWN;
+ mhi_ctx->event_reg.user = mhi_ctx;
+ mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
+ mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler;
+
+ rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg);
+ if (rc) {
+ pr_err("Failed to register for events from PCIe\n");
+ return rc;
+ }
+
+ pr_err("Registering with IPA\n");
+
+ rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx);
+ if (rc < 0) {
+ if (rc == -EEXIST) {
+ mhi_ring_init_cb(mhi_ctx);
+ } else {
+ pr_err("Error calling IPA cb with %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int mhi_dev_remove(struct platform_device *pdev)
+{
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id mhi_dev_match_table[] = {
+ { .compatible = "qcom,msm-mhi-dev" },
+ {}
+};
+
+static struct platform_driver mhi_dev_driver = {
+ .driver = {
+ .name = "qcom,msm-mhi-dev",
+ .of_match_table = mhi_dev_match_table,
+ },
+ .probe = mhi_dev_probe,
+ .remove = mhi_dev_remove,
+};
+
+module_param(mhi_msg_lvl, uint, S_IRUGO | S_IWUSR);
+module_param(mhi_ipc_msg_lvl, uint, S_IRUGO | S_IWUSR);
+
+MODULE_PARM_DESC(mhi_msg_lvl, "MHI message verbosity level");
+MODULE_PARM_DESC(mhi_ipc_msg_lvl, "MHI IPC logging verbosity level");
+
+static int __init mhi_dev_init(void)
+{
+ return platform_driver_register(&mhi_dev_driver);
+}
+module_init(mhi_dev_init);
+
+static void __exit mhi_dev_exit(void)
+{
+ platform_driver_unregister(&mhi_dev_driver);
+}
+module_exit(mhi_dev_exit);
+
+MODULE_DESCRIPTION("MHI device driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h
new file mode 100644
index 000000000000..6b3c6a8a78b2
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi.h
@@ -0,0 +1,1126 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MHI_H
+#define __MHI_H
+
+#include <linux/msm_ep_pcie.h>
+#include <linux/types.h>
+#include <linux/ipc_logging.h>
+#include <linux/dma-mapping.h>
+
+/**
+ * MHI control data structures allotted by the host, including
+ * channel context array, event context array, command context and rings.
+ */
+
+/* Channel context state */
+enum mhi_dev_ch_ctx_state {
+ MHI_DEV_CH_STATE_DISABLED,
+ MHI_DEV_CH_STATE_ENABLED,
+ MHI_DEV_CH_STATE_RUNNING,
+ MHI_DEV_CH_STATE_SUSPENDED,
+ MHI_DEV_CH_STATE_STOP,
+ MHI_DEV_CH_STATE_ERROR,
+ MHI_DEV_CH_STATE_RESERVED,
+ MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
+};
+
+/* Channel type */
+enum mhi_dev_ch_ctx_type {
+ MHI_DEV_CH_TYPE_NONE,
+ MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
+ MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
+ MHI_DEV_CH_RESERVED
+};
+
+/* Channel context type */
+struct mhi_dev_ch_ctx {
+ enum mhi_dev_ch_ctx_state ch_state;
+ enum mhi_dev_ch_ctx_type ch_type;
+ uint32_t err_indx;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
+enum mhi_dev_ring_element_type_id {
+ MHI_DEV_RING_EL_INVALID = 0,
+ MHI_DEV_RING_EL_NOOP = 1,
+ MHI_DEV_RING_EL_TRANSFER = 2,
+ MHI_DEV_RING_EL_RESET = 16,
+ MHI_DEV_RING_EL_STOP = 17,
+ MHI_DEV_RING_EL_START = 18,
+ MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
+ MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
+ MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
+ MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
+ MHI_DEV_RING_EL_UNDEF
+};
+
+enum mhi_dev_ring_state {
+ RING_STATE_UINT = 0,
+ RING_STATE_IDLE,
+ RING_STATE_PENDING,
+};
+
+enum mhi_dev_ring_type {
+ RING_TYPE_CMD = 0,
+ RING_TYPE_ER,
+ RING_TYPE_CH,
+ RING_TYPE_INVAL
+};
+
+/* Event context interrupt moderation */
+enum mhi_dev_evt_ctx_int_mod_timer {
+ MHI_DEV_EVT_INT_MODERATION_DISABLED
+};
+
+/* Event ring type */
+enum mhi_dev_evt_ctx_event_ring_type {
+ MHI_DEV_EVT_TYPE_DEFAULT,
+ MHI_DEV_EVT_TYPE_VALID,
+ MHI_DEV_EVT_RESERVED
+};
+
+/* Event ring context type */
+struct mhi_dev_ev_ctx {
+ uint32_t res1:16;
+ enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
+ enum mhi_dev_evt_ctx_event_ring_type ertype;
+ uint32_t msivec;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
+/* Command context */
+struct mhi_dev_cmd_ctx {
+ uint32_t res1;
+ uint32_t res2;
+ uint32_t res3;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
+/* generic context */
+struct mhi_dev_gen_ctx {
+ uint32_t res1;
+ uint32_t res2;
+ uint32_t res3;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
+/* Transfer ring element */
+struct mhi_dev_transfer_ring_element {
+ uint64_t data_buf_ptr;
+ uint32_t len:16;
+ uint32_t res1:16;
+ uint32_t chain:1;
+ uint32_t res2:7;
+ uint32_t ieob:1;
+ uint32_t ieot:1;
+ uint32_t bei:1;
+ uint32_t res3:5;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t res4:8;
+} __packed;
+
+/* Command ring element */
+/* Command ring No op command */
+struct mhi_dev_cmd_ring_op {
+ uint64_t res1;
+ uint32_t res2;
+ uint32_t res3:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+/* Command ring reset channel command */
+struct mhi_dev_cmd_ring_reset_channel_cmd {
+ uint64_t res1;
+ uint32_t res2;
+ uint32_t res3:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+/* Command ring stop channel command */
+struct mhi_dev_cmd_ring_stop_channel_cmd {
+ uint64_t res1;
+ uint32_t res2;
+ uint32_t res3:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+/* Command ring start channel command */
+struct mhi_dev_cmd_ring_start_channel_cmd {
+ uint64_t res1;
+ uint32_t seqnum;
+ uint32_t reliable:1;
+ uint32_t res2:15;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+enum mhi_dev_cmd_completion_code {
+ MHI_CMD_COMPL_CODE_INVALID = 0,
+ MHI_CMD_COMPL_CODE_SUCCESS = 1,
+ MHI_CMD_COMPL_CODE_EOT = 2,
+ MHI_CMD_COMPL_CODE_OVERFLOW = 3,
+ MHI_CMD_COMPL_CODE_EOB = 4,
+ MHI_CMD_COMPL_CODE_UNDEFINED = 16,
+ MHI_CMD_COMPL_CODE_RING_EL = 17,
+ MHI_CMD_COMPL_CODE_RES
+};
+
+/* Event ring elements */
+/* Transfer completion event */
+struct mhi_dev_event_ring_transfer_completion {
+ uint64_t ptr;
+ uint32_t len:16;
+ uint32_t res1:8;
+ enum mhi_dev_cmd_completion_code code:8;
+ uint32_t res2:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+/* Command completion event */
+struct mhi_dev_event_ring_cmd_completion {
+ uint64_t ptr;
+ uint32_t res1:24;
+ enum mhi_dev_cmd_completion_code code:8;
+ uint32_t res2:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t res3:8;
+} __packed;
+
+enum mhi_dev_state {
+ MHI_DEV_RESET_STATE = 0,
+ MHI_DEV_READY_STATE,
+ MHI_DEV_M0_STATE,
+ MHI_DEV_M1_STATE,
+ MHI_DEV_M2_STATE,
+ MHI_DEV_M3_STATE,
+ MHI_DEV_MAX_STATE,
+ MHI_DEV_SYSERR_STATE = 0xff
+};
+
+/* MHI state change event */
+struct mhi_dev_event_ring_state_change {
+ uint64_t ptr;
+ uint32_t res1:24;
+ enum mhi_dev_state mhistate:8;
+ uint32_t res2:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t res3:8;
+} __packed;
+
+enum mhi_dev_execenv {
+ MHI_DEV_SBL_EE = 1,
+ MHI_DEV_AMSS_EE = 2,
+ MHI_DEV_UNRESERVED
+};
+
+/* EE state change event */
+struct mhi_dev_event_ring_ee_state_change {
+ uint64_t ptr;
+ uint32_t res1:24;
+ enum mhi_dev_execenv execenv:8;
+ uint32_t res2:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t res3:8;
+} __packed;
+
+/* Generic element used to parse common fields such as type and channel id */
+struct mhi_dev_ring_generic {
+ uint64_t ptr;
+ uint32_t res1:24;
+ enum mhi_dev_state mhistate:8;
+ uint32_t res2:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+struct mhi_config {
+ uint32_t mhi_reg_len;
+ uint32_t version;
+ uint32_t event_rings;
+ uint32_t channels;
+ uint32_t chdb_offset;
+ uint32_t erdb_offset;
+};
+
+#define NUM_CHANNELS 128
+#define HW_CHANNEL_BASE 100
+#define HW_CHANNEL_END 107
+#define MHI_ENV_VALUE 2
+#define MHI_MASK_ROWS_CH_EV_DB 4
+#define TRB_MAX_DATA_SIZE 4096
+
+/* Possible ring element types */
+union mhi_dev_ring_element_type {
+ struct mhi_dev_cmd_ring_op cmd_no_op;
+ struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
+ struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
+ struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
+ struct mhi_dev_transfer_ring_element tre;
+ struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
+ struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
+ struct mhi_dev_event_ring_state_change evt_state_change;
+ struct mhi_dev_event_ring_ee_state_change evt_ee_state;
+ struct mhi_dev_ring_generic generic;
+};
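+
+/*
+ * Usage sketch (illustrative; start_channel and process_tre are
+ * hypothetical helpers): a cached element is first viewed through the
+ * generic member to find its type, then reinterpreted as the matching
+ * member:
+ *
+ *	union mhi_dev_ring_element_type *el = &ring->ring_cache[offset];
+ *
+ *	switch (el->generic.type) {
+ *	case MHI_DEV_RING_EL_START:
+ *		start_channel(el->cmd_start.chid);
+ *		break;
+ *	case MHI_DEV_RING_EL_TRANSFER:
+ *		process_tre(&el->tre);
+ *		break;
+ *	}
+ */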
+
+/* Transfer ring element type */
+union mhi_dev_ring_ctx {
+ struct mhi_dev_cmd_ctx cmd;
+ struct mhi_dev_ev_ctx ev;
+ struct mhi_dev_ch_ctx ch;
+ struct mhi_dev_gen_ctx generic;
+};
+
+/* MHI host Control and data address region */
+struct mhi_host_addr {
+ uint32_t ctrl_base_lsb;
+ uint32_t ctrl_base_msb;
+ uint32_t ctrl_limit_lsb;
+ uint32_t ctrl_limit_msb;
+ uint32_t data_base_lsb;
+ uint32_t data_base_msb;
+ uint32_t data_limit_lsb;
+ uint32_t data_limit_msb;
+};
+
+/* MHI physical and virtual address region */
+struct mhi_meminfo {
+ struct device *dev;
+ uintptr_t pa_aligned;
+ uintptr_t pa_unaligned;
+ uintptr_t va_aligned;
+ uintptr_t va_unaligned;
+ uintptr_t size;
+};
+
+struct mhi_addr {
+ uint64_t host_pa;
+ uintptr_t device_pa;
+ uintptr_t device_va;
+ uint32_t size;
+};
+
+struct mhi_interrupt_state {
+ uint32_t mask;
+ uint32_t status;
+};
+
+enum mhi_dev_channel_state {
+ MHI_DEV_CH_UNINT,
+ MHI_DEV_CH_STARTED,
+ MHI_DEV_CH_PENDING_START,
+ MHI_DEV_CH_PENDING_STOP,
+ MHI_DEV_CH_STOPPED,
+ MHI_DEV_CH_CLOSED,
+};
+
+enum mhi_dev_ch_operation {
+ MHI_DEV_OPEN_CH,
+ MHI_DEV_CLOSE_CH,
+ MHI_DEV_READ_CH,
+ MHI_DEV_READ_WR,
+ MHI_DEV_POLL,
+};
+
+struct mhi_dev_channel;
+
+struct mhi_dev_ring {
+ struct list_head list;
+ struct mhi_dev *mhi_dev;
+
+ uint32_t id;
+ uint32_t rd_offset;
+ uint32_t wr_offset;
+ uint32_t ring_size;
+
+ enum mhi_dev_ring_type type;
+ enum mhi_dev_ring_state state;
+
+ /* device virtual address location of the cached host ring ctx data */
+ union mhi_dev_ring_element_type *ring_cache;
+ /* Physical address of the cached ring copy on the device side */
+ dma_addr_t ring_cache_dma_handle;
+ /* Physical address of the host where we will write/read to/from */
+ struct mhi_addr ring_shadow;
+ /* Ring type - cmd, event, transfer ring and its rp/wp... */
+ union mhi_dev_ring_ctx *ring_ctx;
+ /* ring_ctx_shadow -> tracking ring_ctx in the host */
+ union mhi_dev_ring_ctx *ring_ctx_shadow;
+ void (*ring_cb)(struct mhi_dev *dev,
+ union mhi_dev_ring_element_type *el,
+ void *ctx);
+};
+
+static inline void mhi_dev_ring_inc_index(struct mhi_dev_ring *ring,
+ uint32_t rd_offset)
+{
+ ring->rd_offset++;
+ if (ring->rd_offset == ring->ring_size)
+ ring->rd_offset = 0;
+}
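+
+/*
+ * Usage sketch (illustrative): a caller drains cached elements between
+ * the read and write offsets. Note the rd_offset argument is currently
+ * unused; the helper always advances ring->rd_offset with wrap-around:
+ *
+ *	while (ring->rd_offset != ring->wr_offset) {
+ *		mhi_dev_process_ring_element(ring, ring->rd_offset);
+ *		mhi_dev_ring_inc_index(ring, ring->rd_offset);
+ *	}
+ */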
+
+/* trace information intended for use with read/write */
+#define TRACE_DATA_MAX 128
+#define MHI_DEV_DATA_MAX 512
+
+#define MHI_DEV_MMIO_RANGE 0xc80
+
+enum cb_reason {
+ MHI_DEV_TRE_AVAILABLE = 0,
+};
+
+struct mhi_dev_client_cb_reason {
+ uint32_t ch_id;
+ enum cb_reason reason;
+};
+
+struct mhi_dev_client {
+ struct list_head list;
+ struct mhi_dev_channel *channel;
+ void (*event_trigger)(struct mhi_dev_client_cb_reason *cb);
+
+ /* mhi_dev calls are fully synchronous -- only one call may be
+ * active per client at a time for now.
+ */
+ struct mutex write_lock;
+ wait_queue_head_t wait;
+
+ /* trace logs */
+ spinlock_t tr_lock;
+ unsigned tr_head;
+ unsigned tr_tail;
+ struct mhi_dev_trace *tr_log;
+
+ /* client buffers */
+ struct mhi_dev_iov *iov;
+ uint32_t nr_iov;
+};
+
+struct mhi_dev_channel {
+ struct list_head list;
+ struct list_head clients;
+ /* synchronization for changing channel state,
+ * adding/removing clients, mhi_dev callbacks, etc
+ */
+ spinlock_t lock;
+
+ struct mhi_dev_ring *ring;
+
+ enum mhi_dev_channel_state state;
+ uint32_t ch_id;
+ enum mhi_dev_ch_ctx_type ch_type;
+ struct mutex ch_lock;
+ /* client which the current inbound/outbound message is for */
+ struct mhi_dev_client *active_client;
+
+ /* current TRE being processed */
+ uint64_t tre_loc;
+ /* current TRE size */
+ uint32_t tre_size;
+ /* tre bytes left to read/write */
+ uint32_t tre_bytes_left;
+ /* td size being read/written from/to so far */
+ uint32_t td_size;
+ bool wr_request_active;
+ bool skip_td;
+};
+
+/* MHI device structure */
+struct mhi_dev {
+ struct platform_device *pdev;
+ struct device *dev;
+ /* MHI MMIO related members */
+ phys_addr_t mmio_base_pa_addr;
+ void *mmio_base_addr;
+ phys_addr_t ipa_uc_mbox_crdb;
+ phys_addr_t ipa_uc_mbox_erdb;
+
+ uint32_t *mmio_backup;
+ struct mhi_config cfg;
+ bool mmio_initialized;
+
+ /* Host control base information */
+ struct mhi_host_addr host_addr;
+ struct mhi_addr ctrl_base;
+ struct mhi_addr data_base;
+ struct mhi_addr ch_ctx_shadow;
+ struct mhi_dev_ch_ctx *ch_ctx_cache;
+ dma_addr_t ch_ctx_cache_dma_handle;
+ struct mhi_addr ev_ctx_shadow;
+ struct mhi_dev_ch_ctx *ev_ctx_cache;
+ dma_addr_t ev_ctx_cache_dma_handle;
+
+ struct mhi_addr cmd_ctx_shadow;
+ struct mhi_dev_ch_ctx *cmd_ctx_cache;
+ dma_addr_t cmd_ctx_cache_dma_handle;
+ struct mhi_dev_ring *ring;
+ struct mhi_dev_channel *ch;
+
+ int ctrl_int;
+ int cmd_int;
+ /* CHDB and EVDB device interrupt state */
+ struct mhi_interrupt_state chdb[4];
+ struct mhi_interrupt_state evdb[4];
+
+ /* Scheduler work */
+ struct work_struct chdb_ctrl_work;
+ struct mutex mhi_lock;
+ struct mutex mhi_event_lock;
+
+ /* process a ring element */
+ struct workqueue_struct *pending_ring_wq;
+ struct work_struct pending_work;
+
+ struct list_head event_ring_list;
+ struct list_head process_ring_list;
+
+ uint32_t cmd_ring_idx;
+ uint32_t ev_ring_start;
+ uint32_t ch_ring_start;
+
+ /* IPA Handles */
+ u32 ipa_clnt_hndl[4];
+ struct workqueue_struct *ring_init_wq;
+ struct work_struct ring_init_cb_work;
+
+ /* EP PCIe registration */
+ struct ep_pcie_register_event event_reg;
+ u32 ifc_id;
+ struct ep_pcie_hw *phandle;
+
+ atomic_t write_active;
+ atomic_t is_suspended;
+ struct mutex mhi_write_test;
+ u32 mhi_ep_msi_num;
+ u32 mhi_version;
+ void *dma_cache;
+ void *read_handle;
+ void *write_handle;
+ /* Physical scratch buffer for writing control data to the host */
+ dma_addr_t cache_dma_handle;
+ /*
+ * Physical scratch buffer address used when reading data
+ * from the host in mhi_read()
+ */
+ dma_addr_t read_dma_handle;
+ /*
+ * Physical scratch buffer address used when writing data to the
+ * host region from the device in mhi_write()
+ */
+ dma_addr_t write_dma_handle;
+};
+
+enum mhi_msg_level {
+ MHI_MSG_VERBOSE = 0x0,
+ MHI_MSG_INFO = 0x1,
+ MHI_MSG_DBG = 0x2,
+ MHI_MSG_WARNING = 0x3,
+ MHI_MSG_ERROR = 0x4,
+ MHI_MSG_CRITICAL = 0x5,
+ MHI_MSG_reserved = 0x80000000
+};
+
+extern enum mhi_msg_level mhi_msg_lvl;
+extern enum mhi_msg_level mhi_ipc_msg_lvl;
+extern void *mhi_ipc_log;
+
+#define mhi_log(_msg_lvl, _msg, ...) do { \
+ if (_msg_lvl >= mhi_msg_lvl) { \
+ pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \
+ } \
+ if (mhi_ipc_log && (_msg_lvl >= mhi_ipc_msg_lvl)) { \
+ ipc_log_string(mhi_ipc_log, \
+ "[%s] " _msg, __func__, ##__VA_ARGS__); \
+ } \
+} while (0)
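+
+/*
+ * Usage sketch: messages at or above mhi_msg_lvl reach the kernel log,
+ * and at or above mhi_ipc_msg_lvl the IPC log buffer created during
+ * init, e.g.
+ *
+ *	mhi_log(MHI_MSG_ERROR, "ch %d start failed, rc = %d\n", ch_id, rc);
+ */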
+
+/* SW channel client list */
+enum mhi_client_channel {
+ MHI_CLIENT_LOOPBACK_OUT = 0,
+ MHI_CLIENT_LOOPBACK_IN = 1,
+ MHI_CLIENT_SAHARA_OUT = 2,
+ MHI_CLIENT_SAHARA_IN = 3,
+ MHI_CLIENT_DIAG_OUT = 4,
+ MHI_CLIENT_DIAG_IN = 5,
+ MHI_CLIENT_SSR_OUT = 6,
+ MHI_CLIENT_SSR_IN = 7,
+ MHI_CLIENT_QDSS_OUT = 8,
+ MHI_CLIENT_QDSS_IN = 9,
+ MHI_CLIENT_EFS_OUT = 10,
+ MHI_CLIENT_EFS_IN = 11,
+ MHI_CLIENT_MBIM_OUT = 12,
+ MHI_CLIENT_MBIM_IN = 13,
+ MHI_CLIENT_QMI_OUT = 14,
+ MHI_CLIENT_QMI_IN = 15,
+ MHI_CLIENT_IP_CTRL_0_OUT = 16,
+ MHI_CLIENT_IP_CTRL_0_IN = 17,
+ MHI_CLIENT_IP_CTRL_1_OUT = 18,
+ MHI_CLIENT_IP_CTRL_1_IN = 19,
+ MHI_CLIENT_DCI_OUT = 20,
+ MHI_CLIENT_DCI_IN = 21,
+ MHI_CLIENT_IP_CTRL_3_OUT = 22,
+ MHI_CLIENT_IP_CTRL_3_IN = 23,
+ MHI_CLIENT_IP_CTRL_4_OUT = 24,
+ MHI_CLIENT_IP_CTRL_4_IN = 25,
+ MHI_CLIENT_IP_CTRL_5_OUT = 26,
+ MHI_CLIENT_IP_CTRL_5_IN = 27,
+ MHI_CLIENT_IP_CTRL_6_OUT = 28,
+ MHI_CLIENT_IP_CTRL_6_IN = 29,
+ MHI_CLIENT_IP_CTRL_7_OUT = 30,
+ MHI_CLIENT_IP_CTRL_7_IN = 31,
+ MHI_CLIENT_DUN_OUT = 32,
+ MHI_CLIENT_DUN_IN = 33,
+ MHI_CLIENT_IP_SW_0_OUT = 34,
+ MHI_CLIENT_IP_SW_0_IN = 35,
+ MHI_CLIENT_IP_SW_1_OUT = 36,
+ MHI_CLIENT_IP_SW_1_IN = 37,
+ MHI_CLIENT_IP_SW_2_OUT = 38,
+ MHI_CLIENT_IP_SW_2_IN = 39,
+ MHI_CLIENT_IP_SW_3_OUT = 40,
+ MHI_CLIENT_IP_SW_3_IN = 41,
+ MHI_CLIENT_CSVT_OUT = 42,
+ MHI_CLIENT_CSVT_IN = 43,
+ MHI_CLIENT_SMCT_OUT = 44,
+ MHI_CLIENT_SMCT_IN = 45,
+ MHI_MAX_SOFTWARE_CHANNELS = 46,
+ MHI_CLIENT_TEST_OUT = 60,
+ MHI_CLIENT_TEST_IN = 61,
+ MHI_CLIENT_RESERVED_1_LOWER = 62,
+ MHI_CLIENT_RESERVED_1_UPPER = 99,
+ MHI_CLIENT_IP_HW_0_OUT = 100,
+ MHI_CLIENT_IP_HW_0_IN = 101,
+ MHI_CLIENT_RESERVED_2_LOWER = 102,
+ MHI_CLIENT_RESERVED_2_UPPER = 127,
+ MHI_MAX_CHANNELS = 102,
+};
+
+struct mhi_dev_iov {
+ void *addr;
+ uint32_t buf_size;
+};
+
+/**
+ * mhi_dev_open_channel() - Channel open for a given client, done prior
+ *		to read/write.
+ * @chan_id: Software Channel ID for the assigned client.
+ * @handle_client: Pointer to the client handle populated on success.
+ * @event_trigger: Callback used to notify the client of channel events.
+ */
+int mhi_dev_open_channel(uint32_t chan_id,
+ struct mhi_dev_client **handle_client,
+ void (*event_trigger)(struct mhi_dev_client_cb_reason *cb));
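+
+/*
+ * Usage sketch (illustrative; schedule_client_read is a hypothetical
+ * helper and error handling is omitted): open the loopback OUT channel
+ * and register a notification callback:
+ *
+ *	static void client_cb(struct mhi_dev_client_cb_reason *reason)
+ *	{
+ *		if (reason->reason == MHI_DEV_TRE_AVAILABLE)
+ *			schedule_client_read(reason->ch_id);
+ *	}
+ *
+ *	struct mhi_dev_client *client;
+ *
+ *	rc = mhi_dev_open_channel(MHI_CLIENT_LOOPBACK_OUT, &client,
+ *					client_cb);
+ */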
+/**
+ * mhi_dev_close_channel() - Channel close for a given client.
+ */
+int mhi_dev_close_channel(struct mhi_dev_client *handle_client);
+
+/**
+ * mhi_dev_read_channel() - Channel read for a given client.
+ * @handle_client: Client handle issued during mhi_dev_open_channel.
+ * @buf: Pointer to the buffer used by the MHI core to copy the data received
+ *		from the Host.
+ * @buf_size: Size of the buffer.
+ * @chain: Indicates whether the received data is part of a chained packet.
+ */
+int mhi_dev_read_channel(struct mhi_dev_client *handle_client,
+ void *buf, uint32_t buf_size, uint32_t *chain);
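+
+/*
+ * Usage sketch (illustrative): read one packet into a pre-allocated
+ * buffer; the return value and chain semantics follow the kernel-doc
+ * above:
+ *
+ *	uint32_t chain = 0;
+ *
+ *	rc = mhi_dev_read_channel(client, buf, TRB_MAX_DATA_SIZE, &chain);
+ */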
+
+/**
+ * mhi_dev_write_channel() - Channel write for a given software client.
+ * @handle_client: Client Handle issued during mhi_dev_open_channel
+ * @buf: Pointer to the buffer used by the MHI core to copy the data from the
+ * device to the host.
+ * @buf_size: Size of the buffer.
+ */
+int mhi_dev_write_channel(struct mhi_dev_client *handle_client, void *buf,
+ uint32_t buf_size);
+
+/**
+ * mhi_dev_channel_isempty() - Checks whether there are any pending TREs
+ *		to process.
+ * @handle: Client handle issued during mhi_dev_open_channel.
+ */
+int mhi_dev_channel_isempty(struct mhi_dev_client *handle);
+
+struct mhi_dev_trace {
+ unsigned timestamp;
+ uint32_t data[TRACE_DATA_MAX];
+};
+
+/* MHI Ring related functions */
+
+/**
+ * mhi_ring_init() - Initializes the ring to the default uninitialized
+ * state. Once a start command is received, the respective ring
+ * is then prepared by fetching the context and updating the
+ * offset.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @type: Command/Event or Channel transfer ring.
+ * @id: Index to the ring id. For the command ring it is usually 1; event
+ *		rings may vary from 1 to 128 and channels from 1 to 256.
+ */
+void mhi_ring_init(struct mhi_dev_ring *ring,
+ enum mhi_dev_ring_type type, int id);
+
+/**
+ * mhi_ring_start() - Fetches the respective transfer ring's context from
+ * the host and updates the write offset.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @ctx: Transfer ring of type mhi_dev_ring_ctx.
+ * @mhi: MHI device structure.
+ */
+int mhi_ring_start(struct mhi_dev_ring *ring,
+ union mhi_dev_ring_ctx *ctx, struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_cache_ring() - Cache the data for the corresponding ring locally.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @wr_offset: Cache the TREs up to the write offset value.
+ */
+int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset);
+
+/**
+ * mhi_dev_update_wr_offset() - Check for any updates in the write offset.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ */
+int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring);
+
+/**
+ * mhi_dev_process_ring() - Update the write pointer, fetch the ring elements
+ *		and invoke the client's callback.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ */
+int mhi_dev_process_ring(struct mhi_dev_ring *ring);
+
+/**
+ * mhi_dev_process_ring_element() - Fetch the ring elements and invoke the
+ *		client's callback.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @offset: Offset index into the respective ring's cache element.
+ */
+int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset);
+
+/**
+ * mhi_dev_add_element() - Copy the element to the respective transfer ring's
+ *		read pointer and increment the index.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @element: Transfer ring element to be copied to the host memory.
+ */
+int mhi_dev_add_element(struct mhi_dev_ring *ring,
+ union mhi_dev_ring_element_type *element);
+
+/**
+ * mhi_transfer_device_to_host() - memcpy equivalent API to transfer data
+ * from device to the host.
+ * @dst_pa: Physical destination address.
+ * @src: Source virtual address.
+ * @len: Number of bytes to be transferred.
+ * @mhi: MHI dev structure.
+ */
+int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len,
+ struct mhi_dev *mhi);
+
+/**
+ * mhi_transfer_host_to_device() - memcpy equivalent API to transfer data
+ *		from host to the device.
+ * @device: Destination virtual address on the device.
+ * @src_pa: Source physical address.
+ * @len: Number of bytes to be transferred.
+ * @mhi: MHI dev structure.
+ */
+int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len,
+ struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_write_to_host() - memcpy equivalent API to transfer data
+ * from device to host.
+ * @host: Host and device address details.
+ * @buf: Data buffer that needs to be written to the host.
+ * @size: Data buffer size.
+ * @mhi: MHI device structure.
+ */
+void mhi_dev_write_to_host(struct mhi_addr *host, void *buf, size_t size,
+ struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_read_from_host() - memcpy equivalent API to transfer data
+ * from host to device.
+ * @dst: Host and device address details.
+ * @buf: Physical (DMA) address of the device buffer the data is read into.
+ * @size: Data buffer size.
+ */
+void mhi_dev_read_from_host(struct mhi_addr *dst, dma_addr_t buf, size_t size);
+
+/**
+ * mhi_ring_set_cb() - Registers a callback to be invoked for each ring
+ *		element processed on the given ring.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @ring_cb: Callback invoked with the MHI device, element and context.
+ */
+void mhi_ring_set_cb(struct mhi_dev_ring *ring,
+ void (*ring_cb)(struct mhi_dev *dev,
+ union mhi_dev_ring_element_type *el, void *ctx));
+
+/**
+ * mhi_ring_set_state() - Sets internal state of the ring for tracking whether
+ * a ring is being processed, idle or uninitialized.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @state: state of type mhi_dev_ring_state.
+ */
+void mhi_ring_set_state(struct mhi_dev_ring *ring,
+ enum mhi_dev_ring_state state);
+
+/**
+ * mhi_ring_get_state() - Obtains the internal state of the ring.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ */
+enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring);
+
+/* MMIO related functions */
+
+/**
+ * mhi_dev_mmio_read() - Generic MHI MMIO register read API.
+ * @dev: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @reg_value: Pointer the register value is stored to.
+ */
+int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset,
+ uint32_t *reg_value);
+
+/**
+ * mhi_dev_mmio_write() - Generic MHI MMIO register write API.
+ * @dev: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @val: Value to be written to the register offset.
+ */
+int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset,
+ uint32_t val);
+
+/**
+ * mhi_dev_mmio_masked_write() - Generic MHI MMIO register write masked API.
+ * @dev: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @mask: Register field mask.
+ * @shift: Register field mask shift value.
+ * @val: Value to be written to the register offset.
+ */
+int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset,
+ uint32_t mask, uint32_t shift,
+ uint32_t val);
+/**
+ * mhi_dev_mmio_masked_read() - Generic MHI MMIO register read masked API.
+ * @dev: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @mask: Register field mask.
+ * @shift: Register field mask shift value.
+ * @reg_val: Pointer the register value is stored to.
+ */
+int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
+ uint32_t mask, uint32_t shift,
+ uint32_t *reg_val);
+/**
+ * mhi_dev_mmio_enable_ctrl_interrupt() - Enable Control interrupt.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_disable_ctrl_interrupt() - Disable Control interrupt.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_ctrl_status_interrupt() - Read Control interrupt status.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_enable_cmdb_interrupt() - Enable Command doorbell interrupt.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_disable_cmdb_interrupt() - Disable Command doorbell interrupt.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_cmdb_status_interrupt() - Read Command doorbell
+ *		interrupt status.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_enable_chdb_a7() - Enable Channel doorbell for a given
+ * channel id.
+ * @dev: MHI device structure.
+ * @chdb_id: Channel id number.
+ */
+int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);
+/**
+ * mhi_dev_mmio_disable_chdb_a7() - Disable Channel doorbell for a given
+ * channel id.
+ * @dev: MHI device structure.
+ * @chdb_id: Channel id number.
+ */
+int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);
+
+/**
+ * mhi_dev_mmio_enable_erdb_a7() - Enable Event ring doorbell for a given
+ * event ring id.
+ * @dev: MHI device structure.
+ * @erdb_id: Event ring id number.
+ */
+int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);
+
+/**
+ * mhi_dev_mmio_disable_erdb_a7() - Disable Event ring doorbell for a given
+ * event ring id.
+ * @dev: MHI device structure.
+ * @erdb_id: Event ring id number.
+ */
+int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);
+
+/**
+ * mhi_dev_mmio_enable_chdb_interrupts() - Enable all Channel doorbell
+ * interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_mask_chdb_interrupts() - Mask all Channel doorbell
+ * interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_chdb_status_interrupts() - Read all Channel doorbell
+ *		interrupt statuses.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_enable_erdb_interrupts() - Enable all Event doorbell
+ * interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_mask_erdb_interrupts() - Mask all Event doorbell
+ * interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_erdb_status_interrupts() - Read all Event doorbell
+ *		interrupt statuses.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_clear_interrupts() - Clear all doorbell interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_chc_base() - Fetch the Channel ring context base address.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_erc_base() - Fetch the Event ring context base address.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_crc_base() - Fetch the Command ring context base address.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_ch_db() - Fetch the write offset of the Channel ring ID.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @wr_offset: Pointer the write offset is stored to.
+ */
+int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);
+
+/**
+ * mhi_dev_mmio_get_erc_db() - Fetch the write offset of the Event ring ID.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @wr_offset: Pointer the write offset is stored to.
+ */
+int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);
+
+/**
+ * mhi_dev_mmio_get_cmd_db() - Fetch the write offset of the Command ring ID.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @wr_offset: Pointer the write offset is stored to.
+ */
+int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);
+
+/**
+ * mhi_dev_mmio_set_env() - Write the Execution Environment.
+ * @dev: MHI device structure.
+ * @value: Execution environment value.
+ */
+int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value);
+
+/**
+ * mhi_dev_mmio_reset() - Reset the MMIO done as part of initialization.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_reset(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_get_mhi_addr() - Fetches the Data and Control region from the Host.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_get_mhi_addr(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_mhi_state() - Fetches the MHI state such as M0/M1/M2/M3.
+ * @dev: MHI device structure.
+ * @state: Pointer of type mhi_dev_state
+ */
+int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state);
+
+/**
+ * mhi_dev_mmio_init() - Initializes the MMIO and reads the Number of event
+ *		rings, supported number of channels, and offsets to the Channel
+ * and Event doorbell from the host.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_init(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_update_ner() - Update the number of event rings (NER) programmed by
+ * the host.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_update_ner(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_restore_mmio() - Restores the MMIO when MHI device comes out of M3.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_restore_mmio(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_backup_mmio() - Backup MMIO before an MHI transition to M3.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_backup_mmio(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_dump_mmio() - Memory dump of the MMIO region for debug.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_dump_mmio(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_config_outbound_iatu() - Configure Outbound Address translation
+ * unit between device and host to map the Data and Control
+ * information.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_send_state_change_event() - Send state change event to the host
+ * such as M0/M1/M2/M3.
+ * @dev: MHI device structure.
+ * @state: MHI state of type mhi_dev_state
+ */
+int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
+ enum mhi_dev_state state);
+/**
+ * mhi_dev_send_ee_event() - Send Execution environment state change
+ * event to the host.
+ * @dev: MHI device structure.
+ * @state: MHI state of type mhi_dev_execenv
+ */
+int mhi_dev_send_ee_event(struct mhi_dev *mhi,
+ enum mhi_dev_execenv exec_env);
+/**
+ * mhi_dev_syserr() - System error when unexpected events are received.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_syserr(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_suspend() - MHI device suspend to stop channel processing at the
+ * Transfer ring boundary, update the channel state to suspended.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_suspend(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_resume() - MHI device resume to update the channel state to running.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_resume(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_trigger_hw_acc_wakeup() - Notify the state machine there is HW
+ *		accelerated data to be sent and prevent MHI suspend.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi);
+
+/**
+ * mhi_pcie_config_db_routing() - Configure Doorbell for Event and Channel
+ *		context with IPA when performing an MHI resume.
+ * @dev: MHI device structure.
+ */
+int mhi_pcie_config_db_routing(struct mhi_dev *mhi);
+
+/**
+ * mhi_uci_init() - Initializes the User control interface (UCI) which
+ * exposes device nodes for the supported MHI software
+ * channels.
+ */
+int mhi_uci_init(void);
+
+void mhi_dev_notify_a7_event(struct mhi_dev *mhi);
+
+#endif /* __MHI_H */
diff --git a/drivers/platform/msm/mhi_dev/mhi_hwio.h b/drivers/platform/msm/mhi_dev/mhi_hwio.h
new file mode 100644
index 000000000000..bcc4095575b3
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_hwio.h
@@ -0,0 +1,191 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MHI_HWIO_
+#define _MHI_HWIO_
+
+/* MHI register definition */
+#define MHI_CTRL_INT_STATUS_A7 (0x0004)
+#define MHI_CTRL_INT_STATUS_A7_STATUS_MASK 0xffffffff
+#define MHI_CTRL_INT_STATUS_A7_STATUS_SHIFT 0x0
+
+#define MHI_CHDB_INT_STATUS_A7_n(n) (0x0028 + 0x4 * (n))
+#define MHI_CHDB_INT_STATUS_A7_n_STATUS_MASK 0xffffffff
+#define MHI_CHDB_INT_STATUS_A7_n_STATUS_SHIFT 0x0
+
+#define MHI_ERDB_INT_STATUS_A7_n(n) (0x0038 + 0x4 * (n))
+#define MHI_ERDB_INT_STATUS_A7_n_STATUS_MASK 0xffffffff
+#define MHI_ERDB_INT_STATUS_A7_n_STATUS_SHIFT 0x0
+
+#define MHI_CTRL_INT_CLEAR_A7 (0x004C)
+#define MHI_CTRL_INT_CLEAR_A7_CLEAR_MASK 0xffffffff
+#define MHI_CTRL_INT_CLEAR_A7_CLEAR_SHIFT 0x0
+#define MHI_CTRL_INT_CRDB_CLEAR BIT(1)
+#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0)
+
+#define MHI_CHDB_INT_CLEAR_A7_n(n) (0x0070 + 0x4 * (n))
+#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK 0xffffffff
+#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_SHIFT 0x0
+
+#define MHI_ERDB_INT_CLEAR_A7_n(n) (0x0080 + 0x4 * (n))
+#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK 0xffffffff
+#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_SHIFT 0x0
+
+#define MHI_CTRL_INT_MASK_A7 (0x0094)
+#define MHI_CTRL_INT_MASK_A7_MASK_MASK 0x3
+#define MHI_CTRL_INT_MASK_A7_MASK_SHIFT 0x0
+#define MHI_CTRL_MHICTRL_MASK BIT(0)
+#define MHI_CTRL_MHICTRL_SHFT 0
+#define MHI_CTRL_CRDB_MASK BIT(1)
+#define MHI_CTRL_CRDB_SHFT 1
+
+#define MHI_CHDB_INT_MASK_A7_n(n) (0x00B8 + 0x4 * (n))
+#define MHI_CHDB_INT_MASK_A7_n_MASK_MASK 0xffffffff
+#define MHI_CHDB_INT_MASK_A7_n_MASK_SHIFT 0x0
+
+#define MHI_ERDB_INT_MASK_A7_n(n) (0x00C8 + 0x4 * (n))
+#define MHI_ERDB_INT_MASK_A7_n_MASK_MASK 0xffffffff
+#define MHI_ERDB_INT_MASK_A7_n_MASK_SHIFT 0x0
+
+#define MHIREGLEN (0x0100)
+#define MHIREGLEN_MHIREGLEN_MASK 0xffffffff
+#define MHIREGLEN_MHIREGLEN_SHIFT 0x0
+
+#define MHIVER (0x0108)
+#define MHIVER_MHIVER_MASK 0xffffffff
+#define MHIVER_MHIVER_SHIFT 0x0
+
+#define MHICFG (0x0110)
+#define MHICFG_RESERVED_BITS31_24_MASK 0xff000000
+#define MHICFG_RESERVED_BITS31_24_SHIFT 0x18
+#define MHICFG_NER_MASK 0xff0000
+#define MHICFG_NER_SHIFT 0x10
+#define MHICFG_RESERVED_BITS15_8_MASK 0xff00
+#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8
+#define MHICFG_NCH_MASK 0xff
+#define MHICFG_NCH_SHIFT 0x0
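+
+/*
+ * Example (illustrative): the event ring and channel counts advertised
+ * in MHICFG are extracted with the field masks above:
+ *
+ *	mhi_dev_mmio_read(dev, MHICFG, &reg);
+ *	cfg->event_rings = (reg & MHICFG_NER_MASK) >> MHICFG_NER_SHIFT;
+ *	cfg->channels = (reg & MHICFG_NCH_MASK) >> MHICFG_NCH_SHIFT;
+ */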
+
+#define CHDBOFF (0x0118)
+#define CHDBOFF_CHDBOFF_MASK 0xffffffff
+#define CHDBOFF_CHDBOFF_SHIFT 0x0
+
+#define ERDBOFF (0x0120)
+#define ERDBOFF_ERDBOFF_MASK 0xffffffff
+#define ERDBOFF_ERDBOFF_SHIFT 0x0
+
+#define BHIOFF (0x0128)
+#define BHIOFF_BHIOFF_MASK 0xffffffff
+#define BHIOFF_BHIOFF_SHIFT 0x0
+
+#define DEBUGOFF (0x0130)
+#define DEBUGOFF_DEBUGOFF_MASK 0xffffffff
+#define DEBUGOFF_DEBUGOFF_SHIFT 0x0
+
+#define MHICTRL (0x0138)
+#define MHICTRL_MHISTATE_MASK 0x0000FF00
+#define MHICTRL_MHISTATE_SHIFT 0x8
+#define MHICTRL_RESET_MASK 0x2
+#define MHICTRL_RESET_SHIFT 0x1
+
+#define MHISTATUS (0x0148)
+#define MHISTATUS_MHISTATE_MASK 0x0000ff00
+#define MHISTATUS_MHISTATE_SHIFT 0x8
+#define MHISTATUS_SYSERR_MASK 0x4
+#define MHISTATUS_SYSERR_SHIFT 0x2
+#define MHISTATUS_READY_MASK 0x1
+#define MHISTATUS_READY_SHIFT 0x0
+
+#define CCABAP_LOWER (0x0158)
+#define CCABAP_LOWER_CCABAP_LOWER_MASK 0xffffffff
+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT 0x0
+
+#define CCABAP_HIGHER (0x015C)
+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK 0xffffffff
+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT 0x0
+
+#define ECABAP_LOWER (0x0160)
+#define ECABAP_LOWER_ECABAP_LOWER_MASK 0xffffffff
+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT 0x0
+
+#define ECABAP_HIGHER (0x0164)
+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK 0xffffffff
+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT 0x0
+
+#define CRCBAP_LOWER (0x0168)
+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK 0xffffffff
+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT 0x0
+
+#define CRCBAP_HIGHER (0x016C)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK 0xffffffff
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT 0x0
+
+#define CRDB_LOWER (0x0170)
+#define CRDB_LOWER_CRDB_LOWER_MASK 0xffffffff
+#define CRDB_LOWER_CRDB_LOWER_SHIFT 0x0
+
+#define CRDB_HIGHER (0x0174)
+#define CRDB_HIGHER_CRDB_HIGHER_MASK 0xffffffff
+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT 0x0
+
+#define MHICTRLBASE_LOWER (0x0180)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK 0xffffffff
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT 0x0
+
+#define MHICTRLBASE_HIGHER (0x0184)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK 0xffffffff
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT 0x0
+
+#define MHICTRLLIMIT_LOWER (0x0188)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK 0xffffffff
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT 0x0
+
+#define MHICTRLLIMIT_HIGHER (0x018C)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK 0xffffffff
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT 0x0
+
+#define MHIDATABASE_LOWER (0x0198)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK 0xffffffff
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT 0x0
+
+#define MHIDATABASE_HIGHER (0x019C)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK 0xffffffff
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT 0x0
+
+#define MHIDATALIMIT_LOWER (0x01A0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK 0xffffffff
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT 0x0
+
+#define MHIDATALIMIT_HIGHER (0x01A4)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK 0xffffffff
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT 0x0
+
+#define CHDB_LOWER_n(n) (0x0400 + 0x8 * (n))
+#define CHDB_LOWER_n_CHDB_LOWER_MASK 0xffffffff
+#define CHDB_LOWER_n_CHDB_LOWER_SHIFT 0x0
+
+#define CHDB_HIGHER_n(n) (0x0404 + 0x8 * (n))
+#define CHDB_HIGHER_n_CHDB_HIGHER_MASK 0xffffffff
+#define CHDB_HIGHER_n_CHDB_HIGHER_SHIFT 0x0
+
+#define ERDB_LOWER_n(n) (0x0800 + 0x8 * (n))
+#define ERDB_LOWER_n_ERDB_LOWER_MASK 0xffffffff
+#define ERDB_LOWER_n_ERDB_LOWER_SHIFT 0x0
+
+#define ERDB_HIGHER_n(n) (0x0804 + 0x8 * (n))
+#define ERDB_HIGHER_n_ERDB_HIGHER_MASK 0xffffffff
+#define ERDB_HIGHER_n_ERDB_HIGHER_SHIFT 0x0
+
+#define BHI_EXECENV (0x228)
+#define BHI_EXECENV_MASK 0xFFFFFFFF
+#define BHI_EXECENV_SHIFT 0
+
+#endif
diff --git a/drivers/platform/msm/mhi_dev/mhi_mmio.c b/drivers/platform/msm/mhi_dev/mhi_mmio.c
new file mode 100644
index 000000000000..12e4a0d4851c
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_mmio.c
@@ -0,0 +1,999 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+
+#include "mhi.h"
+#include "mhi_hwio.h"
+
+int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset,
+ uint32_t *reg_value)
+{
+ void __iomem *addr;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ addr = dev->mmio_base_addr + offset;
+
+ *reg_value = readl_relaxed(addr);
+
+ pr_debug("reg read:0x%x with value 0x%x\n", offset, *reg_value);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read);
+
+int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset,
+ uint32_t val)
+{
+ void __iomem *addr;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ addr = dev->mmio_base_addr + offset;
+
+ writel_relaxed(val, addr);
+
+ pr_debug("reg write:0x%x with value 0x%x\n", offset, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_write);
+
+int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset,
+ uint32_t mask, uint32_t shift,
+ uint32_t val)
+{
+ uint32_t reg_val;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, offset, &reg_val);
+ if (rc) {
+ pr_err("Read error failed for offset:0x%x\n", offset);
+ return rc;
+ }
+
+ reg_val &= ~mask;
+ reg_val |= ((val << shift) & mask);
+
+ rc = mhi_dev_mmio_write(dev, offset, reg_val);
+ if (rc) {
+ pr_err("Write error failed for offset:0x%x\n", offset);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_masked_write);
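+
+/*
+ * Usage sketch (illustrative): update only the MHI state field of
+ * MHICTRL, leaving the remaining bits untouched:
+ *
+ *	rc = mhi_dev_mmio_masked_write(dev, MHICTRL,
+ *			MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT,
+ *			MHI_DEV_M0_STATE);
+ */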
+
+int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
+ uint32_t mask, uint32_t shift,
+ uint32_t *reg_val)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, offset, reg_val);
+ if (rc) {
+ pr_err("Read error failed for offset:0x%x\n", offset);
+ return rc;
+ }
+
+ *reg_val &= mask;
+ *reg_val >>= shift;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_masked_read);
+
+static int mhi_dev_mmio_mask_set_chdb_int_a7(struct mhi_dev *dev,
+ uint32_t chdb_id, bool enable)
+{
+ uint32_t chid_mask, chid_idx, chid_shft, val = 0;
+ int rc = 0;
+
+ chid_shft = chdb_id%32;
+ chid_mask = (1 << chid_shft);
+ chid_idx = chdb_id/32;
+
+ if (enable)
+ val = 1;
+
+ rc = mhi_dev_mmio_masked_write(dev, MHI_CHDB_INT_MASK_A7_n(chid_idx),
+ chid_mask, chid_shft, val);
+ if (rc) {
+ pr_err("Write on channel db interrupt failed\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, true);
+ if (rc) {
+ pr_err("Setting channel DB failed for ch_id:%d\n", chdb_id);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_a7);
+
+int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, false);
+ if (rc) {
+ pr_err("Disabling channel DB failed for ch_id:%d\n", chdb_id);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_chdb_a7);
+
+static int mhi_dev_mmio_set_erdb_int_a7(struct mhi_dev *dev,
+ uint32_t erdb_ch_id, bool enable)
+{
+ uint32_t erdb_id_shft, erdb_id_mask, erdb_id_idx, val = 0;
+ int rc = 0;
+
+ erdb_id_shft = erdb_ch_id%32;
+ erdb_id_mask = (1 << erdb_id_shft);
+ erdb_id_idx = erdb_ch_id/32;
+
+ if (enable)
+ val = 1;
+
+ rc = mhi_dev_mmio_masked_write(dev,
+ MHI_ERDB_INT_MASK_A7_n(erdb_id_idx),
+ erdb_id_mask, erdb_id_shft, val);
+ if (rc) {
+ pr_err("Error setting event ring db for %d\n", erdb_ch_id);
+ return rc;
+ }
+
+ return rc;
+}
+
+int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, true);
+ if (rc) {
+ pr_err("Error setting event ring db for %d\n", erdb_id);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_a7);
+
+int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, false);
+ if (rc) {
+ pr_err("Error disabling event ring db for %d\n", erdb_id);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_erdb_a7);
+
+int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state)
+{
+ uint32_t reg_value = 0;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_masked_read(dev, MHICTRL,
+ MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT,
+ (uint32_t *)state);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, MHICTRL, &reg_value);
+ if (rc)
+ return rc;
+
+ pr_debug("MHICTRL is 0x%x\n", reg_value);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_mhi_state);
+
+static int mhi_dev_mmio_set_chdb_interrupts(struct mhi_dev *dev, bool enable)
+{
+ uint32_t mask = 0, i = 0;
+ int rc = 0;
+
+ if (enable)
+ mask = MHI_CHDB_INT_MASK_A7_n_MASK_MASK;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_write(dev,
+ MHI_CHDB_INT_MASK_A7_n(i), mask);
+ if (rc) {
+ pr_err("Set channel db on row:%d failed\n", i);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_chdb_interrupts(dev, true);
+ if (rc) {
+ pr_err("Error setting channel db interrupts\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_interrupts);
+
+int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_chdb_interrupts(dev, false);
+ if (rc) {
+ pr_err("Error masking channel db interrupts\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_mask_chdb_interrupts);
+
+int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev)
+{
+ uint32_t i;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_read(dev,
+ MHI_CHDB_INT_STATUS_A7_n(i), &dev->chdb[i].status);
+ if (rc) {
+ pr_err("Error reading chdb status for row:%d\n", i);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_chdb_status_interrupts);
+
+static int mhi_dev_mmio_set_erdb_interrupts(struct mhi_dev *dev, bool enable)
+{
+ uint32_t mask = 0, i;
+ int rc = 0;
+
+ if (enable)
+ mask = MHI_ERDB_INT_MASK_A7_n_MASK_MASK;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_write(dev,
+ MHI_ERDB_INT_MASK_A7_n(i), mask);
+ if (rc) {
+ pr_err("Error setting erdb status for row:%d\n", i);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_erdb_interrupts(dev, true);
+ if (rc) {
+ pr_err("Error enabling all erdb interrupts\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_interrupts);
+
+int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_erdb_interrupts(dev, false);
+ if (rc) {
+ pr_err("Error masking all event db interrupt\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_mask_erdb_interrupts);
+
+int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev)
+{
+ uint32_t i;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_read(dev, MHI_ERDB_INT_STATUS_A7_n(i),
+ &dev->evdb[i].status);
+ if (rc) {
+ pr_err("Error setting erdb status for row:%d\n", i);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_erdb_status_interrupts);
+
+int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 1);
+ if (rc) {
+ pr_err("Error enabling control interrupt\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_ctrl_interrupt);
+
+int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 0);
+ if (rc) {
+ pr_err("Error disabling control interrupt\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_ctrl_interrupt);
+
+int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->ctrl_int);
+ if (rc) {
+ pr_err("Error reading control status interrupt\n");
+ return rc;
+ }
+
+ dev->ctrl_int &= 0x1;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_ctrl_status_interrupt);
+
+int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->cmd_int);
+ if (rc) {
+ pr_err("Error reading cmd status register\n");
+ return rc;
+ }
+
+ dev->cmd_int &= 0x10;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_cmdb_status_interrupt);
+
+int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 1);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_cmdb_interrupt);
+
+int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 0);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_cmdb_interrupt);
+
+static void mhi_dev_mmio_mask_interrupts(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ rc = mhi_dev_mmio_disable_ctrl_interrupt(dev);
+ if (rc) {
+ pr_err("Error disabling control interrupt\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_disable_cmdb_interrupt(dev);
+ if (rc) {
+ pr_err("Error disabling command db interrupt\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_mask_chdb_interrupts(dev);
+ if (rc) {
+ pr_err("Error masking all channel db interrupts\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_mask_erdb_interrupts(dev);
+ if (rc) {
+ pr_err("Error masking all erdb interrupts\n");
+ return;
+ }
+}
+
+int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev)
+{
+ uint32_t i = 0;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_write(dev, MHI_CHDB_INT_CLEAR_A7_n(i),
+ MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK);
+ if (rc)
+ return rc;
+ }
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_write(dev, MHI_ERDB_INT_CLEAR_A7_n(i),
+ MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK);
+ if (rc)
+ return rc;
+ }
+
+ rc = mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7,
+ MHI_CTRL_INT_CRDB_CLEAR);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_clear_interrupts);
+
+int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev)
+{
+ uint32_t ccabap_value = 0, offset = 0;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, CCABAP_HIGHER, &ccabap_value);
+ if (rc)
+ return rc;
+
+ dev->ch_ctx_shadow.host_pa = ccabap_value;
+ dev->ch_ctx_shadow.host_pa <<= 32;
+
+ rc = mhi_dev_mmio_read(dev, CCABAP_LOWER, &ccabap_value);
+ if (rc)
+ return rc;
+
+ dev->ch_ctx_shadow.host_pa |= ccabap_value;
+
+ offset = (uint32_t)(dev->ch_ctx_shadow.host_pa -
+ dev->ctrl_base.host_pa);
+
+ dev->ch_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
+ dev->ch_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_chc_base);
+
+int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev)
+{
+ uint32_t ecabap_value = 0, offset = 0;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, ECABAP_HIGHER, &ecabap_value);
+ if (rc)
+ return rc;
+
+ dev->ev_ctx_shadow.host_pa = ecabap_value;
+ dev->ev_ctx_shadow.host_pa <<= 32;
+
+ rc = mhi_dev_mmio_read(dev, ECABAP_LOWER, &ecabap_value);
+ if (rc)
+ return rc;
+
+ dev->ev_ctx_shadow.host_pa |= ecabap_value;
+
+ offset = (uint32_t)(dev->ev_ctx_shadow.host_pa -
+ dev->ctrl_base.host_pa);
+
+ dev->ev_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
+ dev->ev_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_erc_base);
+
+int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev)
+{
+ uint32_t crcbap_value = 0, offset = 0;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, CRCBAP_HIGHER, &crcbap_value);
+ if (rc)
+ return rc;
+
+ dev->cmd_ctx_shadow.host_pa = crcbap_value;
+ dev->cmd_ctx_shadow.host_pa <<= 32;
+
+ rc = mhi_dev_mmio_read(dev, CRCBAP_LOWER, &crcbap_value);
+ if (rc)
+ return rc;
+
+ dev->cmd_ctx_shadow.host_pa |= crcbap_value;
+
+ offset = (uint32_t)(dev->cmd_ctx_shadow.host_pa -
+ dev->ctrl_base.host_pa);
+
+ dev->cmd_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
+ dev->cmd_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_crc_base);
+
+int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
+{
+ uint32_t value = 0, ch_start_idx = 0;
+ int rc = 0;
+
+ if (!ring) {
+ pr_err("Invalid ring context\n");
+ return -EINVAL;
+ }
+
+ ch_start_idx = ring->mhi_dev->ch_ring_start;
+
+ rc = mhi_dev_mmio_read(ring->mhi_dev,
+ CHDB_HIGHER_n(ring->id - ch_start_idx), &value);
+ if (rc)
+ return rc;
+
+ *wr_offset = value;
+ *wr_offset <<= 32;
+
+ rc = mhi_dev_mmio_read(ring->mhi_dev,
+ CHDB_LOWER_n(ring->id - ch_start_idx), &value);
+ if (rc)
+ return rc;
+
+ *wr_offset |= value;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_ch_db);
+
+int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
+{
+ uint32_t value = 0, ev_idx_start = 0;
+ int rc = 0;
+
+ if (!ring) {
+ pr_err("Invalid ring context\n");
+ return -EINVAL;
+ }
+
+ ev_idx_start = ring->mhi_dev->ev_ring_start;
+ rc = mhi_dev_mmio_read(ring->mhi_dev,
+ ERDB_HIGHER_n(ring->id - ev_idx_start), &value);
+ if (rc)
+ return rc;
+
+ *wr_offset = value;
+ *wr_offset <<= 32;
+
+ rc = mhi_dev_mmio_read(ring->mhi_dev,
+ ERDB_LOWER_n(ring->id - ev_idx_start), &value);
+ if (rc)
+ return rc;
+
+ *wr_offset |= value;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_erc_db);
+
+int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
+{
+ uint32_t value = 0;
+ int rc = 0;
+
+ if (!ring) {
+ pr_err("Invalid ring context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(ring->mhi_dev, CRDB_HIGHER, &value);
+ if (rc)
+ return rc;
+
+ *wr_offset = value;
+ *wr_offset <<= 32;
+
+ rc = mhi_dev_mmio_read(ring->mhi_dev, CRDB_LOWER, &value);
+ if (rc)
+ return rc;
+
+ *wr_offset |= value;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_cmd_db);
+
+int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value)
+{
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ return mhi_dev_mmio_write(dev, BHI_EXECENV, value);
+}
+EXPORT_SYMBOL(mhi_dev_mmio_set_env);
+
+int mhi_dev_mmio_reset(struct mhi_dev *dev)
+{
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ mhi_dev_mmio_write(dev, MHICTRL, 0);
+ mhi_dev_mmio_write(dev, MHISTATUS, 0);
+ mhi_dev_mmio_clear_interrupts(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_reset);
+
+int mhi_dev_restore_mmio(struct mhi_dev *dev)
+{
+ uint32_t i, reg_cntl_value;
+ void *reg_cntl_addr;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ mhi_dev_mmio_mask_interrupts(dev);
+
+ for (i = 0; i < (MHI_DEV_MMIO_RANGE/4); i++) {
+ reg_cntl_addr = dev->mmio_base_addr + (i * 4);
+ reg_cntl_value = dev->mmio_backup[i];
+ writel_relaxed(reg_cntl_value, reg_cntl_addr);
+ }
+
+ mhi_dev_mmio_clear_interrupts(dev);
+ mhi_dev_mmio_enable_ctrl_interrupt(dev);
+
+ /* Ensure the MMIO restore and interrupt setup complete before returning */
+ mb();
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_restore_mmio);
+
+int mhi_dev_backup_mmio(struct mhi_dev *dev)
+{
+ uint32_t i = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i++)
+ dev->mmio_backup[i] =
+ readl_relaxed(dev->mmio_base_addr + (i * 4));
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_backup_mmio);
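+
+/*
+ * mhi_dev_backup_mmio() and mhi_dev_restore_mmio() are meant to be used as
+ * a pair around a power-collapse cycle. A minimal usage sketch, assuming a
+ * suspend/resume path that owns a struct mhi_dev *mhi (illustrative only):
+ *
+ *	mhi_dev_backup_mmio(mhi);	// before the link drops to D3
+ *	...
+ *	mhi_dev_restore_mmio(mhi);	// after the link is re-enabled
+ */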
+
+int mhi_dev_get_mhi_addr(struct mhi_dev *dev)
+{
+ uint32_t data_value = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ mhi_dev_mmio_read(dev, MHICTRLBASE_LOWER, &data_value);
+ dev->host_addr.ctrl_base_lsb = data_value;
+
+ mhi_dev_mmio_read(dev, MHICTRLBASE_HIGHER, &data_value);
+ dev->host_addr.ctrl_base_msb = data_value;
+
+ mhi_dev_mmio_read(dev, MHICTRLLIMIT_LOWER, &data_value);
+ dev->host_addr.ctrl_limit_lsb = data_value;
+
+ mhi_dev_mmio_read(dev, MHICTRLLIMIT_HIGHER, &data_value);
+ dev->host_addr.ctrl_limit_msb = data_value;
+
+ mhi_dev_mmio_read(dev, MHIDATABASE_LOWER, &data_value);
+ dev->host_addr.data_base_lsb = data_value;
+
+ mhi_dev_mmio_read(dev, MHIDATABASE_HIGHER, &data_value);
+ dev->host_addr.data_base_msb = data_value;
+
+ mhi_dev_mmio_read(dev, MHIDATALIMIT_LOWER, &data_value);
+ dev->host_addr.data_limit_lsb = data_value;
+
+ mhi_dev_mmio_read(dev, MHIDATALIMIT_HIGHER, &data_value);
+ dev->host_addr.data_limit_msb = data_value;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_get_mhi_addr);
+
+int mhi_dev_mmio_init(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, MHIREGLEN, &dev->cfg.mhi_reg_len);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NER_MASK,
+ MHICFG_NER_SHIFT, &dev->cfg.event_rings);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, CHDBOFF, &dev->cfg.chdb_offset);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, ERDBOFF, &dev->cfg.erdb_offset);
+ if (rc)
+ return rc;
+
+ dev->cfg.channels = NUM_CHANNELS;
+
+ if (!dev->mmio_initialized) {
+ rc = mhi_dev_mmio_reset(dev);
+ if (rc) {
+ pr_err("Error resetting MMIO\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_init);
+
+int mhi_dev_update_ner(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ rc = mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NER_MASK,
+ MHICFG_NER_SHIFT, &dev->cfg.event_rings);
+ if (rc) {
+ pr_err("Error update NER\n");
+ return rc;
+ }
+
+ pr_debug("NER in HW :%d\n", dev->cfg.event_rings);
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_update_ner);
+
+int mhi_dev_dump_mmio(struct mhi_dev *dev)
+{
+ uint32_t r1, r2, r3, r4, i, offset = 0;
+ int rc = 0;
+
+ for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i += 4) {
+ rc = mhi_dev_mmio_read(dev, offset, &r1);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, offset+4, &r2);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, offset+8, &r3);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, offset+0xC, &r4);
+ if (rc)
+ return rc;
+
+ pr_debug("0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ offset, r1, r2, r3, r4);
+ offset += 0x10;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_dump_mmio);
diff --git a/drivers/platform/msm/mhi_dev/mhi_ring.c b/drivers/platform/msm/mhi_dev/mhi_ring.c
new file mode 100644
index 000000000000..b7eab1eb8b64
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_ring.c
@@ -0,0 +1,438 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+
+#include "mhi.h"
+
+static uint32_t mhi_dev_ring_addr2ofst(struct mhi_dev_ring *ring, uint64_t p)
+{
+ uint64_t rbase;
+
+ rbase = ring->ring_ctx->generic.rbase;
+
+ return (p - rbase)/sizeof(union mhi_dev_ring_element_type);
+}
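+
+/*
+ * Worked example (illustrative, assuming 16-byte ring elements): with
+ * rbase = 0x1000, a host ring pointer p = 0x1040 maps to offset
+ * (0x1040 - 0x1000) / 16 = 4, i.e. the fifth element in the ring.
+ */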
+
+static uint32_t mhi_dev_ring_num_elems(struct mhi_dev_ring *ring)
+{
+ return ring->ring_ctx->generic.rlen/
+ sizeof(union mhi_dev_ring_element_type);
+}
+
+/* fetch ring elements from start to end, taking care of the wrap-around case */
+int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring,
+ uint32_t start, uint32_t end)
+{
+ struct mhi_addr host_addr;
+
+ host_addr.device_pa = ring->ring_shadow.device_pa
+ + sizeof(union mhi_dev_ring_element_type) * start;
+ host_addr.device_va = ring->ring_shadow.device_va
+ + sizeof(union mhi_dev_ring_element_type) * start;
+ host_addr.host_pa = ring->ring_shadow.host_pa
+ + sizeof(union mhi_dev_ring_element_type) * start;
+ if (start < end) {
+ mhi_dev_read_from_host(&host_addr,
+ (ring->ring_cache_dma_handle +
+ sizeof(union mhi_dev_ring_element_type) * start),
+ (end-start) *
+ sizeof(union mhi_dev_ring_element_type));
+ } else if (start > end) {
+ /* copy from 'start' to ring end, then ring start to 'end' */
+ mhi_dev_read_from_host(&host_addr,
+ (ring->ring_cache_dma_handle +
+ sizeof(union mhi_dev_ring_element_type) * start),
+ (ring->ring_size-start) *
+ sizeof(union mhi_dev_ring_element_type));
+ if (end) {
+ /* wrapped around */
+ host_addr.device_pa = ring->ring_shadow.device_pa;
+ host_addr.device_va = ring->ring_shadow.device_va;
+ host_addr.host_pa = ring->ring_shadow.host_pa;
+ /* wrapped elements land at the start of the local cache */
+ mhi_dev_read_from_host(&host_addr,
+ ring->ring_cache_dma_handle,
+ end * sizeof(union mhi_dev_ring_element_type));
+ }
+ }
+
+ return 0;
+}
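+
+/*
+ * Wrap-around example (illustrative): for a 10-element ring with start = 8
+ * and end = 2, the first read copies elements 8..9 into the cache and the
+ * second read copies elements 0..1, so the cache mirrors the host ring.
+ */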
+
+int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset)
+{
+ uint32_t old_offset = 0;
+ struct mhi_dev *mhi_ctx;
+
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return -EINVAL;
+ }
+
+ mhi_ctx = ring->mhi_dev;
+
+ if (ring->wr_offset == wr_offset) {
+ mhi_log(MHI_MSG_INFO,
+ "nothing to cache for ring %d, local wr_ofst %d\n",
+ ring->id, ring->wr_offset);
+ mhi_log(MHI_MSG_INFO,
+ "new wr_offset %d\n", wr_offset);
+ return 0;
+ }
+
+ old_offset = ring->wr_offset;
+
+ mhi_log(MHI_MSG_ERROR,
+ "caching - rng size:%d local ofst:%d new ofst:%d\n",
+ (uint32_t) ring->ring_size, old_offset, wr_offset);
+
+ /*
+ * Copy the elements from old_offset to wr_offset, taking the
+ * wrap-around case into account. Event rings are not cached,
+ * so they are skipped below.
+ */
+ if (ring->id >= mhi_ctx->ev_ring_start &&
+ ring->id < (mhi_ctx->ev_ring_start +
+ mhi_ctx->cfg.event_rings)) {
+ mhi_log(MHI_MSG_ERROR,
+ "not caching event ring %d\n", ring->id);
+ return 0;
+ }
+
+ mhi_log(MHI_MSG_ERROR, "caching ring %d, start %d, end %d\n",
+ ring->id, old_offset, wr_offset);
+
+ if (mhi_dev_fetch_ring_elements(ring, old_offset, wr_offset)) {
+ mhi_log(MHI_MSG_ERROR,
+ "failed to fetch elements for ring %d, start %d, end %d\n",
+ ring->id, old_offset, wr_offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_cache_ring);
+
+int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
+{
+ uint64_t wr_offset = 0;
+ uint32_t new_wr_offset = 0;
+ int32_t rc = 0;
+
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (ring->type) {
+ case RING_TYPE_CMD:
+ rc = mhi_dev_mmio_get_cmd_db(ring, &wr_offset);
+ if (rc) {
+ pr_err("%s: CMD DB read failed\n", __func__);
+ return rc;
+ }
+ mhi_log(MHI_MSG_ERROR,
+ "ring %d wr_offset from db 0x%x\n",
+ ring->id, (uint32_t) wr_offset);
+ break;
+ case RING_TYPE_ER:
+ rc = mhi_dev_mmio_get_erc_db(ring, &wr_offset);
+ if (rc) {
+ pr_err("%s: EVT DB read failed\n", __func__);
+ return rc;
+ }
+ break;
+ case RING_TYPE_CH:
+ rc = mhi_dev_mmio_get_ch_db(ring, &wr_offset);
+ if (rc) {
+ pr_err("%s: CH DB read failed\n", __func__);
+ return rc;
+ }
+ mhi_log(MHI_MSG_ERROR,
+ "ring %d wr_offset from db 0x%x\n",
+ ring->id, (uint32_t) wr_offset);
+ break;
+ default:
+ mhi_log(MHI_MSG_ERROR, "invalid ring type\n");
+ return -EINVAL;
+ }
+
+ new_wr_offset = mhi_dev_ring_addr2ofst(ring, wr_offset);
+
+ mhi_dev_cache_ring(ring, new_wr_offset);
+
+ ring->wr_offset = new_wr_offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_update_wr_offset);
+
+int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset)
+{
+ union mhi_dev_ring_element_type *el;
+
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return -EINVAL;
+ }
+
+ /* get the element and invoke the respective callback */
+ el = &ring->ring_cache[offset];
+
+ if (ring->ring_cb)
+ ring->ring_cb(ring->mhi_dev, el, (void *)ring);
+ else
+ mhi_log(MHI_MSG_INFO, "No callback registered for ring %d\n",
+ ring->id);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_process_ring_element);
+
+int mhi_dev_process_ring(struct mhi_dev_ring *ring)
+{
+ int rc = 0;
+
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_update_wr_offset(ring);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error updating write-offset for ring %d\n",
+ ring->id);
+ return rc;
+ }
+
+ if (ring->type == RING_TYPE_CH) {
+ /* notify the clients that there are elements in the ring */
+ rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
+ if (rc)
+ pr_err("Error fetching elements\n");
+ return rc;
+ }
+
+ while (ring->rd_offset != ring->wr_offset) {
+ rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error processing ring (%d) element (%d)\n",
+ ring->id, ring->rd_offset);
+ return rc;
+ }
+
+ mhi_log(MHI_MSG_ERROR,
+ "Processing ring (%d) rd_offset:%d, wr_offset:%d\n",
+ ring->id, ring->rd_offset, ring->wr_offset);
+
+ mhi_dev_ring_inc_index(ring, ring->rd_offset);
+ }
+
+ if (ring->rd_offset != ring->wr_offset) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error with the rd offset/wr offset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_process_ring);
+
+int mhi_dev_add_element(struct mhi_dev_ring *ring,
+ union mhi_dev_ring_element_type *element)
+{
+ uint32_t old_offset = 0;
+ struct mhi_addr host_addr;
+
+ if (!ring || !element) {
+ pr_err("%s: Invalid context\n", __func__);
+ return -EINVAL;
+ }
+
+ mhi_dev_update_wr_offset(ring);
+
+ if ((ring->rd_offset + 1) % ring->ring_size == ring->wr_offset) {
+ mhi_log(MHI_MSG_INFO, "ring full to insert element\n");
+ return -EINVAL;
+ }
+
+ old_offset = ring->rd_offset;
+
+ mhi_dev_ring_inc_index(ring, ring->rd_offset);
+
+ ring->ring_ctx->generic.rp = (ring->rd_offset *
+ sizeof(union mhi_dev_ring_element_type)) +
+ ring->ring_ctx->generic.rbase;
+ /*
+ * Write the element to the host ring; ring_base has to be the
+ * iomapped ring base for the memcpy.
+ */
+ host_addr.host_pa = ring->ring_shadow.host_pa +
+ sizeof(union mhi_dev_ring_element_type) * old_offset;
+ host_addr.device_va = ring->ring_shadow.device_va +
+ sizeof(union mhi_dev_ring_element_type) * old_offset;
+
+ mhi_log(MHI_MSG_ERROR, "adding element to ring (%d)\n", ring->id);
+ mhi_log(MHI_MSG_ERROR, "rd_ofset %d\n", ring->rd_offset);
+ mhi_log(MHI_MSG_ERROR, "type %d\n", element->generic.type);
+
+ mhi_dev_write_to_host(&host_addr, element,
+ sizeof(union mhi_dev_ring_element_type), ring->mhi_dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_add_element);
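+
+/*
+ * A minimal caller-side sketch for mhi_dev_add_element() (illustrative
+ * only; the element type constant below is hypothetical):
+ *
+ *	union mhi_dev_ring_element_type el;
+ *
+ *	memset(&el, 0, sizeof(el));
+ *	el.generic.type = MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
+ *	if (mhi_dev_add_element(ring, &el))
+ *		mhi_log(MHI_MSG_ERROR, "failed to add ring element\n");
+ */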
+
+int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
+ struct mhi_dev *mhi)
+{
+ int rc = 0;
+ uint32_t wr_offset = 0;
+ uint32_t offset = 0;
+
+ if (!ring || !ctx || !mhi) {
+ pr_err("%s: Invalid context\n", __func__);
+ return -EINVAL;
+ }
+
+ ring->ring_ctx = ctx;
+ ring->ring_size = mhi_dev_ring_num_elems(ring);
+ ring->rd_offset = mhi_dev_ring_addr2ofst(ring,
+ ring->ring_ctx->generic.rp);
+ ring->wr_offset = mhi_dev_ring_addr2ofst(ring,
+ ring->ring_ctx->generic.rp);
+ ring->mhi_dev = mhi;
+
+ mhi_ring_set_state(ring, RING_STATE_IDLE);
+
+ wr_offset = mhi_dev_ring_addr2ofst(ring,
+ ring->ring_ctx->generic.wp);
+
+ ring->ring_cache = dma_alloc_coherent(mhi->dev,
+ ring->ring_size *
+ sizeof(union mhi_dev_ring_element_type),
+ &ring->ring_cache_dma_handle,
+ GFP_KERNEL);
+ if (!ring->ring_cache)
+ return -ENOMEM;
+
+ offset = (uint32_t)(ring->ring_ctx->generic.rbase -
+ mhi->ctrl_base.host_pa);
+
+ ring->ring_shadow.device_pa = mhi->ctrl_base.device_pa + offset;
+ ring->ring_shadow.device_va = mhi->ctrl_base.device_va + offset;
+ ring->ring_shadow.host_pa = mhi->ctrl_base.host_pa + offset;
+
+ if (ring->type == RING_TYPE_ER)
+ ring->ring_ctx_shadow =
+ (union mhi_dev_ring_ctx *) (mhi->ev_ctx_shadow.device_va +
+ (ring->id - mhi->ev_ring_start) *
+ sizeof(union mhi_dev_ring_ctx));
+ else if (ring->type == RING_TYPE_CMD)
+ ring->ring_ctx_shadow =
+ (union mhi_dev_ring_ctx *) mhi->cmd_ctx_shadow.device_va;
+ else if (ring->type == RING_TYPE_CH)
+ ring->ring_ctx_shadow =
+ (union mhi_dev_ring_ctx *) (mhi->ch_ctx_shadow.device_va +
+ (ring->id - mhi->ch_ring_start) * sizeof(union mhi_dev_ring_ctx));
+
+ ring->ring_ctx_shadow = ring->ring_ctx;
+
+ if (ring->type != RING_TYPE_ER) {
+ rc = mhi_dev_cache_ring(ring, wr_offset);
+ if (rc)
+ return rc;
+ }
+
+ mhi_log(MHI_MSG_ERROR, "ctx ring_base:0x%x, rp:0x%x, wp:0x%x\n",
+ (uint32_t)ring->ring_ctx->generic.rbase,
+ (uint32_t)ring->ring_ctx->generic.rp,
+ (uint32_t)ring->ring_ctx->generic.wp);
+ ring->wr_offset = wr_offset;
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_ring_start);
+
+void mhi_ring_init(struct mhi_dev_ring *ring, enum mhi_dev_ring_type type,
+ int id)
+{
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return;
+ }
+
+ ring->id = id;
+ ring->state = RING_STATE_UINT;
+ ring->ring_cb = NULL;
+ ring->type = type;
+}
+EXPORT_SYMBOL(mhi_ring_init);
+
+void mhi_ring_set_cb(struct mhi_dev_ring *ring,
+ void (*ring_cb)(struct mhi_dev *dev,
+ union mhi_dev_ring_element_type *el, void *ctx))
+{
+ if (!ring || !ring_cb) {
+ pr_err("%s: Invalid context\n", __func__);
+ return;
+ }
+
+ ring->ring_cb = ring_cb;
+}
+EXPORT_SYMBOL(mhi_ring_set_cb);
+
+void mhi_ring_set_state(struct mhi_dev_ring *ring,
+ enum mhi_dev_ring_state state)
+{
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return;
+ }
+
+ if (state > RING_STATE_PENDING) {
+ pr_err("%s: Invalid ring state\n", __func__);
+ return;
+ }
+
+ ring->state = state;
+}
+EXPORT_SYMBOL(mhi_ring_set_state);
+
+enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring)
+{
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return -EINVAL;
+ }
+
+ return ring->state;
+}
+EXPORT_SYMBOL(mhi_ring_get_state);
diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.c b/drivers/platform/msm/mhi_dev/mhi_sm.c
new file mode 100644
index 000000000000..12a4fb229922
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_sm.c
@@ -0,0 +1,1319 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include <linux/ipa.h>
+#include "mhi_hwio.h"
+#include "mhi_sm.h"
+
+#define MHI_SM_DBG(fmt, args...) \
+ mhi_log(MHI_MSG_DBG, fmt, ##args)
+
+#define MHI_SM_ERR(fmt, args...) \
+ mhi_log(MHI_MSG_ERROR, fmt, ##args)
+
+#define MHI_SM_FUNC_ENTRY() MHI_SM_DBG("ENTRY\n")
+#define MHI_SM_FUNC_EXIT() MHI_SM_DBG("EXIT\n")
+
+
+static inline const char *mhi_sm_dev_event_str(enum mhi_dev_event state)
+{
+ const char *str;
+
+ switch (state) {
+ case MHI_DEV_EVENT_CTRL_TRIG:
+ str = "MHI_DEV_EVENT_CTRL_TRIG";
+ break;
+ case MHI_DEV_EVENT_M0_STATE:
+ str = "MHI_DEV_EVENT_M0_STATE";
+ break;
+ case MHI_DEV_EVENT_M1_STATE:
+ str = "MHI_DEV_EVENT_M1_STATE";
+ break;
+ case MHI_DEV_EVENT_M2_STATE:
+ str = "MHI_DEV_EVENT_M2_STATE";
+ break;
+ case MHI_DEV_EVENT_M3_STATE:
+ str = "MHI_DEV_EVENT_M3_STATE";
+ break;
+ case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+ str = "MHI_DEV_EVENT_HW_ACC_WAKEUP";
+ break;
+ case MHI_DEV_EVENT_CORE_WAKEUP:
+ str = "MHI_DEV_EVENT_CORE_WAKEUP";
+ break;
+ default:
+ str = "INVALID MHI_DEV_EVENT";
+ }
+
+ return str;
+}
+
+static inline const char *mhi_sm_mstate_str(enum mhi_dev_state state)
+{
+ const char *str;
+
+ switch (state) {
+ case MHI_DEV_RESET_STATE:
+ str = "RESET";
+ break;
+ case MHI_DEV_READY_STATE:
+ str = "READY";
+ break;
+ case MHI_DEV_M0_STATE:
+ str = "M0";
+ break;
+ case MHI_DEV_M1_STATE:
+ str = "M1";
+ break;
+ case MHI_DEV_M2_STATE:
+ str = "M2";
+ break;
+ case MHI_DEV_M3_STATE:
+ str = "M3";
+ break;
+ case MHI_DEV_SYSERR_STATE:
+ str = "SYSTEM ERROR";
+ break;
+ default:
+ str = "INVALID";
+ break;
+ }
+
+ return str;
+}
+
+enum mhi_sm_ep_pcie_state {
+ MHI_SM_EP_PCIE_LINK_DISABLE,
+ MHI_SM_EP_PCIE_D0_STATE,
+ MHI_SM_EP_PCIE_D3_HOT_STATE,
+ MHI_SM_EP_PCIE_D3_COLD_STATE,
+};
+
+static inline const char *mhi_sm_dstate_str(enum mhi_sm_ep_pcie_state state)
+{
+ const char *str;
+
+ switch (state) {
+ case MHI_SM_EP_PCIE_LINK_DISABLE:
+ str = "EP_PCIE_LINK_DISABLE";
+ break;
+ case MHI_SM_EP_PCIE_D0_STATE:
+ str = "D0_STATE";
+ break;
+ case MHI_SM_EP_PCIE_D3_HOT_STATE:
+ str = "D3_HOT_STATE";
+ break;
+ case MHI_SM_EP_PCIE_D3_COLD_STATE:
+ str = "D3_COLD_STATE";
+ break;
+ default:
+ str = "INVALID D-STATE";
+ break;
+ }
+
+ return str;
+}
+
+static inline const char *mhi_sm_pcie_event_str(enum ep_pcie_event event)
+{
+ const char *str;
+
+ switch (event) {
+ case EP_PCIE_EVENT_LINKDOWN:
+ str = "EP_PCIE_LINKDOWN_EVENT";
+ break;
+ case EP_PCIE_EVENT_LINKUP:
+ str = "EP_PCIE_LINKUP_EVENT";
+ break;
+ case EP_PCIE_EVENT_PM_D3_HOT:
+ str = "EP_PCIE_PM_D3_HOT_EVENT";
+ break;
+ case EP_PCIE_EVENT_PM_D3_COLD:
+ str = "EP_PCIE_PM_D3_COLD_EVENT";
+ break;
+ case EP_PCIE_EVENT_PM_RST_DEAST:
+ str = "EP_PCIE_PM_RST_DEAST_EVENT";
+ break;
+ case EP_PCIE_EVENT_PM_D0:
+ str = "EP_PCIE_PM_D0_EVENT";
+ break;
+ case EP_PCIE_EVENT_MHI_A7:
+ str = "EP_PCIE_MHI_A7";
+ break;
+ default:
+ str = "INVALID_PCIE_EVENT";
+ break;
+ }
+
+ return str;
+}
+
+/**
+ * struct mhi_sm_device_event - mhi-core event work
+ * @event: mhi core state change event
+ * @work: work struct
+ *
+ * used to add work for mhi state change event to mhi_sm_wq
+ */
+struct mhi_sm_device_event {
+ enum mhi_dev_event event;
+ struct work_struct work;
+};
+
+/**
+ * struct mhi_sm_ep_pcie_event - ep-pcie event work
+ * @event: ep-pcie link state change event
+ * @work: work struct
+ *
+ * used to add work for ep-pcie link state change event to mhi_sm_wq
+ */
+struct mhi_sm_ep_pcie_event {
+ enum ep_pcie_event event;
+ struct work_struct work;
+};
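+
+/*
+ * Both event structs above follow the same deferral pattern used throughout
+ * this file: allocate an event, embed a work_struct, and queue it on
+ * mhi_sm_wq. A minimal sketch of the pattern (illustrative only):
+ *
+ *	struct mhi_sm_device_event *evt;
+ *
+ *	evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
+ *	if (!evt)
+ *		return -ENOMEM;
+ *	evt->event = event;
+ *	INIT_WORK(&evt->work, mhi_sm_dev_event_manager);
+ *	queue_work(mhi_sm_ctx->mhi_sm_wq, &evt->work);
+ */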
+
+/**
+ * struct mhi_sm_stats - MHI state machine statistics, viewable using debugfs
+ * @m0_event_cnt: total number of MHI_DEV_EVENT_M0_STATE events
+ * @m3_event_cnt: total number of MHI_DEV_EVENT_M3_STATE events
+ * @hw_acc_wakeup_event_cnt: total number of MHI_DEV_EVENT_HW_ACC_WAKEUP events
+ * @mhi_core_wakeup_event_cnt: total number of MHI_DEV_EVENT_CORE_WAKEUP events
+ * @linkup_event_cnt: total number of EP_PCIE_EVENT_LINKUP events
+ * @rst_deast_event_cnt: total number of EP_PCIE_EVENT_PM_RST_DEAST events
+ * @d3_hot_event_cnt: total number of EP_PCIE_EVENT_PM_D3_HOT events
+ * @d3_cold_event_cnt: total number of EP_PCIE_EVENT_PM_D3_COLD events
+ * @d0_event_cnt: total number of EP_PCIE_EVENT_PM_D0 events
+ * @linkdown_event_cnt: total number of EP_PCIE_EVENT_LINKDOWN events
+ */
+struct mhi_sm_stats {
+ int m0_event_cnt;
+ int m3_event_cnt;
+ int hw_acc_wakeup_event_cnt;
+ int mhi_core_wakeup_event_cnt;
+ int linkup_event_cnt;
+ int rst_deast_event_cnt;
+ int d3_hot_event_cnt;
+ int d3_cold_event_cnt;
+ int d0_event_cnt;
+ int linkdown_event_cnt;
+};
+
+/**
+ * struct mhi_sm_dev - MHI state manager context information
+ * @mhi_state: MHI M state of the MHI device
+ * @d_state: EP-PCIe D state of the MHI device
+ * @mhi_dev: MHI device struct pointer
+ * @mhi_state_lock: mutex for mhi_state
+ * @syserr_occurred: flag to indicate if a syserr condition has occurred.
+ * @mhi_sm_wq: workqueue for state change events
+ * @pending_device_events: number of pending mhi state change events in sm_wq
+ * @pending_pcie_events: number of pending ep-pcie state change events in sm_wq
+ * @stats: stats on the handled and pending events
+ */
+struct mhi_sm_dev {
+ enum mhi_dev_state mhi_state;
+ enum mhi_sm_ep_pcie_state d_state;
+ struct mhi_dev *mhi_dev;
+ struct mutex mhi_state_lock;
+ bool syserr_occurred;
+ struct workqueue_struct *mhi_sm_wq;
+ atomic_t pending_device_events;
+ atomic_t pending_pcie_events;
+ struct mhi_sm_stats stats;
+};
+static struct mhi_sm_dev *mhi_sm_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+#define MHI_SM_MAX_MSG_LEN 1024
+static char dbg_buff[MHI_SM_MAX_MSG_LEN];
+static struct dentry *dent;
+static struct dentry *dfile_stats;
+
+static ssize_t mhi_sm_debugfs_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos);
+static ssize_t mhi_sm_debugfs_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos);
+
+const struct file_operations mhi_sm_stats_ops = {
+ .read = mhi_sm_debugfs_read,
+ .write = mhi_sm_debugfs_write,
+};
+
+static void mhi_sm_debugfs_init(void)
+{
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP | S_IWOTH;
+
+ dent = debugfs_create_dir("mhi_sm", 0);
+ if (IS_ERR(dent)) {
+ MHI_SM_ERR("fail to create folder mhi_sm\n");
+ return;
+ }
+
+ dfile_stats =
+ debugfs_create_file("stats", read_write_mode, dent,
+ 0, &mhi_sm_stats_ops);
+ if (!dfile_stats || IS_ERR(dfile_stats)) {
+ MHI_SM_ERR("fail to create file stats\n");
+ goto fail;
+ }
+ return;
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+static void mhi_sm_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(dent);
+}
+#else
+static inline void mhi_sm_debugfs_init(void) {}
+static inline void mhi_sm_debugfs_destroy(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+
+static void mhi_sm_mmio_set_mhistatus(enum mhi_dev_state state)
+{
+ struct mhi_dev *dev = mhi_sm_ctx->mhi_dev;
+
+ MHI_SM_FUNC_ENTRY();
+
+ switch (state) {
+ case MHI_DEV_READY_STATE:
+ MHI_SM_DBG("set MHISTATUS to READY mode\n");
+ mhi_dev_mmio_masked_write(dev, MHISTATUS,
+ MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT, 1);
+
+ mhi_dev_mmio_masked_write(dev, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, state);
+ break;
+ case MHI_DEV_SYSERR_STATE:
+ MHI_SM_DBG("set MHISTATUS to SYSTEM ERROR mode\n");
+ mhi_dev_mmio_masked_write(dev, MHISTATUS,
+ MHISTATUS_SYSERR_MASK,
+ MHISTATUS_SYSERR_SHIFT, 1);
+
+ mhi_dev_mmio_masked_write(dev, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, state);
+ break;
+ case MHI_DEV_M1_STATE:
+ case MHI_DEV_M2_STATE:
+ MHI_SM_ERR("Not supported state, can't set MHISTATUS to %s\n",
+ mhi_sm_mstate_str(state));
+ goto exit;
+ case MHI_DEV_M0_STATE:
+ case MHI_DEV_M3_STATE:
+ MHI_SM_DBG("set MHISTATUS.MHISTATE to %s state\n",
+ mhi_sm_mstate_str(state));
+ mhi_dev_mmio_masked_write(dev, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, state);
+ break;
+ default:
+ MHI_SM_ERR("Invalid mhi state: 0x%x state", state);
+ goto exit;
+ }
+
+ mhi_sm_ctx->mhi_state = state;
+
+exit:
+ MHI_SM_FUNC_EXIT();
+}
+
+/**
+ * mhi_sm_is_legal_event_on_state() - Determine if MHI state transition is valid
+ * @curr_state: current MHI state
+ * @event: MHI state change event
+ *
+ * Determine according to MHI state management if the state change event
+ * is valid on the current mhi state.
+ * Note: The decision doesn't take into account M1 and M2 states.
+ *
+ * Return: true: transition is valid
+ * false: transition is not valid
+ */
+static bool mhi_sm_is_legal_event_on_state(enum mhi_dev_state curr_state,
+ enum mhi_dev_event event)
+{
+ bool res;
+
+ switch (event) {
+ case MHI_DEV_EVENT_M0_STATE:
+ res = (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_D0_STATE &&
+ curr_state != MHI_DEV_RESET_STATE);
+ break;
+ case MHI_DEV_EVENT_M3_STATE:
+ case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+ case MHI_DEV_EVENT_CORE_WAKEUP:
+ res = (curr_state == MHI_DEV_M3_STATE ||
+ curr_state == MHI_DEV_M0_STATE);
+ break;
+ default:
+ MHI_SM_ERR("Received invalid event: %s\n",
+ mhi_sm_dev_event_str(event));
+ res = false;
+ break;
+ }
+
+ return res;
+}
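+
+/*
+ * For example, an M0 event is only legal when the link is in D0 and MHI is
+ * not in RESET, while M3 and wakeup events require MHI to already be in M0
+ * or M3.
+ */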
+
+/**
+ * mhi_sm_is_legal_pcie_event_on_state() - Determine if an EP-PCIe link state
+ * transition is valid on the current system state.
+ * @curr_mstate: current MHI state
+ * @curr_dstate: current ep-pcie link (D) state
+ * @event: ep-pcie link state change event
+ *
+ * Return: true: transition is valid
+ * false: transition is not valid
+ */
+static bool mhi_sm_is_legal_pcie_event_on_state(enum mhi_dev_state curr_mstate,
+ enum mhi_sm_ep_pcie_state curr_dstate, enum ep_pcie_event event)
+{
+ bool res;
+
+ switch (event) {
+ case EP_PCIE_EVENT_LINKUP:
+ case EP_PCIE_EVENT_LINKDOWN:
+ res = true;
+ break;
+ case EP_PCIE_EVENT_PM_D3_HOT:
+ res = (curr_mstate == MHI_DEV_M3_STATE &&
+ curr_dstate != MHI_SM_EP_PCIE_LINK_DISABLE);
+ break;
+ case EP_PCIE_EVENT_PM_D3_COLD:
+ res = (curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE ||
+ curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE);
+ break;
+ case EP_PCIE_EVENT_PM_RST_DEAST:
+ res = (curr_dstate == MHI_SM_EP_PCIE_D0_STATE ||
+ curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE);
+ break;
+ case EP_PCIE_EVENT_PM_D0:
+ res = (curr_dstate == MHI_SM_EP_PCIE_D0_STATE ||
+ curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE);
+ break;
+ case EP_PCIE_EVENT_MHI_A7:
+ res = true;
+ break;
+ default:
+ MHI_SM_ERR("Invalid ep_pcie event, received: %s\n",
+ mhi_sm_pcie_event_str(event));
+ res = false;
+ break;
+ }
+
+ return res;
+}
+
+/**
+ * mhi_sm_change_to_M0() - switch to M0 state.
+ *
+ * Switch MHI-device state to M0, if possible according to MHI state machine.
+ * Notify the MHI host of the transition; if MHI is suspended, resume it.
+ *
+ * Return: 0: success
+ * negative: failure
+ */
+static int mhi_sm_change_to_M0(void)
+{
+ enum mhi_dev_state old_state;
+ struct ep_pcie_msi_config cfg;
+ int res;
+
+ MHI_SM_FUNC_ENTRY();
+
+ old_state = mhi_sm_ctx->mhi_state;
+
+ if (old_state == MHI_DEV_M0_STATE) {
+ MHI_SM_DBG("Nothing to do, already in M0 state\n");
+ res = 0;
+ goto exit;
+ } else if (old_state == MHI_DEV_M3_STATE ||
+ old_state == MHI_DEV_READY_STATE) {
+ /* Retrieve MHI configuration */
+ res = mhi_dev_config_outbound_iatu(mhi_sm_ctx->mhi_dev);
+ if (res) {
+ MHI_SM_ERR("Fail to configure iATU, returned %d\n",
+ res);
+ goto exit;
+ }
+ res = ep_pcie_get_msi_config(mhi_sm_ctx->mhi_dev->phandle,
+ &cfg);
+ if (res) {
+ MHI_SM_ERR("Error retrieving pcie msi logic\n");
+ goto exit;
+ }
+ res = mhi_pcie_config_db_routing(mhi_sm_ctx->mhi_dev);
+ if (res) {
+ MHI_SM_ERR("Error configuring db routing\n");
+ goto exit;
+ }
+ } else {
+ MHI_SM_ERR("unexpected old_state: %s\n",
+ mhi_sm_mstate_str(old_state));
+ res = -EFAULT;
+ goto exit;
+ }
+ mhi_sm_mmio_set_mhistatus(MHI_DEV_M0_STATE);
+
+ /* Tell the host the device moved to M0 */
+ res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev,
+ MHI_DEV_M0_STATE);
+ if (res) {
+ MHI_SM_ERR("Failed to send event %s to host, returned %d\n",
+ mhi_sm_dev_event_str(MHI_DEV_EVENT_M0_STATE), res);
+ goto exit;
+ }
+
+ if (old_state == MHI_DEV_READY_STATE) {
+ /* Report the current execution environment (EE) to the host */
+ res = mhi_dev_send_ee_event(mhi_sm_ctx->mhi_dev, 2);
+ if (res) {
+ MHI_SM_ERR("failed sending EE event to host\n");
+ goto exit;
+ }
+ } else if (old_state == MHI_DEV_M3_STATE) {
+ /* Resume MHI operation */
+ res = mhi_dev_resume(mhi_sm_ctx->mhi_dev);
+ if (res) {
+ MHI_SM_ERR("Failed resuming mhi core, returned %d",
+ res);
+ goto exit;
+ }
+ res = ipa_mhi_resume();
+ if (res) {
+ MHI_SM_ERR("Failed resuming ipa_mhi, returned %d",
+ res);
+ goto exit;
+ }
+ }
+ res = 0;
+
+exit:
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+
+/**
+ * mhi_sm_change_to_M3() - switch to M3 state
+ *
+ * Switch MHI-device state to M3, if possible according to MHI state machine.
+ * Suspend MHI traffic and notify the host on the transition.
+ *
+ * Return: 0: success
+ * negative: failure
+ */
+static int mhi_sm_change_to_M3(void)
+{
+ enum mhi_dev_state old_state;
+ int res = 0;
+
+ MHI_SM_FUNC_ENTRY();
+
+ old_state = mhi_sm_ctx->mhi_state;
+ if (old_state == MHI_DEV_M3_STATE) {
+ MHI_SM_DBG("Nothing to do, already in M3 state\n");
+ res = 0;
+ goto exit;
+ }
+ /* Suspend MHI operation */
+ res = mhi_dev_suspend(mhi_sm_ctx->mhi_dev);
+ if (res) {
+ MHI_SM_ERR("Failed to suspend mhi_core, returned %d\n", res);
+ goto exit;
+ }
+ res = ipa_mhi_suspend(true);
+ if (res) {
+ MHI_SM_ERR("Failed to suspend ipa_mhi, returned %d\n", res);
+ goto exit;
+ }
+ mhi_sm_mmio_set_mhistatus(MHI_DEV_M3_STATE);
+
+ /* Tell the host the device moved to M3 */
+ res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev,
+ MHI_DEV_M3_STATE);
+ if (res) {
+ MHI_SM_ERR("Failed sendind event: %s to mhi_host\n",
+ mhi_sm_dev_event_str(MHI_DEV_EVENT_M3_STATE));
+ goto exit;
+ }
+
+exit:
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+
+/**
+ * mhi_sm_wakeup_host() - wakeup MHI-host
+ * @event: MHI state change event
+ *
+ * Sends a wakeup event to the MHI host via EP-PCIe when MHI is in M3 state.
+ *
+ * Return: 0: success
+ * negative: failure
+ */
+static int mhi_sm_wakeup_host(enum mhi_dev_event event)
+{
+ int res = 0;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (mhi_sm_ctx->mhi_state == MHI_DEV_M3_STATE) {
+ /*
+ * The ep_pcie driver is responsible for sending the right
+ * wakeup event (asserting WAKE#) according to the link state.
+ */
+ res = ep_pcie_wakeup_host(mhi_sm_ctx->mhi_dev->phandle);
+ if (res) {
+ MHI_SM_ERR("Failed to wakeup MHI host, returned %d\n",
+ res);
+ goto exit;
+ }
+ } else {
+ MHI_SM_DBG("Nothing to do, Host is already awake\n");
+ }
+
+exit:
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+
+/**
+ * mhi_sm_handle_syserr() - switch to system error state.
+ *
+ * Called on system error condition.
+ * Switch MHI to SYSERR state, notify MHI-host and ASSERT on the device.
+ * Synchronous function.
+ *
+ * Return: 0: success
+ * negative: failure
+ */
+static int mhi_sm_handle_syserr(void)
+{
+ int res;
+ enum ep_pcie_link_status link_status;
+ bool link_enabled = false;
+
+ MHI_SM_FUNC_ENTRY();
+
+ MHI_SM_ERR("Start handling SYSERR, MHI state: %s and %s",
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+ mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+
+ if (mhi_sm_ctx->mhi_state == MHI_DEV_SYSERR_STATE) {
+ MHI_SM_DBG("Nothing to do, already in SYSERR state\n");
+ return 0;
+ }
+
+ mhi_sm_ctx->syserr_occurred = true;
+ link_status = ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle);
+ if (link_status == EP_PCIE_LINK_DISABLED) {
+ /* try to power on ep-pcie, restore mmio, and wakeup host */
+ res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+ EP_PCIE_OPT_POWER_ON);
+ if (res) {
+ MHI_SM_ERR("Failed to power on ep-pcie, returned %d\n",
+ res);
+ goto exit;
+ }
+ mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev);
+ res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+ EP_PCIE_OPT_AST_WAKE | EP_PCIE_OPT_ENUM);
+ if (res) {
+ MHI_SM_ERR("Failed to wakup host and enable ep-pcie\n");
+ goto exit;
+ }
+ }
+
+ link_enabled = true;
+ mhi_sm_mmio_set_mhistatus(MHI_DEV_SYSERR_STATE);
+
+ /* Tell the host the device moved to SYSERR state */
+ res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev,
+ MHI_DEV_SYSERR_STATE);
+ if (res) {
+ MHI_SM_ERR("Failed to send %s state change event to host\n",
+ mhi_sm_mstate_str(MHI_DEV_SYSERR_STATE));
+ goto exit;
+ }
+
+exit:
+ if (!link_enabled)
+ MHI_SM_ERR("EP-PCIE Link is disable cannot set MMIO to %s\n",
+ mhi_sm_mstate_str(MHI_DEV_SYSERR_STATE));
+
+ MHI_SM_ERR("/n/n/nASSERT ON DEVICE !!!!/n/n/n");
+ WARN_ON();
+
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+
+/**
+ * mhi_sm_dev_event_manager() - performs MHI state change
+ * @work: work_struct used by the work queue
+ *
+ * This function is called from mhi_sm_wq, and performs mhi state change
+ * if possible according to MHI state machine
+ */
+static void mhi_sm_dev_event_manager(struct work_struct *work)
+{
+ int res;
+ struct mhi_sm_device_event *chg_event = container_of(work,
+ struct mhi_sm_device_event, work);
+
+ MHI_SM_FUNC_ENTRY();
+
+ mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+ MHI_SM_DBG("Start handling %s event, current states: %s & %s\n",
+ mhi_sm_dev_event_str(chg_event->event),
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+ mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+
+ if (mhi_sm_ctx->syserr_occurred) {
+ MHI_SM_DBG("syserr occurred, Ignoring %s\n",
+ mhi_sm_dev_event_str(chg_event->event));
+ goto unlock_and_exit;
+ }
+
+ if (!mhi_sm_is_legal_event_on_state(mhi_sm_ctx->mhi_state,
+ chg_event->event)) {
+ MHI_SM_ERR("%s: illegal in current MHI state: %s and %s\n",
+ mhi_sm_dev_event_str(chg_event->event),
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+ mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+ res = mhi_sm_handle_syserr();
+ if (res)
+ MHI_SM_ERR("Failed switching to SYSERR state\n");
+ goto unlock_and_exit;
+ }
+
+ switch (chg_event->event) {
+ case MHI_DEV_EVENT_M0_STATE:
+ res = mhi_sm_change_to_M0();
+ if (res)
+ MHI_SM_ERR("Failed switching to M0 state\n");
+ break;
+ case MHI_DEV_EVENT_M3_STATE:
+ res = mhi_sm_change_to_M3();
+ if (res)
+ MHI_SM_ERR("Failed switching to M3 state\n");
+ break;
+ case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+ case MHI_DEV_EVENT_CORE_WAKEUP:
+ res = mhi_sm_wakeup_host(chg_event->event);
+ if (res)
+ MHI_SM_ERR("Failed to wakeup MHI host\n");
+ break;
+ case MHI_DEV_EVENT_CTRL_TRIG:
+ case MHI_DEV_EVENT_M1_STATE:
+ case MHI_DEV_EVENT_M2_STATE:
+ MHI_SM_ERR("Error: %s event is not supported\n",
+ mhi_sm_dev_event_str(chg_event->event));
+ break;
+ default:
+ MHI_SM_ERR("Error: Invalid event, 0x%x", chg_event->event);
+ break;
+ }
+unlock_and_exit:
+ mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+ atomic_dec(&mhi_sm_ctx->pending_device_events);
+ kfree(chg_event);
+
+ MHI_SM_FUNC_EXIT();
+}
+
+/**
+ * mhi_sm_pcie_event_manager() - performs EP-PCIe link state change
+ * @work: work_struct used by the work queue
+ *
+ * This function is called from mhi_sm_wq, and performs ep-pcie link state
+ * change if possible according to current system state and MHI state machine
+ */
+static void mhi_sm_pcie_event_manager(struct work_struct *work)
+{
+ int res;
+ enum mhi_sm_ep_pcie_state old_dstate;
+ struct mhi_sm_ep_pcie_event *chg_event = container_of(work,
+ struct mhi_sm_ep_pcie_event, work);
+ enum ep_pcie_event pcie_event = chg_event->event;
+
+ MHI_SM_FUNC_ENTRY();
+
+ mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+ old_dstate = mhi_sm_ctx->d_state;
+
+ MHI_SM_DBG("Start handling %s event, current MHI state %s and %s\n",
+ mhi_sm_pcie_event_str(chg_event->event),
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+ mhi_sm_dstate_str(old_dstate));
+
+ if (mhi_sm_ctx->syserr_occurred &&
+ pcie_event != EP_PCIE_EVENT_LINKDOWN) {
+ MHI_SM_DBG("SYSERR occurred. Ignoring %s",
+ mhi_sm_pcie_event_str(pcie_event));
+ goto unlock_and_exit;
+ }
+
+ if (!mhi_sm_is_legal_pcie_event_on_state(mhi_sm_ctx->mhi_state,
+ old_dstate, pcie_event)) {
+ MHI_SM_ERR("%s: illegal in current MHI state: %s and %s\n",
+ mhi_sm_pcie_event_str(pcie_event),
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+ mhi_sm_dstate_str(old_dstate));
+ res = mhi_sm_handle_syserr();
+ if (res)
+ MHI_SM_ERR("Failed switching to SYSERR state\n");
+ goto unlock_and_exit;
+ }
+
+ switch (pcie_event) {
+ case EP_PCIE_EVENT_LINKUP:
+ if (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_LINK_DISABLE)
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+ break;
+ case EP_PCIE_EVENT_LINKDOWN:
+ res = mhi_sm_handle_syserr();
+ if (res)
+ MHI_SM_ERR("Failed switching to SYSERR state\n");
+ goto unlock_and_exit;
+ case EP_PCIE_EVENT_PM_D3_HOT:
+ if (old_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE) {
+ MHI_SM_DBG("cannot move to D3_HOT from D3_COLD\n");
+ break;
+ }
+ /* MMIO backup is done in the callback function */
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D3_HOT_STATE;
+ break;
+ case EP_PCIE_EVENT_PM_D3_COLD:
+ if (old_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE) {
+ MHI_SM_DBG("Nothing to do, already in D3_COLD state\n");
+ break;
+ }
+ ep_pcie_disable_endpoint(mhi_sm_ctx->mhi_dev->phandle);
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D3_COLD_STATE;
+ break;
+ case EP_PCIE_EVENT_PM_RST_DEAST:
+ if (old_dstate == MHI_SM_EP_PCIE_D0_STATE) {
+ MHI_SM_DBG("Nothing to do, already in D0 state\n");
+ break;
+ }
+ res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+ EP_PCIE_OPT_POWER_ON);
+ if (res) {
+ MHI_SM_ERR("Failed to power on ep_pcie, returned %d\n",
+ res);
+ goto unlock_and_exit;
+ }
+
+ mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev);
+
+ res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+ EP_PCIE_OPT_ENUM);
+ if (res) {
+ MHI_SM_ERR("ep-pcie failed to link train, return %d\n",
+ res);
+ goto unlock_and_exit;
+ }
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+ break;
+ case EP_PCIE_EVENT_PM_D0:
+ if (old_dstate == MHI_SM_EP_PCIE_D0_STATE) {
+ MHI_SM_DBG("Nothing to do, already in D0 state\n");
+ break;
+ }
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+ break;
+ default:
+ MHI_SM_ERR("Invalid EP_PCIE event, received 0x%x\n",
+ pcie_event);
+ break;
+ }
+
+unlock_and_exit:
+ mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+ atomic_dec(&mhi_sm_ctx->pending_pcie_events);
+ kfree(chg_event);
+
+ MHI_SM_FUNC_EXIT();
+}
+
+/**
+ * mhi_dev_sm_init() - Initialize MHI state machine.
+ * @mhi_dev: pointer to mhi device instance
+ *
+ * Assuming MHISTATUS register is in RESET state.
+ *
+ * Return: 0 success
+ * -EINVAL: invalid param
+ * -ENOMEM: allocating memory error
+ */
+int mhi_dev_sm_init(struct mhi_dev *mhi_dev)
+{
+ int res;
+ enum ep_pcie_link_status link_state;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (!mhi_dev) {
+ MHI_SM_ERR("Fail: Null argument\n");
+ return -EINVAL;
+ }
+
+ mhi_sm_ctx = devm_kzalloc(mhi_dev->dev, sizeof(*mhi_sm_ctx),
+ GFP_KERNEL);
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("devm_kzalloc err: mhi_sm_ctx\n");
+ return -ENOMEM;
+ }
+
+ /* init debugfs */
+ mhi_sm_debugfs_init();
+ mhi_sm_ctx->mhi_sm_wq = create_singlethread_workqueue("mhi_sm_wq");
+ if (!mhi_sm_ctx->mhi_sm_wq) {
+ MHI_SM_ERR("Failed to create singlethread_workqueue: sm_wq\n");
+ res = -ENOMEM;
+ goto fail_init_wq;
+ }
+
+ mutex_init(&mhi_sm_ctx->mhi_state_lock);
+ mhi_sm_ctx->mhi_dev = mhi_dev;
+ mhi_sm_ctx->mhi_state = MHI_DEV_RESET_STATE;
+ mhi_sm_ctx->syserr_occurred = false;
+ atomic_set(&mhi_sm_ctx->pending_device_events, 0);
+ atomic_set(&mhi_sm_ctx->pending_pcie_events, 0);
+
+ link_state = ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle);
+ if (link_state == EP_PCIE_LINK_ENABLED)
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+ else
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_LINK_DISABLE;
+
+ MHI_SM_FUNC_EXIT();
+ return 0;
+
+fail_init_wq:
+ mhi_sm_ctx = NULL;
+ mhi_sm_debugfs_destroy();
+ return res;
+}
+EXPORT_SYMBOL(mhi_dev_sm_init);
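+
+/*
+ * A minimal bring-up sketch for the SM API, assuming probe-time code that
+ * owns a struct mhi_dev *mhi (illustrative only):
+ *
+ *	if (mhi_dev_sm_init(mhi))
+ *		goto err;
+ *	if (mhi_dev_sm_set_ready())
+ *		goto err;
+ *	// later, e.g. when the host rings the M0 doorbell:
+ *	mhi_dev_notify_sm_event(MHI_DEV_EVENT_M0_STATE);
+ */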
+
+/**
+ * mhi_dev_sm_get_mhi_state() -Get current MHI state.
+ * @state: return param
+ *
+ * Returns the current MHI state of the state machine.
+ *
+ * Return: 0 success
+ * -EINVAL: invalid param
+ * -EFAULT: state machine isn't initialized
+ */
+int mhi_dev_sm_get_mhi_state(enum mhi_dev_state *state)
+{
+ MHI_SM_FUNC_ENTRY();
+
+ if (!state) {
+ MHI_SM_ERR("Fail: Null argument\n");
+ return -EINVAL;
+ }
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Fail: MHI SM is not initialized\n");
+ return -EFAULT;
+ }
+ *state = mhi_sm_ctx->mhi_state;
+ MHI_SM_DBG("state machine states are: %s and %s\n",
+ mhi_sm_mstate_str(*state),
+ mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+
+ MHI_SM_FUNC_EXIT();
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_sm_get_mhi_state);
+
+/**
+ * mhi_dev_sm_set_ready() -Set MHI state to ready.
+ *
+ * Set MHISTATUS register in mmio to READY.
+ * Synchronous function.
+ *
+ * Return: 0: success
+ * -EINVAL: mhi state manager is not initialized
+ * -EPERM: operation not permitted, EP-PCIe link is disabled
+ * -EFAULT: MHI state is not RESET
+ * negative: other failure
+ */
+int mhi_dev_sm_set_ready(void)
+{
+ int res;
+ int is_ready;
+ enum mhi_dev_state state;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Failed, MHI SM isn't initialized\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+ if (mhi_sm_ctx->mhi_state != MHI_DEV_RESET_STATE) {
+ MHI_SM_ERR("Can not switch to READY state from %s state\n",
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state));
+ res = -EFAULT;
+ goto unlock_and_exit;
+ }
+
+ if (mhi_sm_ctx->d_state != MHI_SM_EP_PCIE_D0_STATE) {
+ if (ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle) ==
+ EP_PCIE_LINK_ENABLED) {
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+ } else {
+ MHI_SM_ERR("ERROR: ep-pcie link is not enabled\n");
+ res = -EPERM;
+ goto unlock_and_exit;
+ }
+ }
+
+ /* verify that MHISTATUS is configured to RESET */
+ mhi_dev_mmio_masked_read(mhi_sm_ctx->mhi_dev,
+ MHISTATUS, MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, &state);
+
+ mhi_dev_mmio_masked_read(mhi_sm_ctx->mhi_dev, MHISTATUS,
+ MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT, &is_ready);
+
+ if (state != MHI_DEV_RESET_STATE || is_ready) {
+ MHI_SM_ERR("Cannot switch to READY, MHI is not in RESET state");
+ MHI_SM_ERR("-MHISTATE: %s, READY bit: 0x%x\n",
+ mhi_sm_mstate_str(state), is_ready);
+ res = -EFAULT;
+ goto unlock_and_exit;
+ }
+ mhi_sm_mmio_set_mhistatus(MHI_DEV_READY_STATE);
+ res = 0;
+
+unlock_and_exit:
+ mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+EXPORT_SYMBOL(mhi_dev_sm_set_ready);
+
+/**
+ * mhi_dev_notify_sm_event() - MHI-core notify SM on trigger occurred
+ * @event: the required MHI state change event
+ *
+ * Asynchronous function.
+ * No notification is sent after the operation is done.
+ *
+ * Return: 0: success
+ * -EFAULT: SM isn't initialized or event isn't supported
+ * -ENOMEM: memory allocation error
+ * -EINVAL: invalid event
+ */
+int mhi_dev_notify_sm_event(enum mhi_dev_event event)
+{
+ struct mhi_sm_device_event *state_change_event;
+ int res;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Failed, MHI SM is not initialized\n");
+ return -EFAULT;
+ }
+
+ MHI_SM_DBG("received: %s\n",
+ mhi_sm_dev_event_str(event));
+
+ switch (event) {
+ case MHI_DEV_EVENT_M0_STATE:
+ mhi_sm_ctx->stats.m0_event_cnt++;
+ break;
+ case MHI_DEV_EVENT_M3_STATE:
+ mhi_sm_ctx->stats.m3_event_cnt++;
+ break;
+ case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+ mhi_sm_ctx->stats.hw_acc_wakeup_event_cnt++;
+ break;
+ case MHI_DEV_EVENT_CORE_WAKEUP:
+ mhi_sm_ctx->stats.mhi_core_wakeup_event_cnt++;
+ break;
+ case MHI_DEV_EVENT_CTRL_TRIG:
+ case MHI_DEV_EVENT_M1_STATE:
+ case MHI_DEV_EVENT_M2_STATE:
+ MHI_SM_ERR("Not supported event: %s\n",
+ mhi_sm_dev_event_str(event));
+ res = -EFAULT;
+ goto exit;
+ default:
+ MHI_SM_ERR("Invalid event, received: 0x%x event\n", event);
+ res = -EINVAL;
+ goto exit;
+ }
+
+ /* init work and push to queue */
+ state_change_event = kzalloc(sizeof(*state_change_event), GFP_ATOMIC);
+ if (!state_change_event) {
+ MHI_SM_ERR("kzalloc error\n");
+ res = -ENOMEM;
+ goto exit;
+ }
+
+ state_change_event->event = event;
+ INIT_WORK(&state_change_event->work, mhi_sm_dev_event_manager);
+ atomic_inc(&mhi_sm_ctx->pending_device_events);
+ queue_work(mhi_sm_ctx->mhi_sm_wq, &state_change_event->work);
+ res = 0;
+
+exit:
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+EXPORT_SYMBOL(mhi_dev_notify_sm_event);
+
+/**
+ * mhi_dev_sm_pcie_handler() - handler of ep_pcie events
+ * @notify: pointer to a structure containing the ep_pcie event
+ *
+ * Callback function, called by the ep_pcie driver on pcie state changes.
+ * Asynchronous function.
+ */
+void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify)
+{
+ struct mhi_sm_ep_pcie_event *dstate_change_evt;
+ enum ep_pcie_event event;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (!notify) {
+ MHI_SM_ERR("Null argument - notify\n");
+ return;
+ }
+
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Failed, MHI SM is not initialized\n");
+ return;
+ }
+
+ event = notify->event;
+ MHI_SM_DBG("received: %s\n",
+ mhi_sm_pcie_event_str(event));
+
+ dstate_change_evt = kzalloc(sizeof(*dstate_change_evt), GFP_ATOMIC);
+ if (!dstate_change_evt) {
+ MHI_SM_ERR("kzalloc error\n");
+ goto exit;
+ }
+
+ switch (event) {
+ case EP_PCIE_EVENT_LINKUP:
+ mhi_sm_ctx->stats.linkup_event_cnt++;
+ break;
+ case EP_PCIE_EVENT_PM_D3_COLD:
+ mhi_sm_ctx->stats.d3_cold_event_cnt++;
+ break;
+ case EP_PCIE_EVENT_PM_D3_HOT:
+ mhi_sm_ctx->stats.d3_hot_event_cnt++;
+ mhi_dev_backup_mmio(mhi_sm_ctx->mhi_dev);
+ break;
+ case EP_PCIE_EVENT_PM_RST_DEAST:
+ mhi_sm_ctx->stats.rst_deast_event_cnt++;
+ break;
+ case EP_PCIE_EVENT_PM_D0:
+ mhi_sm_ctx->stats.d0_event_cnt++;
+ break;
+ case EP_PCIE_EVENT_LINKDOWN:
+ mhi_sm_ctx->stats.linkdown_event_cnt++;
+ mhi_sm_ctx->syserr_occurred = true;
+ MHI_SM_ERR("got %s, ERROR occurred\n",
+ mhi_sm_pcie_event_str(event));
+ break;
+ case EP_PCIE_EVENT_MHI_A7:
+ ep_pcie_mask_irq_event(mhi_sm_ctx->mhi_dev->phandle,
+ EP_PCIE_INT_EVT_MHI_A7, false);
+ mhi_dev_notify_a7_event(mhi_sm_ctx->mhi_dev);
+ kfree(dstate_change_evt);
+ goto exit;
+ default:
+ MHI_SM_ERR("Invalid ep_pcie event, received 0x%x event\n",
+ event);
+ kfree(dstate_change_evt);
+ goto exit;
+ }
+
+ dstate_change_evt->event = event;
+ INIT_WORK(&dstate_change_evt->work, mhi_sm_pcie_event_manager);
+ queue_work(mhi_sm_ctx->mhi_sm_wq, &dstate_change_evt->work);
+ atomic_inc(&mhi_sm_ctx->pending_pcie_events);
+
+exit:
+ MHI_SM_FUNC_EXIT();
+}
+EXPORT_SYMBOL(mhi_dev_sm_pcie_handler);
+
+/**
+ * mhi_dev_sm_syserr() - switch to system error state.
+ *
+ * Called on system error condition.
+ * Switch MHI to SYSERR state, notify MHI-host and ASSERT on the device.
+ * Synchronous function.
+ *
+ * Return: 0: success
+ * negative: failure
+ */
+int mhi_dev_sm_syserr(void)
+{
+ int res;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Failed, MHI SM is not initialized\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+ res = mhi_sm_handle_syserr();
+ if (res)
+ MHI_SM_ERR("mhi_sm_handle_syserr failed %d\n", res);
+ mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+EXPORT_SYMBOL(mhi_dev_sm_syserr);
+
+static ssize_t mhi_sm_debugfs_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes = 0;
+
+ if (!mhi_sm_ctx) {
+ nbytes = scnprintf(dbg_buff, MHI_SM_MAX_MSG_LEN,
+ "Not initialized\n");
+ } else {
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "*************** MHI State machine status ***************\n");
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "D state: %s\n",
+ mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "M state: %s\n",
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state));
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "pending device events: %d\n",
+ atomic_read(&mhi_sm_ctx->pending_device_events));
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "pending pcie events: %d\n",
+ atomic_read(&mhi_sm_ctx->pending_pcie_events));
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "*************** Statistics ***************\n");
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "M0 events: %d\n", mhi_sm_ctx->stats.m0_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "M3 events: %d\n", mhi_sm_ctx->stats.m3_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "HW_ACC wakeup events: %d\n",
+ mhi_sm_ctx->stats.hw_acc_wakeup_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "CORE wakeup events: %d\n",
+ mhi_sm_ctx->stats.mhi_core_wakeup_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "Linkup events: %d\n",
+ mhi_sm_ctx->stats.linkup_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "De-assert PERST events: %d\n",
+ mhi_sm_ctx->stats.rst_deast_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "D0 events: %d\n",
+ mhi_sm_ctx->stats.d0_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "D3_HOT events: %d\n",
+ mhi_sm_ctx->stats.d3_hot_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "D3_COLD events:%d\n",
+ mhi_sm_ctx->stats.d3_cold_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "Linkdown events: %d\n",
+ mhi_sm_ctx->stats.linkdown_event_cnt);
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t mhi_sm_debugfs_write(struct file *file,
+ const char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ unsigned long missing;
+ s8 in_num = 0;
+
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &in_num))
+ return -EFAULT;
+
+ switch (in_num) {
+ case 0:
+ if (atomic_read(&mhi_sm_ctx->pending_device_events) ||
+ atomic_read(&mhi_sm_ctx->pending_pcie_events))
+ MHI_SM_DBG("Note, there are pending events in sm_wq\n");
+
+ memset(&mhi_sm_ctx->stats, 0, sizeof(struct mhi_sm_stats));
+ break;
+ default:
+ MHI_SM_ERR("invalid argument: To reset statistics echo 0\n");
+ break;
+ }
+
+ return count;
+}
diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.h b/drivers/platform/msm/mhi_dev/mhi_sm.h
new file mode 100644
index 000000000000..ebf465e1cc43
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_sm.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MHI_SM_H
+#define MHI_SM_H
+
+#include "mhi.h"
+#include <linux/slab.h>
+#include <linux/msm_ep_pcie.h>
+
+
+/**
+ * enum mhi_dev_event - MHI state change events
+ * @MHI_DEV_EVENT_CTRL_TRIG: CTRL register change event.
+ * Not supported, for future use
+ * @MHI_DEV_EVENT_M0_STATE: M0 state change event
+ * @MHI_DEV_EVENT_M1_STATE: M1 state change event. Not supported, for future use
+ * @MHI_DEV_EVENT_M2_STATE: M2 state change event. Not supported, for future use
+ * @MHI_DEV_EVENT_M3_STATE: M3 state change event
+ * @MHI_DEV_EVENT_HW_ACC_WAKEUP: pending data on IPA, initiates host wakeup
+ * @MHI_DEV_EVENT_CORE_WAKEUP: MHI core initiates host wakeup
+ */
+enum mhi_dev_event {
+ MHI_DEV_EVENT_CTRL_TRIG,
+ MHI_DEV_EVENT_M0_STATE,
+ MHI_DEV_EVENT_M1_STATE,
+ MHI_DEV_EVENT_M2_STATE,
+ MHI_DEV_EVENT_M3_STATE,
+ MHI_DEV_EVENT_HW_ACC_WAKEUP,
+ MHI_DEV_EVENT_CORE_WAKEUP,
+ MHI_DEV_EVENT_MAX
+};
+
+int mhi_dev_sm_init(struct mhi_dev *dev);
+int mhi_dev_sm_set_ready(void);
+int mhi_dev_notify_sm_event(enum mhi_dev_event event);
+int mhi_dev_sm_get_mhi_state(enum mhi_dev_state *state);
+int mhi_dev_sm_syserr(void);
+void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify);
+
+#endif /* MHI_SM_H */
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
new file mode 100644
index 000000000000..64b5e7a73ef5
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -0,0 +1,835 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/ipc_logging.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_ipa.h>
+#include <linux/ipa.h>
+#include <uapi/linux/mhi.h>
+#include "mhi.h"
+
+#define MHI_DEV_NODE_NAME_LEN 13
+#define MHI_MAX_NR_OF_CLIENTS 23
+#define MHI_SOFTWARE_CLIENT_START 0
+#define MHI_SOFTWARE_CLIENT_LIMIT (MHI_MAX_SOFTWARE_CHANNELS/2)
+#define MHI_UCI_IPC_LOG_PAGES (100)
+
+#define MAX_NR_TRBS_PER_CHAN 1
+#define MHI_QTI_IFACE_ID 4
+#define DEVICE_NAME "mhi"
+
+enum uci_dbg_level {
+ UCI_DBG_VERBOSE = 0x0,
+ UCI_DBG_INFO = 0x1,
+ UCI_DBG_DBG = 0x2,
+ UCI_DBG_WARNING = 0x3,
+ UCI_DBG_ERROR = 0x4,
+ UCI_DBG_CRITICAL = 0x5,
+ UCI_DBG_reserved = 0x80000000
+};
+
+static enum uci_dbg_level mhi_uci_msg_lvl = UCI_DBG_CRITICAL;
+static enum uci_dbg_level mhi_uci_ipc_log_lvl = UCI_DBG_INFO;
+static void *mhi_uci_ipc_log;
+
+
+enum mhi_chan_dir {
+ MHI_DIR_INVALID = 0x0,
+ MHI_DIR_OUT = 0x1,
+ MHI_DIR_IN = 0x2,
+ MHI_DIR__reserved = 0x80000000
+};
+
+struct chan_attr {
+ /* SW maintained channel id */
+ enum mhi_client_channel chan_id;
+ /* maximum buffer size for this channel */
+ size_t max_packet_size;
+ /* number of buffers supported in this channel */
+ u32 nr_trbs;
+ /* direction of the channel, see enum mhi_chan_dir */
+ enum mhi_chan_dir dir;
+ u32 uci_ownership;
+};
+
+struct uci_client {
+ u32 client_index;
+ /* write channel - always odd */
+ u32 out_chan;
+ /* read channel - always even */
+ u32 in_chan;
+ struct mhi_dev_client *out_handle;
+ struct mhi_dev_client *in_handle;
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ atomic_t read_data_ready;
+ struct device *dev;
+ atomic_t ref_count;
+ int mhi_status;
+ void *pkt_loc;
+ size_t pkt_size;
+ struct mhi_dev_iov *in_buf_list;
+ atomic_t write_data_ready;
+ atomic_t mhi_chans_open;
+ struct mhi_uci_ctxt_t *uci_ctxt;
+ struct mutex in_chan_lock;
+ struct mutex out_chan_lock;
+};
+
+struct mhi_uci_ctxt_t {
+ struct chan_attr chan_attrib[MHI_MAX_SOFTWARE_CHANNELS];
+ struct uci_client client_handles[MHI_SOFTWARE_CLIENT_LIMIT];
+ void (*event_notifier)(struct mhi_dev_client_cb_reason *cb);
+ dev_t start_ctrl_nr;
+ struct cdev cdev[MHI_MAX_SOFTWARE_CHANNELS];
+ struct class *mhi_uci_class;
+ atomic_t mhi_disabled;
+ atomic_t mhi_enable_notif_wq_active;
+};
+
+#define CHAN_TO_CLIENT(_CHAN_NR) ((_CHAN_NR) / 2)
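+
+/*
+ * Illustrative mapping (not part of this patch): software channels come in
+ * pairs per client, the even channel being the client's read side and the
+ * odd channel its write side. Client 1 therefore owns channels 2 and 3,
+ * and CHAN_TO_CLIENT(2) == CHAN_TO_CLIENT(3) == 1.
+ */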
+
+#define uci_log(_msg_lvl, _msg, ...) do { \
+ if (_msg_lvl >= mhi_uci_msg_lvl) { \
+		pr_err("[%s] " _msg, __func__, ##__VA_ARGS__); \
+ } \
+ if (mhi_uci_ipc_log && (_msg_lvl >= mhi_uci_ipc_log_lvl)) { \
+ ipc_log_string(mhi_uci_ipc_log, \
+ "[%s] " _msg, __func__, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+
+module_param(mhi_uci_msg_lvl, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mhi_uci_msg_lvl, "uci dbg lvl");
+
+module_param(mhi_uci_ipc_log_lvl, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mhi_uci_ipc_log_lvl, "ipc dbg lvl");
+
+static ssize_t mhi_uci_client_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offp);
+static ssize_t mhi_uci_client_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *offp);
+static int mhi_uci_client_open(struct inode *mhi_inode, struct file*);
+static int mhi_uci_client_release(struct inode *mhi_inode,
+ struct file *file_handle);
+static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait);
+static struct mhi_uci_ctxt_t uci_ctxt;
+
+static int mhi_init_read_chan(struct uci_client *client_handle,
+ enum mhi_client_channel chan)
+{
+ int rc = 0;
+ u32 i, j;
+ struct chan_attr *chan_attributes;
+ size_t buf_size;
+ void *data_loc;
+
+ if (client_handle == NULL) {
+ uci_log(UCI_DBG_ERROR, "Bad Input data, quitting\n");
+ return -EINVAL;
+ }
+ if (chan >= MHI_MAX_SOFTWARE_CHANNELS) {
+ uci_log(UCI_DBG_ERROR, "Incorrect channel number %d\n", chan);
+ return -EINVAL;
+ }
+
+ chan_attributes = &uci_ctxt.chan_attrib[chan];
+ buf_size = chan_attributes->max_packet_size;
+
+ for (i = 0; i < (chan_attributes->nr_trbs); i++) {
+ data_loc = kmalloc(buf_size, GFP_KERNEL);
+ if (!data_loc) {
+ rc = -ENOMEM;
+ goto free_memory;
+ }
+ client_handle->in_buf_list[i].addr = data_loc;
+ client_handle->in_buf_list[i].buf_size = buf_size;
+ }
+
+ return rc;
+
+free_memory:
+ for (j = 0; j < i; j++)
+ kfree(client_handle->in_buf_list[j].addr);
+
+ return rc;
+}
+
+static int mhi_uci_send_packet(struct mhi_dev_client **client_handle, void *buf,
+ u32 size, u32 is_uspace_buf)
+{
+	void *data_loc = NULL;
+	uintptr_t memcpy_result = 0;
+	int data_inserted_so_far = 0;
+	struct uci_client *uci_handle;
+
+	if (!client_handle || !buf || !size)
+		return -EINVAL;
+
+	uci_handle = container_of(client_handle, struct uci_client,
+					out_handle);
+
+ if (is_uspace_buf) {
+ data_loc = kmalloc(size, GFP_KERNEL);
+ if (!data_loc) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to allocate memory 0x%x\n",
+ size);
+ return -ENOMEM;
+ }
+		memcpy_result = copy_from_user(data_loc, buf, size);
+		if (memcpy_result) {
+			data_inserted_so_far = -EFAULT;
+			goto error_memcpy;
+		}
+ } else {
+ data_loc = buf;
+ }
+
+ data_inserted_so_far = mhi_dev_write_channel(*client_handle, data_loc,
+ size);
+
+error_memcpy:
+	/* Only free the bounce buffer allocated here, never the caller's */
+	if (is_uspace_buf)
+		kfree(data_loc);
+	return data_inserted_so_far;
+}
+
+static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct uci_client *uci_handle;
+
+ uci_handle = file->private_data;
+
+ if (!uci_handle)
+ return -ENODEV;
+
+ poll_wait(file, &uci_handle->read_wq, wait);
+ poll_wait(file, &uci_handle->write_wq, wait);
+ if (!atomic_read(&uci_ctxt.mhi_disabled) &&
+ !mhi_dev_channel_isempty(uci_handle->in_handle)) {
+ uci_log(UCI_DBG_VERBOSE,
+ "Client can read chan %d\n", uci_handle->in_chan);
+ mask |= POLLIN | POLLRDNORM;
+ }
+ if (!atomic_read(&uci_ctxt.mhi_disabled) &&
+ !mhi_dev_channel_isempty(uci_handle->out_handle)) {
+ uci_log(UCI_DBG_VERBOSE,
+ "Client can write chan %d\n", uci_handle->out_chan);
+ mask |= POLLOUT | POLLWRNORM;
+ }
+
+ uci_log(UCI_DBG_VERBOSE,
+ "Client attempted to poll chan %d, returning mask 0x%x\n",
+ uci_handle->in_chan, mask);
+ return mask;
+}
+
+static int open_client_mhi_channels(struct uci_client *uci_client)
+{
+ int rc = 0;
+
+ uci_log(UCI_DBG_DBG,
+ "Starting channels %d %d.\n",
+ uci_client->out_chan,
+ uci_client->in_chan);
+ mutex_lock(&uci_client->out_chan_lock);
+ mutex_lock(&uci_client->in_chan_lock);
+ uci_log(UCI_DBG_DBG,
+ "Initializing inbound chan %d.\n",
+ uci_client->in_chan);
+
+ rc = mhi_init_read_chan(uci_client, uci_client->in_chan);
+ if (rc < 0) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to init inbound 0x%x, ret 0x%x\n",
+ uci_client->in_chan, rc);
+ }
+
+ rc = mhi_dev_open_channel(uci_client->out_chan,
+ &uci_client->out_handle,
+ uci_ctxt.event_notifier);
+ if (rc < 0)
+ goto handle_not_rdy_err;
+
+ rc = mhi_dev_open_channel(uci_client->in_chan,
+ &uci_client->in_handle,
+ uci_ctxt.event_notifier);
+
+ if (rc < 0) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to open chan %d, ret 0x%x\n",
+ uci_client->out_chan, rc);
+ goto handle_in_err;
+ }
+ atomic_set(&uci_client->mhi_chans_open, 1);
+ mutex_unlock(&uci_client->in_chan_lock);
+ mutex_unlock(&uci_client->out_chan_lock);
+
+ return 0;
+
+handle_in_err:
+ mhi_dev_close_channel(uci_client->out_handle);
+handle_not_rdy_err:
+ mutex_unlock(&uci_client->in_chan_lock);
+ mutex_unlock(&uci_client->out_chan_lock);
+ return rc;
+}
+
+static int mhi_uci_client_open(struct inode *mhi_inode,
+ struct file *file_handle)
+{
+ struct uci_client *uci_handle;
+ int rc = 0;
+
+ uci_handle =
+ &uci_ctxt.client_handles[iminor(mhi_inode)];
+
+ uci_log(UCI_DBG_DBG,
+		"Client opened device node 0x%x, ref count 0x%x\n",
+ iminor(mhi_inode), atomic_read(&uci_handle->ref_count));
+	if (atomic_add_return(1, &uci_handle->ref_count) == 1) {
+ uci_handle->uci_ctxt = &uci_ctxt;
+ if (!atomic_read(&uci_handle->mhi_chans_open)) {
+ uci_log(UCI_DBG_INFO,
+ "Opening channels client %d\n",
+ iminor(mhi_inode));
+			rc = open_client_mhi_channels(uci_handle);
+			if (rc) {
+				uci_log(UCI_DBG_INFO,
+					"Failed to open channels ret %d\n", rc);
+				atomic_dec(&uci_handle->ref_count);
+				return rc;
+			}
+ }
+ }
+ file_handle->private_data = uci_handle;
+
+ return 0;
+
+}
+
+static int mhi_uci_client_release(struct inode *mhi_inode,
+ struct file *file_handle)
+{
+	struct uci_client *uci_handle = file_handle->private_data;
+	struct mhi_uci_ctxt_t *uci_ctxt;
+	u32 nr_in_bufs = 0;
+	int rc = 0;
+	int in_chan = 0;
+	u32 buf_size = 0;
+
+	if (!uci_handle)
+		return -EINVAL;
+
+	uci_ctxt = uci_handle->uci_ctxt;
+	in_chan = iminor(mhi_inode) + 1;
+	nr_in_bufs = uci_ctxt->chan_attrib[in_chan].nr_trbs;
+	buf_size = uci_ctxt->chan_attrib[in_chan].max_packet_size;
+ if (atomic_sub_return(1, &uci_handle->ref_count) == 0) {
+ uci_log(UCI_DBG_DBG,
+ "Last client left, closing channel 0x%x\n",
+ iminor(mhi_inode));
+ if (atomic_read(&uci_handle->mhi_chans_open)) {
+ atomic_set(&uci_handle->mhi_chans_open, 0);
+
+ mutex_lock(&uci_handle->out_chan_lock);
+ rc = mhi_dev_close_channel(uci_handle->out_handle);
+ wake_up(&uci_handle->write_wq);
+ mutex_unlock(&uci_handle->out_chan_lock);
+
+ mutex_lock(&uci_handle->in_chan_lock);
+ rc = mhi_dev_close_channel(uci_handle->in_handle);
+ wake_up(&uci_handle->read_wq);
+ mutex_unlock(&uci_handle->in_chan_lock);
+
+ }
+ atomic_set(&uci_handle->read_data_ready, 0);
+ atomic_set(&uci_handle->write_data_ready, 0);
+ file_handle->private_data = NULL;
+ } else {
+ uci_log(UCI_DBG_DBG,
+ "Client close chan %d, ref count 0x%x\n",
+ iminor(mhi_inode),
+ atomic_read(&uci_handle->ref_count));
+ }
+ return rc;
+}
+
+static ssize_t mhi_uci_client_read(struct file *file, char __user *buf,
+ size_t uspace_buf_size, loff_t *bytes_pending)
+{
+ struct uci_client *uci_handle = NULL;
+ struct mhi_dev_client *client_handle = NULL;
+ int bytes_avail = 0;
+ int ret_val = 0;
+ struct mutex *mutex;
+ u32 chan = 0;
+ ssize_t bytes_copied = 0;
+ u32 addr_offset = 0;
+ uint32_t buf_size;
+ uint32_t chained = 0;
+ void *local_buf = NULL;
+
+ if (!file || !buf || !uspace_buf_size ||
+ !file->private_data)
+ return -EINVAL;
+
+ uci_handle = file->private_data;
+ client_handle = uci_handle->in_handle;
+ mutex = &uci_handle->in_chan_lock;
+ chan = uci_handle->in_chan;
+
+ mutex_lock(mutex);
+
+ local_buf = uci_handle->in_buf_list[0].addr;
+ buf_size = uci_handle->in_buf_list[0].buf_size;
+
+
+ uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n", chan);
+ do {
+ if (!uci_handle->pkt_loc &&
+ !atomic_read(&uci_ctxt.mhi_disabled)) {
+
+ bytes_avail = mhi_dev_read_channel(client_handle,
+ local_buf, buf_size, &chained);
+
+ uci_log(UCI_DBG_VERBOSE,
+				"reading from mhi_core local_buf = %p, buf_size = 0x%x, bytes_read = 0x%x\n",
+ local_buf, buf_size, bytes_avail);
+
+ if (bytes_avail < 0) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to read channel ret %d\n",
+ bytes_avail);
+ ret_val = -EIO;
+ goto error;
+ }
+
+ if (bytes_avail > 0) {
+ uci_handle->pkt_loc = (void *)local_buf;
+ uci_handle->pkt_size = bytes_avail;
+
+ *bytes_pending = (loff_t)uci_handle->pkt_size;
+ uci_log(UCI_DBG_VERBOSE,
+ "Got pkt of size 0x%x at addr %p, chan %d\n",
+ uci_handle->pkt_size, local_buf, chan);
+ } else {
+ uci_handle->pkt_loc = 0;
+ uci_handle->pkt_size = 0;
+ }
+ }
+ if (bytes_avail == 0) {
+
+ /* If nothing was copied yet, wait for data */
+ uci_log(UCI_DBG_VERBOSE,
+ "No data read_data_ready %d, chan %d\n",
+ atomic_read(&uci_handle->read_data_ready),
+ chan);
+
+ ret_val = wait_event_interruptible(uci_handle->read_wq,
+ (!mhi_dev_channel_isempty(client_handle)));
+
+ if (ret_val == -ERESTARTSYS) {
+ uci_log(UCI_DBG_ERROR, "Exit signal caught\n");
+ goto error;
+ }
+ uci_log(UCI_DBG_VERBOSE,
+ "Thread woke up. Got data on chan %d read_data_ready %d\n",
+ chan,
+ atomic_read(&uci_handle->read_data_ready));
+
+ /* A valid packet was returned from MHI */
+ } else if (bytes_avail > 0) {
+ uci_log(UCI_DBG_VERBOSE,
+ "Got packet: avail pkts %d phy_adr %p, chan %d\n",
+ atomic_read(&uci_handle->read_data_ready),
+ local_buf,
+ chan);
+ break;
+ /*
+ * MHI did not return a valid packet, but we have one
+ * which we did not finish returning to user
+ */
+ } else {
+ uci_log(UCI_DBG_CRITICAL,
+ "chan %d err: avail pkts %d phy_adr %p",
+ chan,
+ atomic_read(&uci_handle->read_data_ready),
+ local_buf);
+ return -EIO;
+ }
+ } while (!uci_handle->pkt_loc);
+
+ if (uspace_buf_size >= *bytes_pending) {
+ addr_offset = uci_handle->pkt_size - *bytes_pending;
+ if (copy_to_user(buf, uci_handle->pkt_loc + addr_offset,
+ *bytes_pending)) {
+ ret_val = -EIO;
+ goto error;
+ }
+
+		bytes_copied = *bytes_pending;
+		uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x, chan %d\n",
+			bytes_copied, (u32)*bytes_pending, chan);
+		*bytes_pending = 0;
+ } else {
+ addr_offset = uci_handle->pkt_size - *bytes_pending;
+ if (copy_to_user(buf, (void *) (uintptr_t)uci_handle->pkt_loc +
+ addr_offset, uspace_buf_size)) {
+ ret_val = -EIO;
+ goto error;
+ }
+ bytes_copied = uspace_buf_size;
+ *bytes_pending -= uspace_buf_size;
+ uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x,chan %d\n",
+ bytes_copied,
+ (u32)*bytes_pending,
+ chan);
+ }
+	/* We finished with this buffer, reset the pending packet state */
+ if (*bytes_pending == 0) {
+ uci_log(UCI_DBG_VERBOSE,
+ "All data consumed. Pkt loc %p ,chan %d\n",
+ uci_handle->pkt_loc, chan);
+ uci_handle->pkt_loc = 0;
+ uci_handle->pkt_size = 0;
+ }
+ uci_log(UCI_DBG_VERBOSE,
+ "Returning 0x%x bytes, 0x%x bytes left\n",
+ bytes_copied, (u32)*bytes_pending);
+ mutex_unlock(mutex);
+ return bytes_copied;
+error:
+ mutex_unlock(mutex);
+ uci_log(UCI_DBG_ERROR, "Returning %d\n", ret_val);
+ return ret_val;
+}
+
+static ssize_t mhi_uci_client_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct uci_client *uci_handle = NULL;
+ int ret_val = 0;
+ u32 chan = 0xFFFFFFFF;
+
+ if (file == NULL || buf == NULL ||
+ !count || file->private_data == NULL)
+ return -EINVAL;
+
+ uci_handle = file->private_data;
+
+ if (atomic_read(&uci_ctxt.mhi_disabled)) {
+ uci_log(UCI_DBG_ERROR,
+ "Client %d attempted to write while MHI is disabled\n",
+ uci_handle->out_chan);
+ return -EIO;
+ }
+ chan = uci_handle->out_chan;
+ mutex_lock(&uci_handle->out_chan_lock);
+ while (!ret_val) {
+ ret_val = mhi_uci_send_packet(&uci_handle->out_handle,
+ (void *)buf, count, 1);
+ if (ret_val < 0) {
+ uci_log(UCI_DBG_ERROR,
+				"Error while writing data to MHI, chan %d, buf %p, size %zu\n",
+ chan, (void *)buf, count);
+ ret_val = -EIO;
+ break;
+ }
+ if (!ret_val) {
+ uci_log(UCI_DBG_VERBOSE,
+ "No descriptors available, did we poll, chan %d?\n",
+ chan);
+ mutex_unlock(&uci_handle->out_chan_lock);
+ ret_val = wait_event_interruptible(uci_handle->write_wq,
+ !mhi_dev_channel_isempty(
+ uci_handle->out_handle));
+
+ mutex_lock(&uci_handle->out_chan_lock);
+ if (-ERESTARTSYS == ret_val) {
+ uci_log(UCI_DBG_WARNING,
+ "Waitqueue cancelled by system\n");
+ break;
+ }
+ }
+ }
+ mutex_unlock(&uci_handle->out_chan_lock);
+ return ret_val;
+}
+
+static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt)
+{
+ u32 i = 0;
+ u32 data_size = TRB_MAX_DATA_SIZE;
+ u32 index = 0;
+ struct uci_client *client;
+ struct chan_attr *chan_attrib = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(uci_ctxt->chan_attrib); i++) {
+ chan_attrib = &uci_ctxt->chan_attrib[i];
+ switch (i) {
+ case MHI_CLIENT_LOOPBACK_OUT:
+ case MHI_CLIENT_LOOPBACK_IN:
+ case MHI_CLIENT_SAHARA_OUT:
+ case MHI_CLIENT_SAHARA_IN:
+ case MHI_CLIENT_EFS_OUT:
+ case MHI_CLIENT_EFS_IN:
+ case MHI_CLIENT_QMI_OUT:
+ case MHI_CLIENT_QMI_IN:
+ case MHI_CLIENT_IP_CTRL_0_OUT:
+ case MHI_CLIENT_IP_CTRL_0_IN:
+ case MHI_CLIENT_IP_CTRL_1_OUT:
+ case MHI_CLIENT_IP_CTRL_1_IN:
+ case MHI_CLIENT_DUN_OUT:
+ case MHI_CLIENT_DUN_IN:
+ chan_attrib->uci_ownership = 1;
+ break;
+ default:
+ chan_attrib->uci_ownership = 0;
+ break;
+ }
+ if (chan_attrib->uci_ownership) {
+ chan_attrib->chan_id = i;
+ chan_attrib->max_packet_size = data_size;
+ index = CHAN_TO_CLIENT(i);
+ client = &uci_ctxt->client_handles[index];
+ chan_attrib->nr_trbs = 9;
+ client->in_buf_list =
+ kmalloc(sizeof(struct mhi_dev_iov) *
+ chan_attrib->nr_trbs,
+ GFP_KERNEL);
+ if (client->in_buf_list == NULL)
+ return -ENOMEM;
+ }
+ if (i % 2 == 0)
+ chan_attrib->dir = MHI_DIR_OUT;
+ else
+ chan_attrib->dir = MHI_DIR_IN;
+ }
+ return 0;
+}
+
+
+static void uci_event_notifier(struct mhi_dev_client_cb_reason *reason)
+{
+ int client_index = 0;
+ struct uci_client *uci_handle = NULL;
+
+ if (reason->reason == MHI_DEV_TRE_AVAILABLE) {
+ client_index = reason->ch_id / 2;
+ uci_handle = &uci_ctxt.client_handles[client_index];
+ uci_log(UCI_DBG_DBG,
+			"received TRE available event for chan %d\n",
+ uci_handle->in_chan);
+
+ if (reason->ch_id % 2) {
+ atomic_set(&uci_handle->write_data_ready, 1);
+ wake_up(&uci_handle->write_wq);
+ } else {
+ atomic_set(&uci_handle->read_data_ready, 1);
+ wake_up(&uci_handle->read_wq);
+ }
+ }
+}
+
+static int mhi_register_client(struct uci_client *mhi_client, int index)
+{
+ init_waitqueue_head(&mhi_client->read_wq);
+ init_waitqueue_head(&mhi_client->write_wq);
+ mhi_client->out_chan = index * 2 + 1;
+ mhi_client->in_chan = index * 2;
+ mhi_client->client_index = index;
+
+ mutex_init(&mhi_client->in_chan_lock);
+ mutex_init(&mhi_client->out_chan_lock);
+
+ uci_log(UCI_DBG_DBG, "Registering chan %d.\n", mhi_client->out_chan);
+ return 0;
+}
+
+static long mhi_uci_client_ioctl(struct file *file, unsigned cmd,
+ unsigned long arg)
+{
+ struct uci_client *uci_handle = NULL;
+ int rc = 0;
+ struct ep_info epinfo;
+
+ if (file == NULL || file->private_data == NULL)
+ return -EINVAL;
+
+ uci_handle = file->private_data;
+
+ uci_log(UCI_DBG_DBG, "Received command %d for client:%d\n",
+ cmd, uci_handle->client_index);
+
+ if (cmd == MHI_UCI_EP_LOOKUP) {
+ uci_log(UCI_DBG_DBG, "EP_LOOKUP for client:%d\n",
+ uci_handle->client_index);
+ epinfo.ph_ep_info.ep_type = DATA_EP_TYPE_PCIE;
+ epinfo.ph_ep_info.peripheral_iface_id = MHI_QTI_IFACE_ID;
+ epinfo.ipa_ep_pair.cons_pipe_num =
+ ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD);
+ epinfo.ipa_ep_pair.prod_pipe_num =
+ ipa_get_ep_mapping(IPA_CLIENT_MHI_CONS);
+
+ uci_log(UCI_DBG_DBG, "client:%d ep_type:%d intf:%d\n",
+ uci_handle->client_index,
+ epinfo.ph_ep_info.ep_type,
+ epinfo.ph_ep_info.peripheral_iface_id);
+
+ uci_log(UCI_DBG_DBG, "ipa_cons_idx:%d ipa_prod_idx:%d\n",
+ epinfo.ipa_ep_pair.cons_pipe_num,
+ epinfo.ipa_ep_pair.prod_pipe_num);
+
+ rc = copy_to_user((void __user *)arg, &epinfo,
+ sizeof(epinfo));
+ if (rc)
+ uci_log(UCI_DBG_ERROR, "copying to user space failed");
+ } else {
+		uci_log(UCI_DBG_ERROR, "unsupported ioctl cmd:%d\n", cmd);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
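+
+/*
+ * Illustrative userspace usage (not part of this patch): a process that
+ * opened one of the /dev/mhi_pipe_* nodes created below could query the
+ * IPA endpoint pairing with:
+ *
+ *	struct ep_info epinfo;
+ *
+ *	if (!ioctl(fd, MHI_UCI_EP_LOOKUP, &epinfo))
+ *		printf("cons %d prod %d\n",
+ *			epinfo.ipa_ep_pair.cons_pipe_num,
+ *			epinfo.ipa_ep_pair.prod_pipe_num);
+ */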
+
+static const struct file_operations mhi_uci_client_fops = {
+ .read = mhi_uci_client_read,
+ .write = mhi_uci_client_write,
+ .open = mhi_uci_client_open,
+ .release = mhi_uci_client_release,
+ .poll = mhi_uci_client_poll,
+ .unlocked_ioctl = mhi_uci_client_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mhi_uci_client_ioctl,
+#endif
+};
+
+int mhi_uci_init(void)
+{
+	int i = 0;
+ int ret_val = 0;
+ struct uci_client *mhi_client = NULL;
+ s32 r = 0;
+
+ mhi_uci_ipc_log = ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES,
+ "mhi-uci", 0);
+ if (mhi_uci_ipc_log == NULL) {
+ uci_log(UCI_DBG_WARNING,
+ "Failed to create IPC logging context\n");
+ }
+ uci_ctxt.event_notifier = uci_event_notifier;
+
+ uci_log(UCI_DBG_DBG, "Setting up channel attributes.\n");
+
+ ret_val = uci_init_client_attributes(&uci_ctxt);
+ if (ret_val < 0) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to init client attributes\n");
+ return -EIO;
+ }
+
+ uci_log(UCI_DBG_DBG, "Initializing clients\n");
+ uci_log(UCI_DBG_INFO, "Registering for MHI events.\n");
+
+ for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
+ if (uci_ctxt.chan_attrib[i * 2].uci_ownership) {
+ mhi_client = &uci_ctxt.client_handles[i];
+
+ r = mhi_register_client(mhi_client, i);
+
+ if (r) {
+ uci_log(UCI_DBG_CRITICAL,
+ "Failed to reg client %d ret %d\n",
+					i, r);
+ }
+ }
+ }
+ uci_log(UCI_DBG_INFO, "Allocating char devices.\n");
+ r = alloc_chrdev_region(&uci_ctxt.start_ctrl_nr,
+ 0, MHI_MAX_SOFTWARE_CHANNELS,
+ DEVICE_NAME);
+
+ if (IS_ERR_VALUE(r)) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to alloc char devs, ret 0x%x\n", r);
+ goto failed_char_alloc;
+ }
+ uci_log(UCI_DBG_INFO, "Creating class\n");
+ uci_ctxt.mhi_uci_class = class_create(THIS_MODULE,
+ DEVICE_NAME);
+	if (IS_ERR(uci_ctxt.mhi_uci_class)) {
+		r = PTR_ERR(uci_ctxt.mhi_uci_class);
+		uci_log(UCI_DBG_ERROR,
+			"Failed to instantiate class, ret 0x%x\n", r);
+		goto failed_class_add;
+	}
+
+ uci_log(UCI_DBG_INFO, "Setting up device nodes.\n");
+ for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
+ if (uci_ctxt.chan_attrib[i*2].uci_ownership) {
+ cdev_init(&uci_ctxt.cdev[i], &mhi_uci_client_fops);
+ uci_ctxt.cdev[i].owner = THIS_MODULE;
+ r = cdev_add(&uci_ctxt.cdev[i],
+ uci_ctxt.start_ctrl_nr + i, 1);
+ if (IS_ERR_VALUE(r)) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to add cdev %d, ret 0x%x\n",
+ i, r);
+ goto failed_char_add;
+ }
+ uci_ctxt.client_handles[i].dev =
+ device_create(uci_ctxt.mhi_uci_class, NULL,
+ uci_ctxt.start_ctrl_nr + i,
+ NULL, DEVICE_NAME "_pipe_%d",
+ i * 2);
+
+ if (IS_ERR(uci_ctxt.client_handles[i].dev)) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to add cdev %d\n", i);
+ cdev_del(&uci_ctxt.cdev[i]);
+ goto failed_device_create;
+ }
+ }
+ }
+ return 0;
+
+failed_char_add:
+failed_device_create:
+	while (--i >= 0) {
+		cdev_del(&uci_ctxt.cdev[i]);
+		device_destroy(uci_ctxt.mhi_uci_class,
+			uci_ctxt.start_ctrl_nr + i);
+	}
+ class_destroy(uci_ctxt.mhi_uci_class);
+failed_class_add:
+ unregister_chrdev_region(MAJOR(uci_ctxt.start_ctrl_nr),
+ MHI_MAX_SOFTWARE_CHANNELS);
+failed_char_alloc:
+ return r;
+}
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index a14feed47dcb..b6e2a55d5a9e 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -636,4 +636,14 @@ config QCOM_REMOTEQDSS
enable/disable these events. Interface located in
/sys/class/remoteqdss.
+config MSM_SERVICE_NOTIFIER
+ bool "Service Notifier"
+ depends on MSM_SERVICE_LOCATOR && MSM_SUBSYSTEM_RESTART
+ help
+ The Service Notifier provides a library for a kernel client to
+ register for state change notifications regarding a remote service.
+ A remote service here refers to a process providing certain services
+ like audio, the identifier for which is provided by the service
+ locator.
+
source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 13c63d6e59bf..1a4757f16e77 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o
obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o
obj-$(CONFIG_SOC_BUS) += socinfo.o
obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/
+obj-$(CONFIG_MSM_SERVICE_NOTIFIER) += service-notifier.o
obj-$(CONFIG_MSM_SECURE_BUFFER) += secure_buffer.o
obj-$(CONFIG_MSM_MPM_OF) += mpm-of.o
obj-$(CONFIG_MSM_EVENT_TIMER) += event_timer.o
diff --git a/drivers/soc/qcom/event_timer.c b/drivers/soc/qcom/event_timer.c
index 374fa56b0b28..5ae42ee749b3 100644
--- a/drivers/soc/qcom/event_timer.c
+++ b/drivers/soc/qcom/event_timer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -97,7 +97,7 @@ struct event_timer_info *add_event_timer(uint32_t irq,
if (irq) {
struct irq_desc *desc = irq_to_desc(irq);
- struct cpumask *mask = desc->irq_data.affinity;
+ struct cpumask *mask = desc->irq_common_data.affinity;
get_online_cpus();
event_info->cpu = cpumask_any_and(mask, cpu_online_mask);
diff --git a/drivers/soc/qcom/remoteqdss.c b/drivers/soc/qcom/remoteqdss.c
index bb10099db83f..e66ca587adca 100644
--- a/drivers/soc/qcom/remoteqdss.c
+++ b/drivers/soc/qcom/remoteqdss.c
@@ -16,6 +16,7 @@
#include <soc/qcom/scm.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
+#include <linux/dma-mapping.h>
#define REMOTEQDSS_FLAG_QUIET (BIT(0))
@@ -25,10 +26,10 @@ module_param_named(dbg_flags, remoteqdss_dbg_flags, ulong, 0644);
static struct dentry *remoteqdss_dir;
#define REMOTEQDSS_ERR(fmt, ...) \
- pr_err("%s: " fmt, __func__, ## __VA_ARGS__)
+ pr_debug("%s: " fmt, __func__, ## __VA_ARGS__)
#define REMOTEQDSS_ERR_CALLER(fmt, ...) \
- pr_err("%pf: " fmt, __builtin_return_address(0), ## __VA_ARGS__)
+ pr_debug("%pf: " fmt, __builtin_return_address(1), ## __VA_ARGS__)
struct qdss_msg_translation {
u64 val;
@@ -42,12 +43,58 @@ struct qdss_msg_translation {
* dir Parent debugfs directory
*/
struct remoteqdss_data {
- u8 id;
- u64 sw_entity_group;
- u64 sw_event_group;
+ uint32_t id;
+ uint32_t sw_entity_group;
+ uint32_t sw_event_group;
struct dentry *dir;
};
+static struct device dma_dev;
+
+/* Allowed message formats */
+
+enum remoteqdss_cmd_id {
+ CMD_ID_QUERY_SWEVENT_TAG,
+ CMD_ID_FILTER_SWTRACE_STATE,
+ CMD_ID_QUERY_SWTRACE_STATE,
+ CMD_ID_FILTER_SWEVENT,
+ CMD_ID_QUERY_SWEVENT,
+ CMD_ID_FILTER_SWENTITY,
+ CMD_ID_QUERY_SWENTITY,
+};
+
+struct remoteqdss_header_fmt {
+ uint32_t subsys_id;
+ uint32_t cmd_id;
+};
+
+struct remoteqdss_filter_swtrace_state_fmt {
+ struct remoteqdss_header_fmt h;
+ uint32_t state;
+};
+
+struct remoteqdss_filter_swevent_fmt {
+ struct remoteqdss_header_fmt h;
+ uint32_t event_group;
+ uint32_t event_mask;
+};
+
+struct remoteqdss_query_swevent_fmt {
+ struct remoteqdss_header_fmt h;
+ uint32_t event_group;
+};
+
+struct remoteqdss_filter_swentity_fmt {
+ struct remoteqdss_header_fmt h;
+ uint32_t entity_group;
+ uint32_t entity_mask;
+};
+
+struct remoteqdss_query_swentity_fmt {
+ struct remoteqdss_header_fmt h;
+ uint32_t entity_group;
+};
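+
+/*
+ * Illustrative layout (not part of this patch): filtering software events
+ * for a hypothetical subsystem id 3 would populate a
+ * struct remoteqdss_filter_swevent_fmt in DMA-coherent memory as
+ *
+ *	{ .h = { .subsys_id = 3, .cmd_id = CMD_ID_FILTER_SWEVENT },
+ *	  .event_group = group, .event_mask = mask }
+ *
+ * and hand its dma_addr_t and size to the shared SCM helper,
+ * remoteqdss_do_scm_call(), defined further below.
+ */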
+
/* msgs is a null terminated array */
static void remoteqdss_err_translation(struct qdss_msg_translation *msgs,
u64 err)
@@ -71,9 +118,8 @@ static void remoteqdss_err_translation(struct qdss_msg_translation *msgs,
REMOTEQDSS_ERR_CALLER("Error 0x%llx\n", err);
}
-/* SCM based devices */
-#define SCM_FILTER_SWTRACE_ID (0x1)
-#define SCM_QUERY_SWTRACE_ID (0x2)
+/* Shared across all remoteqdss scm functions */
+#define SCM_CMD_ID (0x1)
/* Response Values */
#define SCM_CMD_FAIL (0x80)
@@ -113,25 +159,45 @@ static void free_remoteqdss_data(struct remoteqdss_data *data)
kfree(data);
}
-static int remoteqdss_scm_query_swtrace(void *priv, u64 *val)
+static int remoteqdss_do_scm_call(struct scm_desc *desc,
+ dma_addr_t addr, size_t size)
{
- struct remoteqdss_data *data = priv;
int ret;
- struct scm_desc desc;
- memset(&desc, 0, sizeof(desc));
- desc.args[0] = data->id;
- desc.arginfo = SCM_ARGS(1, SCM_VAL);
+ memset(desc, 0, sizeof(*desc));
+ desc->args[0] = dma_to_phys(NULL, addr);
+ desc->args[1] = size;
+ desc->arginfo = SCM_ARGS(2, SCM_RO, SCM_VAL);
ret = scm_call2(
- SCM_SIP_FNID(SCM_SVC_QDSS, SCM_QUERY_SWTRACE_ID),
- &desc);
+ SCM_SIP_FNID(SCM_SVC_QDSS, SCM_CMD_ID),
+ desc);
if (ret)
return ret;
- remoteqdss_err_translation(remoteqdss_scm_msgs, desc.ret[0]);
- ret = desc.ret[0] ? -EINVAL : 0;
+ remoteqdss_err_translation(remoteqdss_scm_msgs, desc->ret[0]);
+ ret = desc->ret[0] ? -EINVAL : 0;
+ return ret;
+}
+
+static int remoteqdss_scm_query_swtrace(void *priv, u64 *val)
+{
+ struct remoteqdss_data *data = priv;
+ int ret;
+ struct scm_desc desc;
+ struct remoteqdss_header_fmt *fmt;
+ dma_addr_t addr;
+
+ fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+ if (!fmt)
+ return -ENOMEM;
+ fmt->subsys_id = data->id;
+ fmt->cmd_id = CMD_ID_QUERY_SWTRACE_STATE;
+
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
*val = desc.ret[1];
+
+ dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
return ret;
}
@@ -140,26 +206,150 @@ static int remoteqdss_scm_filter_swtrace(void *priv, u64 val)
struct remoteqdss_data *data = priv;
int ret;
struct scm_desc desc;
+ struct remoteqdss_filter_swtrace_state_fmt *fmt;
+ dma_addr_t addr;
- memset(&desc, 0, sizeof(desc));
- desc.args[0] = data->id;
- desc.args[1] = val;
- desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+ fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+ if (!fmt)
+ return -ENOMEM;
+ fmt->h.subsys_id = data->id;
+ fmt->h.cmd_id = CMD_ID_FILTER_SWTRACE_STATE;
+ fmt->state = (uint32_t)val;
- ret = scm_call2(
- SCM_SIP_FNID(SCM_SVC_QDSS, SCM_FILTER_SWTRACE_ID),
- &desc);
- if (ret)
- return ret;
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
- remoteqdss_err_translation(remoteqdss_scm_msgs, desc.ret[0]);
- ret = desc.ret[0] ? -EINVAL : 0;
+ dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
return ret;
}
+
DEFINE_SIMPLE_ATTRIBUTE(fops_sw_trace_output,
remoteqdss_scm_query_swtrace,
remoteqdss_scm_filter_swtrace,
- "%llu\n");
+ "0x%llx\n");
+
+static int remoteqdss_scm_query_tag(void *priv, u64 *val)
+{
+ struct remoteqdss_data *data = priv;
+ int ret;
+ struct scm_desc desc;
+ struct remoteqdss_header_fmt *fmt;
+ dma_addr_t addr;
+
+ fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+ if (!fmt)
+ return -ENOMEM;
+ fmt->subsys_id = data->id;
+ fmt->cmd_id = CMD_ID_QUERY_SWEVENT_TAG;
+
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+ *val = desc.ret[1];
+
+ dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_tag,
+ remoteqdss_scm_query_tag,
+ NULL,
+ "0x%llx\n");
+
+static int remoteqdss_scm_query_swevent(void *priv, u64 *val)
+{
+ struct remoteqdss_data *data = priv;
+ int ret;
+ struct scm_desc desc;
+ struct remoteqdss_query_swevent_fmt *fmt;
+ dma_addr_t addr;
+
+ fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+ if (!fmt)
+ return -ENOMEM;
+ fmt->h.subsys_id = data->id;
+ fmt->h.cmd_id = CMD_ID_QUERY_SWEVENT;
+ fmt->event_group = data->sw_event_group;
+
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+ *val = desc.ret[1];
+
+ dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+ return ret;
+}
+
+static int remoteqdss_scm_filter_swevent(void *priv, u64 val)
+{
+ struct remoteqdss_data *data = priv;
+ int ret;
+ struct scm_desc desc;
+ struct remoteqdss_filter_swevent_fmt *fmt;
+ dma_addr_t addr;
+
+ fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+ if (!fmt)
+ return -ENOMEM;
+ fmt->h.subsys_id = data->id;
+ fmt->h.cmd_id = CMD_ID_FILTER_SWEVENT;
+ fmt->event_group = data->sw_event_group;
+ fmt->event_mask = (uint32_t)val;
+
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+
+ dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_swevent,
+ remoteqdss_scm_query_swevent,
+ remoteqdss_scm_filter_swevent,
+ "0x%llx\n");
+
+static int remoteqdss_scm_query_swentity(void *priv, u64 *val)
+{
+ struct remoteqdss_data *data = priv;
+ int ret;
+ struct scm_desc desc;
+ struct remoteqdss_query_swentity_fmt *fmt;
+ dma_addr_t addr;
+
+ fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+ if (!fmt)
+ return -ENOMEM;
+ fmt->h.subsys_id = data->id;
+ fmt->h.cmd_id = CMD_ID_QUERY_SWENTITY;
+ fmt->entity_group = data->sw_entity_group;
+
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+ *val = desc.ret[1];
+
+ dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+ return ret;
+}
+
+static int remoteqdss_scm_filter_swentity(void *priv, u64 val)
+{
+ struct remoteqdss_data *data = priv;
+ int ret;
+ struct scm_desc desc;
+ struct remoteqdss_filter_swentity_fmt *fmt;
+ dma_addr_t addr;
+
+ fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+ if (!fmt)
+ return -ENOMEM;
+ fmt->h.subsys_id = data->id;
+ fmt->h.cmd_id = CMD_ID_FILTER_SWENTITY;
+ fmt->entity_group = data->sw_entity_group;
+ fmt->entity_mask = (uint32_t)val;
+
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+
+ dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_swentity,
+ remoteqdss_scm_query_swentity,
+ remoteqdss_scm_filter_swentity,
+ "0x%llx\n");
static void __init enumerate_scm_devices(struct dentry *parent)
{
@@ -189,16 +379,31 @@ static void __init enumerate_scm_devices(struct dentry *parent)
if (IS_ERR_OR_NULL(dentry))
goto out;
- dentry = debugfs_create_u64("sw_entity_group", S_IRUGO | S_IWUSR,
+ dentry = debugfs_create_u32("sw_entity_group", S_IRUGO | S_IWUSR,
data->dir, &data->sw_entity_group);
if (IS_ERR_OR_NULL(dentry))
goto out;
- dentry = debugfs_create_u64("sw_event_group", S_IRUGO | S_IWUSR,
+ dentry = debugfs_create_u32("sw_event_group", S_IRUGO | S_IWUSR,
data->dir, &data->sw_event_group);
if (IS_ERR_OR_NULL(dentry))
goto out;
+ dentry = debugfs_create_file("tag", S_IRUGO,
+ data->dir, data, &fops_tag);
+ if (IS_ERR_OR_NULL(dentry))
+ goto out;
+
+ dentry = debugfs_create_file("swevent", S_IRUGO | S_IWUSR,
+ data->dir, data, &fops_swevent);
+ if (IS_ERR_OR_NULL(dentry))
+ goto out;
+
+ dentry = debugfs_create_file("swentity", S_IRUGO | S_IWUSR,
+ data->dir, data, &fops_swentity);
+ if (IS_ERR_OR_NULL(dentry))
+ goto out;
+
return;
out:
@@ -209,6 +414,13 @@ out:
static int __init remoteqdss_init(void)
{
unsigned long old_flags = remoteqdss_dbg_flags;
+ int ret;
+
+ /* Set up DMA */
+ arch_setup_dma_ops(&dma_dev, 0, U64_MAX, NULL, false);
+ ret = dma_coerce_mask_and_coherent(&dma_dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
/*
* disable normal error messages while checking
@@ -225,4 +437,4 @@ static int __init remoteqdss_init(void)
remoteqdss_dbg_flags = old_flags;
return 0;
}
-module_init(remoteqdss_init);
+late_initcall(remoteqdss_init);
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
new file mode 100644
index 000000000000..7355c2af8f61
--- /dev/null
+++ b/drivers/soc/qcom/service-notifier.c
@@ -0,0 +1,660 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "service-notifier: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/sysmon.h>
+#include <soc/qcom/service-locator.h>
+#include "service-notifier.h"
+
+#define QMI_RESP_BIT_SHIFT(x) ((x) << 16)
+#define SERVREG_NOTIF_NAME_LENGTH QMI_SERVREG_NOTIF_NAME_LENGTH_V01
+#define SERVREG_NOTIF_SERVICE_ID SERVREG_NOTIF_SERVICE_ID_V01
+#define SERVREG_NOTIF_SERVICE_VERS SERVREG_NOTIF_SERVICE_VERS_V01
+
+#define SERVREG_NOTIF_SET_ACK_REQ \
+ QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_REQ_V01
+#define SERVREG_NOTIF_SET_ACK_REQ_MSG_LEN \
+ QMI_SERVREG_NOTIF_SET_ACK_REQ_MSG_V01_MAX_MSG_LEN
+#define SERVREG_NOTIF_SET_ACK_RESP \
+ QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_RESP_V01
+#define SERVREG_NOTIF_SET_ACK_RESP_MSG_LEN \
+ QMI_SERVREG_NOTIF_SET_ACK_RESP_MSG_V01_MAX_MSG_LEN
+#define SERVREG_NOTIF_STATE_UPDATED_IND_MSG \
+ QMI_SERVREG_NOTIF_STATE_UPDATED_IND_V01
+#define SERVREG_NOTIF_STATE_UPDATED_IND_MSG_LEN \
+ QMI_SERVREG_NOTIF_STATE_UPDATED_IND_MSG_V01_MAX_MSG_LEN
+
+#define SERVREG_NOTIF_REGISTER_LISTENER_REQ \
+ QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_V01
+#define SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_LEN \
+ QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_V01_MAX_MSG_LEN
+#define SERVREG_NOTIF_REGISTER_LISTENER_RESP \
+ QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_V01
+#define SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_LEN \
+ QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_V01_MAX_MSG_LEN
+
+#define QMI_STATE_MIN_VAL QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MIN_VAL_V01
+#define QMI_STATE_MAX_VAL QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MAX_VAL_V01
+
+#define SERVER_TIMEOUT 500
+
+/*
+ * Per user service data structure
+ * struct service_notif_info - notifier struct for each unique service path
+ * service_path - service provider path/location
+ * instance_id - service instance id specific to a subsystem
+ * service_notif_rcvr_list - list of clients interested in this service
+ * provider's notifications
+ * curr_state - current state of the service
+ */
+struct service_notif_info {
+ char service_path[SERVREG_NOTIF_NAME_LENGTH];
+ int instance_id;
+ struct srcu_notifier_head service_notif_rcvr_list;
+ struct list_head list;
+ int curr_state;
+};
+static LIST_HEAD(service_list);
+static DEFINE_MUTEX(service_list_lock);
+
+struct ind_req_resp {
+ char service_path[SERVREG_NOTIF_NAME_LENGTH];
+ int transaction_id;
+};
+
+/*
+ * Per Root Process Domain (Root service) data structure
+ * struct qmi_client_info - QMI client info for each subsystem/instance id
+ * instance_id - service instance id specific to a subsystem (Root PD)
+ * clnt_handle - unique QMI client handle
+ * service_connected - indicates if QMI service is up on the subsystem
+ * ind_recv - completion variable to record receiving an indication
+ * ssr_handle - The SSR handle provided by the SSR driver for the subsystem
+ * on which the remote root PD runs.
+ */
+struct qmi_client_info {
+ int instance_id;
+ struct work_struct svc_arrive;
+ struct work_struct svc_exit;
+ struct work_struct svc_rcv_msg;
+ struct work_struct ind_ack;
+ struct workqueue_struct *svc_event_wq;
+ struct qmi_handle *clnt_handle;
+ struct notifier_block notifier;
+ void *ssr_handle;
+ struct notifier_block ssr_notifier;
+ bool service_connected;
+ struct completion ind_recv;
+ struct list_head list;
+ struct ind_req_resp ind_msg;
+};
+static LIST_HEAD(qmi_client_list);
+static DEFINE_MUTEX(qmi_list_lock);
+
+static DEFINE_MUTEX(notif_add_lock);
+
+static void root_service_clnt_recv_msg(struct work_struct *work);
+static void root_service_service_arrive(struct work_struct *work);
+static void root_service_exit_work(struct work_struct *work);
+
+static struct service_notif_info *_find_service_info(const char *service_path)
+{
+ struct service_notif_info *service_notif;
+
+ mutex_lock(&service_list_lock);
+ list_for_each_entry(service_notif, &service_list, list)
+ if (!strcmp(service_notif->service_path, service_path)) {
+ mutex_unlock(&service_list_lock);
+ return service_notif;
+ }
+ mutex_unlock(&service_list_lock);
+ return NULL;
+}
+
+static int service_notif_queue_notification(struct service_notif_info
+ *service_notif,
+ enum qmi_servreg_notif_service_state_enum_type_v01 notif_type,
+ void *info)
+{
+ int ret = 0;
+
+ if (!service_notif)
+ return -EINVAL;
+
+ if ((int) notif_type < QMI_STATE_MIN_VAL ||
+ (int) notif_type > QMI_STATE_MAX_VAL)
+ return -EINVAL;
+
+ if (service_notif->curr_state == notif_type)
+ return 0;
+
+ if (!service_notif->service_notif_rcvr_list.head)
+ return 0;
+
+ ret = srcu_notifier_call_chain(&service_notif->service_notif_rcvr_list,
+ notif_type, info);
+ return ret;
+}
+
+static void root_service_clnt_recv_msg(struct work_struct *work)
+{
+ int ret;
+ struct qmi_client_info *data = container_of(work,
+ struct qmi_client_info, svc_rcv_msg);
+
+ do {
+ pr_debug("Notified about a Receive event (instance-id: %d)\n",
+ data->instance_id);
+ } while ((ret = qmi_recv_msg(data->clnt_handle)) == 0);
+
+ if (ret != -ENOMSG)
+ pr_err("Error receiving message (instance-id: %d)\n",
+ data->instance_id);
+}
+
+static void root_service_clnt_notify(struct qmi_handle *handle,
+ enum qmi_event_type event, void *notify_priv)
+{
+ struct qmi_client_info *data = container_of(notify_priv,
+ struct qmi_client_info, svc_arrive);
+
+ switch (event) {
+ case QMI_RECV_MSG:
+ schedule_work(&data->svc_rcv_msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void send_ind_ack(struct work_struct *work)
+{
+ struct qmi_client_info *data = container_of(work,
+ struct qmi_client_info, ind_ack);
+ struct qmi_servreg_notif_set_ack_req_msg_v01 req;
+ struct msg_desc req_desc, resp_desc;
+ struct qmi_servreg_notif_set_ack_resp_msg_v01 resp = { { 0, 0 } };
+ int rc;
+
+ req.transaction_id = data->ind_msg.transaction_id;
+ snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s",
+ data->ind_msg.service_path);
+
+ req_desc.msg_id = SERVREG_NOTIF_SET_ACK_REQ;
+ req_desc.max_msg_len = SERVREG_NOTIF_SET_ACK_REQ_MSG_LEN;
+ req_desc.ei_array = qmi_servreg_notif_set_ack_req_msg_v01_ei;
+
+ resp_desc.msg_id = SERVREG_NOTIF_SET_ACK_RESP;
+ resp_desc.max_msg_len = SERVREG_NOTIF_SET_ACK_RESP_MSG_LEN;
+ resp_desc.ei_array = qmi_servreg_notif_set_ack_resp_msg_v01_ei;
+
+ rc = qmi_send_req_wait(data->clnt_handle, &req_desc,
+ &req, sizeof(req), &resp_desc, &resp,
+ sizeof(resp), SERVER_TIMEOUT);
+ if (rc < 0) {
+ pr_err("%s: Sending Ack failed/server timeout, ret - %d\n",
+ data->ind_msg.service_path, rc);
+ goto exit;
+ }
+
+ /* Check the response */
+ if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01)
+ pr_err("QMI request failed 0x%x\n",
+ QMI_RESP_BIT_SHIFT(resp.resp.error));
+ pr_debug("Indication ACKed for transid %d, service %s, instance %d!\n",
+ data->ind_msg.transaction_id, data->ind_msg.service_path,
+ data->instance_id);
+exit:
+ complete(&data->ind_recv);
+}
+
+static void root_service_service_ind_cb(struct qmi_handle *handle,
+ unsigned int msg_id, void *msg,
+ unsigned int msg_len, void *ind_cb_priv)
+{
+ struct qmi_client_info *data = (struct qmi_client_info *)ind_cb_priv;
+ struct service_notif_info *service_notif;
+ struct msg_desc ind_desc;
+ struct qmi_servreg_notif_state_updated_ind_msg_v01 ind_msg;
+ int rc;
+
+ ind_desc.msg_id = SERVREG_NOTIF_STATE_UPDATED_IND_MSG;
+ ind_desc.max_msg_len = SERVREG_NOTIF_STATE_UPDATED_IND_MSG_LEN;
+ ind_desc.ei_array = qmi_servreg_notif_state_updated_ind_msg_v01_ei;
+ rc = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+ if (rc < 0) {
+ pr_err("Failed to decode message!\n");
+ goto send_ind_resp;
+ }
+
+ pr_debug("Indication received from %s, state: 0x%x, trans-id: %d\n",
+ ind_msg.service_name, ind_msg.curr_state,
+ ind_msg.transaction_id);
+
+ service_notif = _find_service_info(ind_msg.service_name);
+ if (!service_notif)
+ return;
+
+ if ((int)ind_msg.curr_state < QMI_STATE_MIN_VAL ||
+ (int)ind_msg.curr_state > QMI_STATE_MAX_VAL)
+ pr_err("Unexpected indication notification state %d\n",
+ ind_msg.curr_state);
+ else {
+ mutex_lock(&notif_add_lock);
+ mutex_lock(&service_list_lock);
+ if (service_notif_queue_notification(service_notif,
+ ind_msg.curr_state, NULL))
+			pr_err("Notification failed for %s\n",
+ ind_msg.service_name);
+ service_notif->curr_state = ind_msg.curr_state;
+ mutex_unlock(&service_list_lock);
+ mutex_unlock(&notif_add_lock);
+ }
+send_ind_resp:
+ data->ind_msg.transaction_id = ind_msg.transaction_id;
+ snprintf(data->ind_msg.service_path,
+ ARRAY_SIZE(data->ind_msg.service_path), "%s",
+ ind_msg.service_name);
+ schedule_work(&data->ind_ack);
+	rc = wait_for_completion_timeout(&data->ind_recv,
+					msecs_to_jiffies(SERVER_TIMEOUT));
+	if (!rc) {
+		pr_err("Timed out waiting for indication ACK to be sent\n");
+		return;
+	}
+
+}
+
+static int send_notif_listener_msg_req(struct service_notif_info *service_notif,
+ struct qmi_client_info *data,
+ bool register_notif, int *curr_state)
+{
+ struct qmi_servreg_notif_register_listener_req_msg_v01 req;
+ struct qmi_servreg_notif_register_listener_resp_msg_v01
+ resp = { { 0, 0 } };
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s",
+ service_notif->service_path);
+ req.enable = register_notif;
+
+ req_desc.msg_id = SERVREG_NOTIF_REGISTER_LISTENER_REQ;
+ req_desc.max_msg_len = SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_LEN;
+ req_desc.ei_array = qmi_servreg_notif_register_listener_req_msg_v01_ei;
+
+ resp_desc.msg_id = SERVREG_NOTIF_REGISTER_LISTENER_RESP;
+ resp_desc.max_msg_len = SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_LEN;
+ resp_desc.ei_array =
+ qmi_servreg_notif_register_listener_resp_msg_v01_ei;
+
+ rc = qmi_send_req_wait(data->clnt_handle, &req_desc, &req, sizeof(req),
+ &resp_desc, &resp, sizeof(resp),
+ SERVER_TIMEOUT);
+ if (rc < 0) {
+ pr_err("%s: Message sending failed/server timeout, ret - %d\n",
+ service_notif->service_path, rc);
+ return rc;
+ }
+
+ /* Check the response */
+ if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+ pr_err("QMI request failed 0x%x\n",
+ QMI_RESP_BIT_SHIFT(resp.resp.error));
+ return -EREMOTEIO;
+ }
+
+ if ((int) resp.curr_state < QMI_STATE_MIN_VAL ||
+ (int) resp.curr_state > QMI_STATE_MAX_VAL) {
+ pr_err("Invalid notif info 0x%x\n", resp.curr_state);
+ rc = -EINVAL;
+ }
+ service_notif->curr_state = resp.curr_state;
+ *curr_state = resp.curr_state;
+ return rc;
+}
+
+static int register_notif_listener(struct service_notif_info *service_notif,
+ struct qmi_client_info *data,
+ int *curr_state)
+{
+ return send_notif_listener_msg_req(service_notif, data, true,
+ curr_state);
+}
+
+static void root_service_service_arrive(struct work_struct *work)
+{
+ struct service_notif_info *service_notif = NULL;
+ struct qmi_client_info *data = container_of(work,
+ struct qmi_client_info, svc_arrive);
+ int rc;
+ int curr_state;
+
+ /* Create a Local client port for QMI communication */
+ data->clnt_handle = qmi_handle_create(root_service_clnt_notify, work);
+ if (!data->clnt_handle) {
+ pr_err("QMI client handle alloc failed (instance-id: %d)\n",
+ data->instance_id);
+ return;
+ }
+
+ /* Connect to the service on the root PD service */
+ rc = qmi_connect_to_service(data->clnt_handle,
+ SERVREG_NOTIF_SERVICE_ID, SERVREG_NOTIF_SERVICE_VERS,
+ data->instance_id);
+ if (rc < 0) {
+ pr_err("Could not connect handle to service(instance-id: %d)\n",
+ data->instance_id);
+ qmi_handle_destroy(data->clnt_handle);
+ data->clnt_handle = NULL;
+ return;
+ }
+ data->service_connected = true;
+	pr_info("Connection established with service (instance-id: %d)\n",
+			data->instance_id);
+ /* Register for indication messages about service */
+ rc = qmi_register_ind_cb(data->clnt_handle, root_service_service_ind_cb,
+ (void *)data);
+ if (rc < 0)
+ pr_err("Indication callback register failed(instance-id: %d)\n",
+ data->instance_id);
+
+ mutex_lock(&notif_add_lock);
+ mutex_lock(&service_list_lock);
+ list_for_each_entry(service_notif, &service_list, list) {
+ if (service_notif->instance_id == data->instance_id) {
+ rc = register_notif_listener(service_notif, data,
+ &curr_state);
+ if (rc) {
+ pr_err("Notifier registration failed for %s\n",
+ service_notif->service_path);
+ } else {
+ rc = service_notif_queue_notification(
+ service_notif,
+ curr_state, NULL);
+ if (rc)
+ pr_err("Notifier failed for %s\n",
+ service_notif->service_path);
+ service_notif->curr_state = curr_state;
+ }
+ }
+ }
+ mutex_unlock(&service_list_lock);
+ mutex_unlock(&notif_add_lock);
+}
+
+static void root_service_service_exit(struct qmi_client_info *data)
+{
+ struct service_notif_info *service_notif = NULL;
+ int rc;
+
+ /*
+	 * Send service down notifications to all clients
+	 * registered for notifications for that service.
+ */
+ mutex_lock(&notif_add_lock);
+ mutex_lock(&service_list_lock);
+ list_for_each_entry(service_notif, &service_list, list) {
+ if (service_notif->instance_id == data->instance_id) {
+ rc = service_notif_queue_notification(service_notif,
+ SERVREG_NOTIF_SERVICE_STATE_DOWN_V01,
+ NULL);
+ if (rc)
+ pr_err("Notification failed for %s\n",
+ service_notif->service_path);
+ service_notif->curr_state =
+ SERVREG_NOTIF_SERVICE_STATE_DOWN_V01;
+ }
+ }
+ mutex_unlock(&service_list_lock);
+ mutex_unlock(&notif_add_lock);
+
+ /*
+ * Destroy client handle and try connecting when
+ * service comes up again.
+ */
+ data->service_connected = false;
+ qmi_handle_destroy(data->clnt_handle);
+ data->clnt_handle = NULL;
+}
+
+static void root_service_exit_work(struct work_struct *work)
+{
+ struct qmi_client_info *data = container_of(work,
+ struct qmi_client_info, svc_exit);
+ root_service_service_exit(data);
+}
+
+static int service_event_notify(struct notifier_block *this,
+ unsigned long code,
+ void *_cmd)
+{
+ struct qmi_client_info *data = container_of(this,
+ struct qmi_client_info, notifier);
+
+ switch (code) {
+ case QMI_SERVER_ARRIVE:
+ pr_debug("Root PD service UP\n");
+ queue_work(data->svc_event_wq, &data->svc_arrive);
+ break;
+ case QMI_SERVER_EXIT:
+ pr_debug("Root PD service DOWN\n");
+ queue_work(data->svc_event_wq, &data->svc_exit);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ssr_event_notify(struct notifier_block *this,
+ unsigned long code,
+ void *data)
+{
+ struct qmi_client_info *info = container_of(this,
+ struct qmi_client_info, ssr_notifier);
+ switch (code) {
+ case SUBSYS_BEFORE_SHUTDOWN:
+ pr_debug("Root PD service Down (SSR notification)\n");
+ root_service_service_exit(info);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static void *add_service_notif(const char *service_path, int instance_id,
+ int *curr_state)
+{
+ struct service_notif_info *service_notif;
+ struct qmi_client_info *tmp, *qmi_data;
+ long int rc;
+ char subsys[SERVREG_NOTIF_NAME_LENGTH];
+
+ rc = find_subsys(service_path, subsys);
+ if (rc < 0) {
+ pr_err("Could not find subsys for %s\n", service_path);
+ return ERR_PTR(rc);
+ }
+
+ service_notif = kzalloc(sizeof(struct service_notif_info), GFP_KERNEL);
+ if (!service_notif)
+ return ERR_PTR(-ENOMEM);
+
+ strlcpy(service_notif->service_path, service_path,
+ ARRAY_SIZE(service_notif->service_path));
+ service_notif->instance_id = instance_id;
+
+	/*
+	 * If we already have a QMI connection to the root PD that hosts the
+	 * remote service of interest, reuse the existing connection.
+	 */
+ mutex_lock(&qmi_list_lock);
+ list_for_each_entry(tmp, &qmi_client_list, list) {
+ if (tmp->instance_id == instance_id) {
+ if (tmp->service_connected) {
+ rc = register_notif_listener(service_notif, tmp,
+ curr_state);
+ if (rc) {
+ mutex_unlock(&qmi_list_lock);
+					pr_err("Register notifier failed: %s\n",
+ service_path);
+ kfree(service_notif);
+ return ERR_PTR(rc);
+ }
+ }
+ mutex_unlock(&qmi_list_lock);
+ goto add_service_list;
+ }
+ }
+ mutex_unlock(&qmi_list_lock);
+
+ qmi_data = kzalloc(sizeof(struct qmi_client_info), GFP_KERNEL);
+ if (!qmi_data) {
+ kfree(service_notif);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ qmi_data->instance_id = instance_id;
+ qmi_data->clnt_handle = NULL;
+ qmi_data->notifier.notifier_call = service_event_notify;
+ init_completion(&qmi_data->ind_recv);
+
+ qmi_data->svc_event_wq = create_singlethread_workqueue(subsys);
+ if (!qmi_data->svc_event_wq) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ INIT_WORK(&qmi_data->svc_arrive, root_service_service_arrive);
+ INIT_WORK(&qmi_data->svc_exit, root_service_exit_work);
+ INIT_WORK(&qmi_data->svc_rcv_msg, root_service_clnt_recv_msg);
+ INIT_WORK(&qmi_data->ind_ack, send_ind_ack);
+
+ *curr_state = service_notif->curr_state =
+ SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01;
+
+ rc = qmi_svc_event_notifier_register(SERVREG_NOTIF_SERVICE_ID,
+ SERVREG_NOTIF_SERVICE_VERS, qmi_data->instance_id,
+ &qmi_data->notifier);
+ if (rc < 0) {
+ pr_err("Notifier register failed (instance-id: %d)\n",
+ qmi_data->instance_id);
+ goto exit;
+ }
+ qmi_data->ssr_notifier.notifier_call = ssr_event_notify;
+ qmi_data->ssr_handle = subsys_notif_register_notifier(subsys,
+ &qmi_data->ssr_notifier);
+ if (IS_ERR(qmi_data->ssr_handle)) {
+ pr_err("SSR notif register for %s failed(instance-id: %d)\n",
+ subsys, qmi_data->instance_id);
+ rc = PTR_ERR(qmi_data->ssr_handle);
+ goto exit;
+ }
+
+ mutex_lock(&qmi_list_lock);
+ INIT_LIST_HEAD(&qmi_data->list);
+ list_add_tail(&qmi_data->list, &qmi_client_list);
+ mutex_unlock(&qmi_list_lock);
+
+add_service_list:
+ srcu_init_notifier_head(&service_notif->service_notif_rcvr_list);
+
+ mutex_lock(&service_list_lock);
+ INIT_LIST_HEAD(&service_notif->list);
+ list_add_tail(&service_notif->list, &service_list);
+ mutex_unlock(&service_list_lock);
+
+ return service_notif;
+exit:
+ if (qmi_data->svc_event_wq)
+ destroy_workqueue(qmi_data->svc_event_wq);
+ kfree(qmi_data);
+ kfree(service_notif);
+ return ERR_PTR(rc);
+}
+
+/* service_notif_register_notifier() - Register a notifier for a service
+ * On success, it returns a handle. It takes the following arguments:
+ * service_path: Individual service identifier path for which a client
+ * registers for notifications.
+ * instance_id: Instance id specific to a subsystem.
+ * nb: Notifier block with the callback for service events.
+ * curr_state: Current state of the service, returned by the registration
+ * process.
+ */
+void *service_notif_register_notifier(const char *service_path, int instance_id,
+ struct notifier_block *nb, int *curr_state)
+{
+ struct service_notif_info *service_notif;
+ int ret = 0;
+
+ if (!service_path || !instance_id || !nb)
+ return ERR_PTR(-EINVAL);
+
+ service_notif = _find_service_info(service_path);
+ mutex_lock(&notif_add_lock);
+ if (!service_notif) {
+ service_notif = (struct service_notif_info *)add_service_notif(
+ service_path,
+ instance_id,
+ curr_state);
+ if (IS_ERR(service_notif))
+ goto exit;
+ }
+
+ ret = srcu_notifier_chain_register(
+ &service_notif->service_notif_rcvr_list, nb);
+ *curr_state = service_notif->curr_state;
+ if (ret < 0)
+ service_notif = ERR_PTR(ret);
+exit:
+ mutex_unlock(&notif_add_lock);
+ return service_notif;
+}
+EXPORT_SYMBOL(service_notif_register_notifier);
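+
+/*
+ * Illustrative usage sketch (not part of this patch): a client interested
+ * in a remote audio service might register as follows. The service path
+ * string and instance id below are hypothetical placeholders; real values
+ * come from the service locator.
+ *
+ *	static int audio_svc_notify(struct notifier_block *nb,
+ *					unsigned long state, void *data)
+ *	{
+ *		if (state == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01)
+ *			pr_info("audio service went down\n");
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block audio_nb = {
+ *		.notifier_call = audio_svc_notify,
+ *	};
+ *
+ *	int curr_state;
+ *	void *handle = service_notif_register_notifier(
+ *			"msm/adsp/audio_pd", audio_instance_id,
+ *			&audio_nb, &curr_state);
+ *	if (IS_ERR(handle))
+ *		pr_err("registration failed\n");
+ */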
+
+/* service_notif_unregister_notifier() - Unregister a notifier for a service.
+ * service_notif_handle - The notifier handle that was provided by the
+ * service_notif_register_notifier function when the
+ * client registered for notifications.
+ * nb - The notifier block that was previously used during the registration.
+ */
+int service_notif_unregister_notifier(void *service_notif_handle,
+ struct notifier_block *nb)
+{
+ struct service_notif_info *service_notif;
+
+ if (!service_notif_handle || !nb)
+ return -EINVAL;
+
+ service_notif = (struct service_notif_info *)service_notif_handle;
+	if (IS_ERR(service_notif))
+ return -EINVAL;
+
+ return srcu_notifier_chain_unregister(
+ &service_notif->service_notif_rcvr_list, nb);
+}
+EXPORT_SYMBOL(service_notif_unregister_notifier);
diff --git a/drivers/soc/qcom/service-notifier.h b/drivers/soc/qcom/service-notifier.h
new file mode 100644
index 000000000000..2fa44b8181f6
--- /dev/null
+++ b/drivers/soc/qcom/service-notifier.h
@@ -0,0 +1,303 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SERVICE_REGISTRY_NOTIFIER_H
+#define SERVICE_REGISTRY_NOTIFIER_H
+
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#define SERVREG_NOTIF_SERVICE_ID_V01 0x42
+#define SERVREG_NOTIF_SERVICE_VERS_V01 0x01
+
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_V01 0x0020
+#define QMI_SERVREG_NOTIF_QUERY_STATE_REQ_V01 0x0021
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_V01 0x0020
+#define QMI_SERVREG_NOTIF_QUERY_STATE_RESP_V01 0x0021
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_V01 0x0022
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_RESP_V01 0x0023
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_REQ_V01 0x0023
+
+#define QMI_SERVREG_NOTIF_NAME_LENGTH_V01 64
+
+enum qmi_servreg_notif_service_state_enum_type_v01 {
+ QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
+ QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
+ SERVREG_NOTIF_SERVICE_STATE_DOWN_V01 = 0x0FFFFFFF,
+ SERVREG_NOTIF_SERVICE_STATE_UP_V01 = 0x1FFFFFFF,
+ SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01 = 0x7FFFFFFF,
+};
+
+struct qmi_servreg_notif_register_listener_req_msg_v01 {
+ uint8_t enable;
+ char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+};
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_V01_MAX_MSG_LEN 71
+struct elem_info qmi_servreg_notif_register_listener_req_msg_v01_ei[];
+
+struct qmi_servreg_notif_register_listener_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ uint8_t curr_state_valid;
+ enum qmi_servreg_notif_service_state_enum_type_v01 curr_state;
+};
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_V01_MAX_MSG_LEN 14
+struct elem_info qmi_servreg_notif_register_listener_resp_msg_v01_ei[];
+
+struct qmi_servreg_notif_query_state_req_msg_v01 {
+ char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+};
+#define QMI_SERVREG_NOTIF_QUERY_STATE_REQ_MSG_V01_MAX_MSG_LEN 67
+struct elem_info qmi_servreg_notif_query_state_req_msg_v01_ei[];
+
+struct qmi_servreg_notif_query_state_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ uint8_t curr_state_valid;
+ enum qmi_servreg_notif_service_state_enum_type_v01 curr_state;
+};
+#define QMI_SERVREG_NOTIF_QUERY_STATE_RESP_MSG_V01_MAX_MSG_LEN 14
+struct elem_info qmi_servreg_notif_query_state_resp_msg_v01_ei[];
+
+struct qmi_servreg_notif_state_updated_ind_msg_v01 {
+ enum qmi_servreg_notif_service_state_enum_type_v01 curr_state;
+ char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+ uint16_t transaction_id;
+};
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_MSG_V01_MAX_MSG_LEN 79
+struct elem_info qmi_servreg_notif_state_updated_ind_msg_v01_ei[];
+
+struct qmi_servreg_notif_set_ack_req_msg_v01 {
+ char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+ uint16_t transaction_id;
+};
+#define QMI_SERVREG_NOTIF_SET_ACK_REQ_MSG_V01_MAX_MSG_LEN 72
+struct elem_info qmi_servreg_notif_set_ack_req_msg_v01_ei[];
+
+struct qmi_servreg_notif_set_ack_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define QMI_SERVREG_NOTIF_SET_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+struct elem_info qmi_servreg_notif_set_ack_resp_msg_v01_ei[];
+
+struct elem_info qmi_servreg_notif_register_listener_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ qmi_servreg_notif_register_listener_req_msg_v01,
+ enable),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ qmi_servreg_notif_register_listener_req_msg_v01,
+ service_name),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info qmi_servreg_notif_register_listener_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ qmi_servreg_notif_register_listener_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ qmi_servreg_notif_register_listener_resp_msg_v01,
+ curr_state_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(
+ enum qmi_servreg_notif_service_state_enum_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ qmi_servreg_notif_register_listener_resp_msg_v01,
+ curr_state),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info qmi_servreg_notif_query_state_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ qmi_servreg_notif_query_state_req_msg_v01,
+ service_name),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info qmi_servreg_notif_query_state_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ qmi_servreg_notif_query_state_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ qmi_servreg_notif_query_state_resp_msg_v01,
+ curr_state_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum
+ qmi_servreg_notif_service_state_enum_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ qmi_servreg_notif_query_state_resp_msg_v01,
+ curr_state),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info qmi_servreg_notif_state_updated_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum
+ qmi_servreg_notif_service_state_enum_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ qmi_servreg_notif_state_updated_ind_msg_v01,
+ curr_state),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ qmi_servreg_notif_state_updated_ind_msg_v01,
+ service_name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ qmi_servreg_notif_state_updated_ind_msg_v01,
+ transaction_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info qmi_servreg_notif_set_ack_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ qmi_servreg_notif_set_ack_req_msg_v01,
+ service_name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ qmi_servreg_notif_set_ack_req_msg_v01,
+ transaction_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info qmi_servreg_notif_set_ack_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ qmi_servreg_notif_set_ack_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+#endif
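For context, a minimal sketch of how a client might register a listener with these definitions, assuming this tree's msm_qmi_interface API (struct msg_desc, qmi_send_req_wait), an already-connected handle, and an assumed header path; the timeout value is illustrative, not part of the patch:

    /* Sketch only, not part of the patch. */
    #include <linux/string.h>
    #include <soc/qcom/msm_qmi_interface.h>
    #include <soc/qcom/service-notifier.h>	/* header path assumed */

    static int servreg_register_listener(struct qmi_handle *handle,
    				     const char *service_path)
    {
    	struct qmi_servreg_notif_register_listener_req_msg_v01 req = { 0 };
    	struct qmi_servreg_notif_register_listener_resp_msg_v01 resp = { 0 };
    	struct msg_desc req_desc = {
    		.msg_id = QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_V01,
    		.max_msg_len =
    		  QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_V01_MAX_MSG_LEN,
    		.ei_array = qmi_servreg_notif_register_listener_req_msg_v01_ei,
    	};
    	struct msg_desc resp_desc = {
    		.msg_id = QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_V01,
    		.max_msg_len =
    		  QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_V01_MAX_MSG_LEN,
    		.ei_array = qmi_servreg_notif_register_listener_resp_msg_v01_ei,
    	};

    	req.enable = 1;
    	strlcpy(req.service_name, service_path, sizeof(req.service_name));

    	/* 5000 ms timeout is an arbitrary illustrative value. */
    	return qmi_send_req_wait(handle, &req_desc, &req, sizeof(req),
    				 &resp_desc, &resp, sizeof(resp), 5000);
    }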
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index b9be6d9a52ef..72e1d437fd1b 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -44,6 +44,19 @@
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/delay.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+#include <linux/cpuset.h>
+#include <linux/vmpressure.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/almk.h>
+
+#ifdef CONFIG_HIGHMEM
+#define _ZONE ZONE_HIGHMEM
+#else
+#define _ZONE ZONE_NORMAL
+#endif
#define CREATE_TRACE_POINTS
#include "trace/lowmemorykiller.h"
@@ -63,6 +76,7 @@ static int lowmem_minfree[6] = {
16 * 1024, /* 64MB */
};
static int lowmem_minfree_size = 4;
+static int lmk_fast_run = 1;
static unsigned long lowmem_deathpending_timeout;
@@ -81,24 +95,292 @@ static unsigned long lowmem_count(struct shrinker *s,
global_page_state(NR_INACTIVE_FILE);
}
+static atomic_t shift_adj = ATOMIC_INIT(0);
+static short adj_max_shift = 353;
+
+/* User knob to enable/disable adaptive lmk feature */
+static int enable_adaptive_lmk;
+module_param_named(enable_adaptive_lmk, enable_adaptive_lmk, int,
+ S_IRUGO | S_IWUSR);
+
+/*
+ * This parameter controls the behaviour of LMK when vmpressure is in
+ * the range of 90-94: there, adaptive LMK triggers based on the
+ * number of file pages relative to vmpressure_file_min. This is
+ * usually a pseudo minfree value, higher than the highest value
+ * configured in the minfree array.
+ */
+static int vmpressure_file_min;
+module_param_named(vmpressure_file_min, vmpressure_file_min, int,
+ S_IRUGO | S_IWUSR);
+
+enum {
+ VMPRESSURE_NO_ADJUST = 0,
+ VMPRESSURE_ADJUST_ENCROACH,
+ VMPRESSURE_ADJUST_NORMAL,
+};
+
+int adjust_minadj(short *min_score_adj)
+{
+ int ret = VMPRESSURE_NO_ADJUST;
+
+ if (!enable_adaptive_lmk)
+ return 0;
+
+ if (atomic_read(&shift_adj) &&
+ (*min_score_adj > adj_max_shift)) {
+ if (*min_score_adj == OOM_SCORE_ADJ_MAX + 1)
+ ret = VMPRESSURE_ADJUST_ENCROACH;
+ else
+ ret = VMPRESSURE_ADJUST_NORMAL;
+ *min_score_adj = adj_max_shift;
+ }
+ atomic_set(&shift_adj, 0);
+
+ return ret;
+}
+
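To make the threshold shift concrete, here is an illustration of adjust_minadj() with a latched shift_adj, assuming enable_adaptive_lmk is set and the defaults above (illustration only, not part of the patch):

    short min_score_adj = OOM_SCORE_ADJ_MAX + 1;	/* 1001: kill nothing */
    int ret;

    atomic_set(&shift_adj, 1);	/* normally latched by the notifier below */
    ret = adjust_minadj(&min_score_adj);
    /*
     * min_score_adj is now adj_max_shift (353), ret is
     * VMPRESSURE_ADJUST_ENCROACH, and shift_adj is cleared, so one
     * shrinker pass may kill tasks with oom_score_adj >= 353 even
     * though the minfree thresholds alone would not have fired.
     */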
+static int lmk_vmpressure_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int other_free = 0, other_file = 0;
+ unsigned long pressure = action;
+ int array_size = ARRAY_SIZE(lowmem_adj);
+
+ if (!enable_adaptive_lmk)
+ return 0;
+
+ if (pressure >= 95) {
+ other_file = global_page_state(NR_FILE_PAGES) -
+ global_page_state(NR_SHMEM) -
+ total_swapcache_pages();
+ other_free = global_page_state(NR_FREE_PAGES);
+
+ atomic_set(&shift_adj, 1);
+ trace_almk_vmpressure(pressure, other_free, other_file);
+ } else if (pressure >= 90) {
+ if (lowmem_adj_size < array_size)
+ array_size = lowmem_adj_size;
+ if (lowmem_minfree_size < array_size)
+ array_size = lowmem_minfree_size;
+
+ other_file = global_page_state(NR_FILE_PAGES) -
+ global_page_state(NR_SHMEM) -
+ total_swapcache_pages();
+
+ other_free = global_page_state(NR_FREE_PAGES);
+
+ if ((other_free < lowmem_minfree[array_size - 1]) &&
+ (other_file < vmpressure_file_min)) {
+ atomic_set(&shift_adj, 1);
+ trace_almk_vmpressure(pressure, other_free, other_file);
+ }
+ } else if (atomic_read(&shift_adj)) {
+ /*
+ * shift_adj would have been set by a previous invocation
+ * of notifier, which is not followed by a lowmem_shrink yet.
+ * Since vmpressure has improved, reset shift_adj to avoid
+ * false adaptive LMK trigger.
+ */
+ trace_almk_vmpressure(pressure, other_free, other_file);
+ atomic_set(&shift_adj, 0);
+ }
+
+ return 0;
+}
+
+static struct notifier_block lmk_vmpr_nb = {
+ .notifier_call = lmk_vmpressure_notifier,
+};
+
static int test_task_flag(struct task_struct *p, int flag)
{
- struct task_struct *t = p;
+ struct task_struct *t;
- do {
+ for_each_thread(p, t) {
task_lock(t);
if (test_tsk_thread_flag(t, flag)) {
task_unlock(t);
return 1;
}
task_unlock(t);
- } while_each_thread(p, t);
+ }
return 0;
}
static DEFINE_MUTEX(scan_mutex);
+int can_use_cma_pages(gfp_t gfp_mask)
+{
+ int can_use = 0;
+ int mtype = gfpflags_to_migratetype(gfp_mask);
+ int i = 0;
+ int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
+
+ if (is_migrate_cma(mtype)) {
+ can_use = 1;
+ } else {
+ for (i = 0;; i++) {
+ int fallbacktype = mtype_fallbacks[i];
+
+ if (is_migrate_cma(fallbacktype)) {
+ can_use = 1;
+ break;
+ }
+
+ if (fallbacktype == MIGRATE_TYPES)
+ break;
+ }
+ }
+ return can_use;
+}
+
+void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
+ int *other_free, int *other_file,
+ int use_cma_pages)
+{
+ struct zone *zone;
+ struct zoneref *zoneref;
+ int zone_idx;
+
+ for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
+ zone_idx = zonelist_zone_idx(zoneref);
+ if (zone_idx == ZONE_MOVABLE) {
+ if (!use_cma_pages && other_free)
+ *other_free -=
+ zone_page_state(zone, NR_FREE_CMA_PAGES);
+ continue;
+ }
+
+ if (zone_idx > classzone_idx) {
+ if (other_free != NULL)
+ *other_free -= zone_page_state(zone,
+ NR_FREE_PAGES);
+ if (other_file != NULL)
+ *other_file -= zone_page_state(zone,
+ NR_FILE_PAGES)
+ - zone_page_state(zone, NR_SHMEM)
+ - zone_page_state(zone, NR_SWAPCACHE);
+ } else if (zone_idx < classzone_idx) {
+ if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0) &&
+ other_free) {
+ if (!use_cma_pages) {
+ *other_free -= min(
+ zone->lowmem_reserve[classzone_idx] +
+ zone_page_state(
+ zone, NR_FREE_CMA_PAGES),
+ zone_page_state(
+ zone, NR_FREE_PAGES));
+ } else {
+ *other_free -=
+ zone->lowmem_reserve[classzone_idx];
+ }
+ } else {
+ if (other_free)
+ *other_free -=
+ zone_page_state(zone, NR_FREE_PAGES);
+ }
+ }
+ }
+}
+
+#ifdef CONFIG_HIGHMEM
+void adjust_gfp_mask(gfp_t *gfp_mask)
+{
+ struct zone *preferred_zone;
+ struct zonelist *zonelist;
+ enum zone_type high_zoneidx;
+
+ if (current_is_kswapd()) {
+ zonelist = node_zonelist(0, *gfp_mask);
+ high_zoneidx = gfp_zone(*gfp_mask);
+ first_zones_zonelist(zonelist, high_zoneidx, NULL,
+ &preferred_zone);
+
+ if (high_zoneidx == ZONE_NORMAL) {
+ if (zone_watermark_ok_safe(
+ preferred_zone, 0,
+ high_wmark_pages(preferred_zone), 0,
+ 0))
+ *gfp_mask |= __GFP_HIGHMEM;
+ } else if (high_zoneidx == ZONE_HIGHMEM) {
+ *gfp_mask |= __GFP_HIGHMEM;
+ }
+ }
+}
+#else
+void adjust_gfp_mask(gfp_t *unused)
+{
+}
+#endif
+
+void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
+{
+ gfp_t gfp_mask;
+ struct zone *preferred_zone;
+ struct zonelist *zonelist;
+ enum zone_type high_zoneidx, classzone_idx;
+ unsigned long balance_gap;
+ int use_cma_pages;
+
+ gfp_mask = sc->gfp_mask;
+ adjust_gfp_mask(&gfp_mask);
+
+ zonelist = node_zonelist(0, gfp_mask);
+ high_zoneidx = gfp_zone(gfp_mask);
+ first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
+ classzone_idx = zone_idx(preferred_zone);
+ use_cma_pages = can_use_cma_pages(gfp_mask);
+
+ balance_gap = min(low_wmark_pages(preferred_zone),
+ (preferred_zone->present_pages +
+ KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+ KSWAPD_ZONE_BALANCE_GAP_RATIO);
+
+ if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0,
+ high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX +
+ balance_gap, 0, 0))) {
+ if (lmk_fast_run)
+ tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+ other_file, use_cma_pages);
+ else
+ tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+ NULL, use_cma_pages);
+
+ if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) {
+ if (!use_cma_pages) {
+ *other_free -= min(
+ preferred_zone->lowmem_reserve[_ZONE]
+ + zone_page_state(
+ preferred_zone, NR_FREE_CMA_PAGES),
+ zone_page_state(
+ preferred_zone, NR_FREE_PAGES));
+ } else {
+ *other_free -=
+ preferred_zone->lowmem_reserve[_ZONE];
+ }
+ } else {
+ *other_free -= zone_page_state(preferred_zone,
+ NR_FREE_PAGES);
+ }
+
+ lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem "
+ "ofree %d, %d\n", *other_free, *other_file);
+ } else {
+ tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+ other_file, use_cma_pages);
+
+ if (!use_cma_pages) {
+ *other_free -=
+ zone_page_state(preferred_zone, NR_FREE_CMA_PAGES);
+ }
+
+ lowmem_print(4, "lowmem_shrink tunning for others ofree %d, "
+ "%d\n", *other_free, *other_file);
+ }
+}
+
static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
{
struct task_struct *tsk;
@@ -106,6 +388,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
unsigned long rem = 0;
int tasksize;
int i;
+ int ret = 0;
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
int minfree = 0;
int selected_tasksize = 0;
@@ -118,9 +401,16 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
return 0;
other_free = global_page_state(NR_FREE_PAGES);
- other_file = global_page_state(NR_FILE_PAGES) -
+
+ if (global_page_state(NR_SHMEM) + total_swapcache_pages() <
+ global_page_state(NR_FILE_PAGES))
+ other_file = global_page_state(NR_FILE_PAGES) -
global_page_state(NR_SHMEM) -
total_swapcache_pages();
+ else
+ other_file = 0;
+
+ tune_lmk_param(&other_free, &other_file, sc);
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
@@ -134,11 +424,14 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
}
}
+ ret = adjust_minadj(&min_score_adj);
+
lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
sc->nr_to_scan, sc->gfp_mask, other_free,
other_file, min_score_adj);
if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
+ trace_almk_shrink(0, ret, other_free, other_file, 0);
lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
sc->nr_to_scan, sc->gfp_mask);
mutex_unlock(&scan_mutex);
@@ -155,6 +448,10 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
if (tsk->flags & PF_KTHREAD)
continue;
+ /* If the task no longer has any memory, ignore it */
+ if (test_task_flag(tsk, TIF_MM_RELEASED))
+ continue;
+
if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
if (test_task_flag(tsk, TIF_MEMDIE)) {
rcu_read_unlock();
@@ -188,7 +485,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
selected = p;
selected_tasksize = tasksize;
selected_oom_score_adj = oom_score_adj;
- lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
+ lowmem_print(3, "select '%s' (%d), adj %hd, size %d, to kill\n",
p->comm, p->pid, oom_score_adj, tasksize);
}
if (selected) {
@@ -210,23 +507,48 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \
" to free %ldkB on behalf of '%s' (%d) because\n" \
" cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
- " Free memory is %ldkB above reserved\n",
+ " Free memory is %ldkB above reserved.\n" \
+ " Free CMA is %ldkB\n" \
+ " Total reserve is %ldkB\n" \
+ " Total free pages is %ldkB\n" \
+ " Total file cache is %ldkB\n" \
+ " GFP mask is 0x%x\n",
selected->comm, selected->pid,
selected_oom_score_adj,
selected_tasksize * (long)(PAGE_SIZE / 1024),
current->comm, current->pid,
cache_size, cache_limit,
min_score_adj,
- free);
+ free,
+ global_page_state(NR_FREE_CMA_PAGES) *
+ (long)(PAGE_SIZE / 1024),
+ totalreserve_pages * (long)(PAGE_SIZE / 1024),
+ global_page_state(NR_FREE_PAGES) *
+ (long)(PAGE_SIZE / 1024),
+ global_page_state(NR_FILE_PAGES) *
+ (long)(PAGE_SIZE / 1024),
+ sc->gfp_mask);
+
+ if (lowmem_debug_level >= 2 && selected_oom_score_adj == 0) {
+ show_mem(SHOW_MEM_FILTER_NODES);
+ dump_tasks(NULL, NULL);
+ }
+
lowmem_deathpending_timeout = jiffies + HZ;
rem += selected_tasksize;
+ rcu_read_unlock();
/* give the system time to free up the memory */
msleep_interruptible(20);
+ trace_almk_shrink(selected_tasksize, ret,
+ other_free, other_file,
+ selected_oom_score_adj);
+ } else {
+ trace_almk_shrink(1, ret, other_free, other_file, 0);
+ rcu_read_unlock();
}
lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
sc->nr_to_scan, sc->gfp_mask, rem);
- rcu_read_unlock();
mutex_unlock(&scan_mutex);
return rem;
}
@@ -240,6 +562,7 @@ static struct shrinker lowmem_shrinker = {
static int __init lowmem_init(void)
{
register_shrinker(&lowmem_shrinker);
+ vmpressure_notifier_register(&lmk_vmpr_nb);
return 0;
}
device_initcall(lowmem_init);
@@ -338,4 +661,5 @@ module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+module_param_named(lmk_fast_run, lmk_fast_run, int, S_IRUGO | S_IWUSR);
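All of the new knobs are plain module parameters, so they can be flipped at runtime through sysfs; a minimal userspace sketch (the path follows from module_param_named() and the built-in module name lowmemorykiller):

    /* Sketch: enable adaptive LMK at runtime. */
    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen(
    		"/sys/module/lowmemorykiller/parameters/enable_adaptive_lmk",
    		"w");

    	if (!f)
    		return 1;
    	fputs("1\n", f);
    	fclose(f);
    	return 0;
    }

vmpressure_file_min and lmk_fast_run are tunable the same way.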
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index 4becac67bcdc..eb42feff0712 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -24,6 +24,8 @@
#include <linux/usb/usb_ctrl_qti.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
+#include <linux/ipa_usb.h>
+#include <linux/usb/msm_hsusb.h>
#define GSI_RMNET_CTRL_NAME "rmnet_ctrl"
#define GSI_MBIM_CTRL_NAME "android_mbim"
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index cacb46378c94..8b4fd8c0436a 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -26,12 +26,18 @@
#include <linux/clk.h>
#include <linux/clk/msm-clk.h>
-#define INIT_MAX_TIME_USEC 1000
+enum core_ldo_levels {
+ CORE_LEVEL_NONE = 0,
+ CORE_LEVEL_MIN,
+ CORE_LEVEL_MAX,
+};
-#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
-#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
-#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
+#define INIT_MAX_TIME_USEC 1000
+/* default CORE voltage and load values */
+#define USB_SSPHY_1P2_VOL_MIN 1200000 /* uV */
+#define USB_SSPHY_1P2_VOL_MAX 1200000 /* uV */
+#define USB_SSPHY_HPM_LOAD 23000 /* uA */
/* USB3PHY_PCIE_USB3_PCS_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
@@ -64,8 +70,9 @@ struct msm_ssphy_qmp {
void __iomem *vls_clamp_reg;
struct regulator *vdd;
- struct regulator *vdda18;
int vdd_levels[3]; /* none, low, high */
+ struct regulator *core_ldo;
+ int core_voltage_levels[3];
struct clk *ref_clk_src;
struct clk *ref_clk;
struct clk *aux_clk;
@@ -171,41 +178,44 @@ static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on)
goto disable_regulators;
- rc = regulator_set_load(phy->vdda18, USB_SSPHY_1P8_HPM_LOAD);
+ rc = regulator_set_load(phy->core_ldo, USB_SSPHY_HPM_LOAD);
if (rc < 0) {
- dev_err(phy->phy.dev, "Unable to set HPM of vdda18\n");
+ dev_err(phy->phy.dev, "Unable to set HPM of core_ldo\n");
return rc;
}
- rc = regulator_set_voltage(phy->vdda18, USB_SSPHY_1P8_VOL_MIN,
- USB_SSPHY_1P8_VOL_MAX);
+ rc = regulator_set_voltage(phy->core_ldo,
+ phy->core_voltage_levels[CORE_LEVEL_MIN],
+ phy->core_voltage_levels[CORE_LEVEL_MAX]);
if (rc) {
- dev_err(phy->phy.dev, "unable to set voltage for vdda18\n");
- goto put_vdda18_lpm;
+ dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
+ goto put_core_ldo_lpm;
}
- rc = regulator_enable(phy->vdda18);
+ rc = regulator_enable(phy->core_ldo);
if (rc) {
- dev_err(phy->phy.dev, "Unable to enable vdda18\n");
- goto unset_vdda18;
+ dev_err(phy->phy.dev, "Unable to enable core_ldo\n");
+ goto unset_core_ldo;
}
return 0;
disable_regulators:
- rc = regulator_disable(phy->vdda18);
+ rc = regulator_disable(phy->core_ldo);
if (rc)
- dev_err(phy->phy.dev, "Unable to disable vdda18\n");
+ dev_err(phy->phy.dev, "Unable to disable core_ldo\n");
-unset_vdda18:
- rc = regulator_set_voltage(phy->vdda18, 0, USB_SSPHY_1P8_VOL_MAX);
+unset_core_ldo:
+ rc = regulator_set_voltage(phy->core_ldo,
+ phy->core_voltage_levels[CORE_LEVEL_NONE],
+ phy->core_voltage_levels[CORE_LEVEL_MAX]);
if (rc)
- dev_err(phy->phy.dev, "unable to set voltage for vdda18\n");
+ dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
-put_vdda18_lpm:
- rc = regulator_set_load(phy->vdda18, 0);
+put_core_ldo_lpm:
+ rc = regulator_set_load(phy->core_ldo, 0);
if (rc < 0)
- dev_err(phy->phy.dev, "Unable to set LPM of vdda18\n");
+ dev_err(phy->phy.dev, "Unable to set LPM of core_ldo\n");
return rc < 0 ? rc : 0;
}
@@ -495,7 +505,7 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev)
struct msm_ssphy_qmp *phy;
struct device *dev = &pdev->dev;
struct resource *res;
- int ret = 0, size = 0;
+ int ret = 0, size = 0, len;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
@@ -631,11 +641,36 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev)
}
}
- ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
- (u32 *) phy->vdd_levels,
- ARRAY_SIZE(phy->vdd_levels));
- if (ret) {
- dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
+ /* Set default core voltage values */
+ phy->core_voltage_levels[CORE_LEVEL_NONE] = 0;
+ phy->core_voltage_levels[CORE_LEVEL_MIN] = USB_SSPHY_1P2_VOL_MIN;
+ phy->core_voltage_levels[CORE_LEVEL_MAX] = USB_SSPHY_1P2_VOL_MAX;
+
+ if (of_get_property(dev->of_node, "qcom,core-voltage-level", &len) &&
+ len == sizeof(phy->core_voltage_levels)) {
+ ret = of_property_read_u32_array(dev->of_node,
+ "qcom,core-voltage-level",
+ (u32 *)phy->core_voltage_levels,
+ len / sizeof(u32));
+ if (ret) {
+ dev_err(dev, "err qcom,core-voltage-level property\n");
+ goto err;
+ }
+ }
+
+ if (of_get_property(dev->of_node, "qcom,vdd-voltage-level", &len) &&
+ len == sizeof(phy->vdd_levels)) {
+ ret = of_property_read_u32_array(dev->of_node,
+ "qcom,vdd-voltage-level",
+ (u32 *) phy->vdd_levels,
+ len / sizeof(u32));
+ if (ret) {
+ dev_err(dev, "err qcom,vdd-voltage-level property\n");
+ goto err;
+ }
+ } else {
+ ret = -EINVAL;
+ dev_err(dev, "error invalid inputs for vdd-voltage-level\n");
goto err;
}
@@ -646,10 +681,10 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev)
goto err;
}
- phy->vdda18 = devm_regulator_get(dev, "vdda18");
- if (IS_ERR(phy->vdda18)) {
- dev_err(dev, "unable to get vdda18 supply\n");
- ret = PTR_ERR(phy->vdda18);
+ phy->core_ldo = devm_regulator_get(dev, "core");
+ if (IS_ERR(phy->core_ldo)) {
+ dev_err(dev, "unable to get core ldo supply\n");
+ ret = PTR_ERR(phy->core_ldo);
goto err;
}
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 8942af0813e3..9796b4426710 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -36,6 +36,7 @@ struct vm_area_struct;
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
+#define ___GFP_CMA 0x4000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -50,8 +51,9 @@ struct vm_area_struct;
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
+#define __GFP_CMA ((__force gfp_t)___GFP_CMA)
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \
+ __GFP_CMA)
/*
* Page mobility and placement hints
*
@@ -183,7 +185,7 @@ struct vm_area_struct;
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 27
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@@ -264,7 +266,12 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
+#ifndef CONFIG_CMA
return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+#else
+ return ((gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT) |
+ ((gfp_flags & __GFP_CMA) != 0);
+#endif
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 4c70716759a6..61aff324bd5e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -187,9 +187,24 @@ static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
unsigned long vaddr)
{
+#ifndef CONFIG_CMA
return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+#else
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+#endif
}
+#ifdef CONFIG_CMA
+static inline struct page *
+alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+}
+#endif
+
static inline void clear_highpage(struct page *page)
{
void *kaddr = kmap_atomic(page);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 423d214f708b..ad4c3f186f61 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -39,8 +39,6 @@ enum {
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
- MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
- MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/*
* MIGRATE_CMA migration type is designed to mimic the way
@@ -57,18 +55,30 @@ enum {
*/
MIGRATE_CMA,
#endif
+ MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
+ MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif
MIGRATE_TYPES
};
+/*
+ * Returns a list which contains the migrate types on to which
+ * an allocation falls back when the free list for the migrate
+ * type mtype is depleted.
+ * The end of the list is delimited by the type MIGRATE_TYPES.
+ */
+extern int *get_migratetype_fallbacks(int mtype);
+
#ifdef CONFIG_CMA
bool is_cma_pageblock(struct page *page);
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define get_cma_migrate_type() MIGRATE_CMA
#else
# define is_cma_pageblock(page) false
# define is_migrate_cma(migratetype) false
+# define get_cma_migrate_type() MIGRATE_MOVABLE
#endif
#define for_each_migratetype_order(order, type) \
@@ -159,6 +169,7 @@ enum zone_stat_item {
WORKINGSET_NODERECLAIM,
NR_ANON_TRANSPARENT_HUGEPAGES,
NR_FREE_CMA_PAGES,
+ NR_SWAPCACHE,
NR_VM_ZONE_STAT_ITEMS };
/*
@@ -367,6 +378,9 @@ struct zone {
* considered dirtyable memory.
*/
unsigned long dirty_balance_reserve;
+#ifdef CONFIG_CMA
+ bool cma_alloc;
+#endif
#ifndef CONFIG_SPARSEMEM
/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e963ff30a7f6..7ece18efd02b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2740,7 +2740,7 @@ static inline void mmdrop(struct mm_struct * mm)
}
/* mmput gets rid of the mappings and all user-space */
-extern void mmput(struct mm_struct *);
+extern int mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
diff --git a/include/trace/events/almk.h b/include/trace/events/almk.h
new file mode 100644
index 000000000000..85d712d48f50
--- /dev/null
+++ b/include/trace/events/almk.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM almk
+
+#if !defined(_TRACE_EVENT_ALMK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_ALMK_H
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+TRACE_EVENT(almk_vmpressure,
+
+ TP_PROTO(unsigned long pressure,
+ int other_free,
+ int other_file),
+
+ TP_ARGS(pressure, other_free, other_file),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pressure)
+ __field(int, other_free)
+ __field(int, other_file)
+ ),
+
+ TP_fast_assign(
+ __entry->pressure = pressure;
+ __entry->other_free = other_free;
+ __entry->other_file = other_file;
+ ),
+
+ TP_printk("%lu, %d, %d",
+ __entry->pressure, __entry->other_free,
+ __entry->other_file)
+);
+
+TRACE_EVENT(almk_shrink,
+
+ TP_PROTO(int tsize,
+ int vmp,
+ int other_free,
+ int other_file,
+ short adj),
+
+ TP_ARGS(tsize, vmp, other_free, other_file, adj),
+
+ TP_STRUCT__entry(
+ __field(int, tsize)
+ __field(int, vmp)
+ __field(int, other_free)
+ __field(int, other_file)
+ __field(short, adj)
+ ),
+
+ TP_fast_assign(
+ __entry->tsize = tsize;
+ __entry->vmp = vmp;
+ __entry->other_free = other_free;
+ __entry->other_file = other_file;
+ __entry->adj = adj;
+ ),
+
+ TP_printk("%d, %d, %d, %d, %d",
+ __entry->tsize,
+ __entry->vmp,
+ __entry->other_free,
+ __entry->other_file,
+ __entry->adj)
+);
+
+#endif
+
+#include <trace/define_trace.h>
+
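Once enabled through tracefs, the events print in the TP_printk formats defined above, i.e. (field values hypothetical):

    almk_vmpressure: 95, 10240, 20480
    almk_shrink: 34012, 1, 10240, 20480, 353

read as pressure/other_free/other_file and tsize/vmp/other_free/other_file/adj respectively.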
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 96a79417671a..29e3be2ce18c 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -271,6 +271,7 @@ header-y += membarrier.h
header-y += memfd.h
header-y += mempolicy.h
header-y += meye.h
+header-y += mhi.h
header-y += mic_common.h
header-y += mic_ioctl.h
header-y += mii.h
diff --git a/include/uapi/linux/mhi.h b/include/uapi/linux/mhi.h
new file mode 100644
index 000000000000..834c1dc77173
--- /dev/null
+++ b/include/uapi/linux/mhi.h
@@ -0,0 +1,37 @@
+#ifndef _UAPI_MHI_H
+#define _UAPI_MHI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+enum peripheral_ep_type {
+ DATA_EP_TYPE_RESERVED,
+ DATA_EP_TYPE_HSIC,
+ DATA_EP_TYPE_HSUSB,
+ DATA_EP_TYPE_PCIE,
+ DATA_EP_TYPE_EMBEDDED,
+ DATA_EP_TYPE_BAM_DMUX,
+};
+
+struct peripheral_ep_info {
+ enum peripheral_ep_type ep_type;
+ __u32 peripheral_iface_id;
+};
+
+struct ipa_ep_pair {
+ __u32 cons_pipe_num;
+ __u32 prod_pipe_num;
+};
+
+struct ep_info {
+ struct peripheral_ep_info ph_ep_info;
+ struct ipa_ep_pair ipa_ep_pair;
+
+};
+
+#define MHI_UCI_IOCTL_MAGIC 'm'
+
+#define MHI_UCI_EP_LOOKUP _IOR(MHI_UCI_IOCTL_MAGIC, 2, struct ep_info)
+
+#endif /* _UAPI_MHI_H */
+
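A hedged userspace sketch of the new ioctl; the MHI UCI device node name below is an assumption, not something this header defines:

    /* Sketch only: query endpoint info over an MHI UCI node. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/mhi.h>

    int main(void)
    {
    	struct ep_info info;
    	int fd = open("/dev/mhi_uci_node", O_RDONLY);	/* node name assumed */

    	if (fd < 0)
    		return 1;
    	if (ioctl(fd, MHI_UCI_EP_LOOKUP, &info) == 0)
    		printf("ep_type=%d iface=%u cons=%u prod=%u\n",
    		       info.ph_ep_info.ep_type,
    		       info.ph_ep_info.peripheral_iface_id,
    		       info.ipa_ep_pair.cons_pipe_num,
    		       info.ipa_ep_pair.prod_pipe_num);
    	close(fd);
    	return 0;
    }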
diff --git a/include/uapi/linux/usb/Kbuild b/include/uapi/linux/usb/Kbuild
index 4cc4d6e7e523..ba153d582422 100644
--- a/include/uapi/linux/usb/Kbuild
+++ b/include/uapi/linux/usb/Kbuild
@@ -9,4 +9,5 @@ header-y += g_printer.h
header-y += gadgetfs.h
header-y += midi.h
header-y += tmc.h
+header-y += usb_ctrl_qti.h
header-y += video.h
diff --git a/include/uapi/linux/usb/usb_ctrl_qti.h b/include/uapi/linux/usb/usb_ctrl_qti.h
new file mode 100644
index 000000000000..b02272a03e40
--- /dev/null
+++ b/include/uapi/linux/usb/usb_ctrl_qti.h
@@ -0,0 +1,41 @@
+#ifndef __UAPI_LINUX_USB_CTRL_QTI_H
+#define __UAPI_LINUX_USB_CTRL_QTI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MAX_QTI_PKT_SIZE 2048
+
+#define QTI_CTRL_IOCTL_MAGIC 'r'
+#define QTI_CTRL_GET_LINE_STATE _IOR(QTI_CTRL_IOCTL_MAGIC, 2, int)
+#define QTI_CTRL_EP_LOOKUP _IOR(QTI_CTRL_IOCTL_MAGIC, 3, struct ep_info)
+#define QTI_CTRL_MODEM_OFFLINE _IO(QTI_CTRL_IOCTL_MAGIC, 4)
+#define QTI_CTRL_MODEM_ONLINE _IO(QTI_CTRL_IOCTL_MAGIC, 5)
+
+enum peripheral_ep_type {
+ DATA_EP_TYPE_RESERVED = 0x0,
+ DATA_EP_TYPE_HSIC = 0x1,
+ DATA_EP_TYPE_HSUSB = 0x2,
+ DATA_EP_TYPE_PCIE = 0x3,
+ DATA_EP_TYPE_EMBEDDED = 0x4,
+ DATA_EP_TYPE_BAM_DMUX = 0x5,
+};
+
+struct peripheral_ep_info {
+ enum peripheral_ep_type ep_type;
+ __u32 peripheral_iface_id;
+};
+
+struct ipa_ep_pair {
+ __u32 cons_pipe_num;
+ __u32 prod_pipe_num;
+};
+
+struct ep_info {
+ struct peripheral_ep_info ph_ep_info;
+ struct ipa_ep_pair ipa_ep_pair;
+
+};
+
+#endif
+
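The QTI control header is consumed the same way; a sketch, with the control node name taken from GSI_RMNET_CTRL_NAME above but still an assumption for any given target:

    /* Sketch only: query the line state on a QTI control node. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/usb/usb_ctrl_qti.h>

    int main(void)
    {
    	int state = -1;
    	int fd = open("/dev/rmnet_ctrl", O_RDONLY);	/* node name assumed */

    	if (fd < 0)
    		return 1;
    	if (ioctl(fd, QTI_CTRL_GET_LINE_STATE, &state) == 0)
    		printf("line state %d\n", state);
    	close(fd);
    	return 0;
    }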
diff --git a/kernel/exit.c b/kernel/exit.c
index 77d54139672b..a32e83d567b9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -388,6 +388,7 @@ static void exit_mm(struct task_struct *tsk)
{
struct mm_struct *mm = tsk->mm;
struct core_state *core_state;
+ int mm_released;
mm_release(tsk, mm);
if (!mm)
@@ -434,9 +435,12 @@ static void exit_mm(struct task_struct *tsk)
enter_lazy_tlb(mm, current);
task_unlock(tsk);
mm_update_next_owner(mm);
- mmput(mm);
+
+ mm_released = mmput(mm);
if (test_thread_flag(TIF_MEMDIE))
exit_oom_victim();
+ if (mm_released)
+ set_tsk_thread_flag(tsk, TIF_MM_RELEASED);
}
static struct task_struct *find_alive_thread(struct task_struct *p)
diff --git a/kernel/fork.c b/kernel/fork.c
index 859b949d106f..c9eb86b646ab 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -694,8 +694,9 @@ EXPORT_SYMBOL_GPL(__mmdrop);
/*
* Decrement the use count and release all resources for an mm.
*/
-void mmput(struct mm_struct *mm)
+int mmput(struct mm_struct *mm)
{
+ int mm_freed = 0;
might_sleep();
if (atomic_dec_and_test(&mm->mm_users)) {
@@ -713,7 +714,9 @@ void mmput(struct mm_struct *mm)
if (mm->binfmt)
module_put(mm->binfmt->module);
mmdrop(mm);
+ mm_freed = 1;
}
+ return mm_freed;
}
EXPORT_SYMBOL_GPL(mmput);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d63689fda9b4..f54a84fb5e6e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1466,6 +1466,11 @@ static int fallbacks[MIGRATE_TYPES][4] = {
#endif
};
+int *get_migratetype_fallbacks(int mtype)
+{
+ return fallbacks[mtype];
+}
+
#ifdef CONFIG_CMA
static struct page *__rmqueue_cma_fallback(struct zone *zone,
unsigned int order)
@@ -1810,17 +1815,23 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page)) {
- if (migratetype == MIGRATE_MOVABLE)
- page = __rmqueue_cma_fallback(zone, order);
-
- if (!page)
- page = __rmqueue_fallback(zone, order, migratetype);
+ page = __rmqueue_fallback(zone, order, migratetype);
}
trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
}
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
+{
+ struct page *page = NULL;
+
+ if (IS_ENABLED(CONFIG_CMA) && !zone->cma_alloc)
+ page = __rmqueue_cma_fallback(zone, order);
+ trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
+ return page;
+}
+
/*
* Obtain a specified number of elements from the buddy allocator, all under
* a single hold of the lock, for efficiency. Add them to the supplied list.
@@ -1834,7 +1845,17 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
- struct page *page = __rmqueue(zone, order, migratetype, 0);
+ struct page *page;
+
+ /*
+ * If migratetype CMA is being requested, only try to
+ * satisfy the request with CMA pages, to increase
+ * CMA utilization.
+ */
+ if (is_migrate_cma(migratetype))
+ page = __rmqueue_cma(zone, order);
+ else
+ page = __rmqueue(zone, order, migratetype, 0);
if (unlikely(page == NULL))
break;
@@ -1861,6 +1882,28 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
return i;
}
+/*
+ * Return the pcp list that corresponds to the migrate type if that list isn't
+ * empty.
+ * If the list is empty return NULL.
+ */
+static struct list_head *get_populated_pcp_list(struct zone *zone,
+ unsigned int order, struct per_cpu_pages *pcp,
+ int migratetype, int cold)
+{
+ struct list_head *list = &pcp->lists[migratetype];
+
+ if (list_empty(list)) {
+ pcp->count += rmqueue_bulk(zone, order,
+ pcp->batch, list,
+ migratetype, cold);
+
+ if (list_empty(list))
+ list = NULL;
+ }
+ return list;
+}
+
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -2061,8 +2104,7 @@ void free_hot_cold_page(struct page *page, bool cold)
* excessively into the page allocator
*/
if (migratetype >= MIGRATE_PCPTYPES) {
- if (unlikely(is_migrate_isolate(migratetype)) ||
- is_migrate_cma(migratetype)) {
+ if (unlikely(is_migrate_isolate(migratetype))) {
free_one_page(zone, page, pfn, 0, migratetype);
goto out;
}
@@ -2211,21 +2253,32 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
gfp_t gfp_flags, int alloc_flags, int migratetype)
{
unsigned long flags;
- struct page *page;
+ struct page *page = NULL;
bool cold = ((gfp_flags & __GFP_COLD) != 0);
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
- struct list_head *list;
+ struct list_head *list = NULL;
local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- if (list_empty(list)) {
- pcp->count += rmqueue_bulk(zone, 0,
- pcp->batch, list,
- migratetype, cold);
- if (unlikely(list_empty(list)))
+
+ /* First try to get CMA pages */
+ if (migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA) {
+ list = get_populated_pcp_list(zone, 0, pcp,
+ get_cma_migrate_type(), cold);
+ }
+
+ if (list == NULL) {
+ /*
+ * Either CMA is not suitable or there are no free CMA
+ * pages.
+ */
+ list = get_populated_pcp_list(zone, 0, pcp,
+ migratetype, cold);
+ if (unlikely(list == NULL) ||
+ unlikely(list_empty(list)))
goto failed;
}
@@ -2258,8 +2311,13 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
if (page)
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
+ if (!page && migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA)
+ page = __rmqueue_cma(zone, order);
+
if (!page)
page = __rmqueue(zone, order, migratetype, gfp_flags);
+
spin_unlock(&zone->lock);
if (!page)
goto failed;
@@ -6748,6 +6806,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
if (ret)
return ret;
+ cc.zone->cma_alloc = 1;
+
ret = __alloc_contig_migrate_range(&cc, start, end);
if (ret)
goto done;
@@ -6806,6 +6866,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
done:
undo_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
+ cc.zone->cma_alloc = 0;
return ret;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 4e166f1c692c..61039e39e25f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -96,6 +96,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
if (likely(!error)) {
address_space->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
+ __inc_zone_page_state(page, NR_SWAPCACHE);
INC_CACHE_INFO(add_total);
}
spin_unlock_irq(&address_space->tree_lock);
@@ -148,6 +149,7 @@ void __delete_from_swap_cache(struct page *page)
ClearPageSwapCache(page);
address_space->nrpages--;
__dec_zone_page_state(page, NR_FILE_PAGES);
+ __dec_zone_page_state(page, NR_SWAPCACHE);
INC_CACHE_INFO(del_total);
}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4923dfe89983..ca75eeecbad1 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -763,6 +763,7 @@ const char * const vmstat_text[] = {
"workingset_nodereclaim",
"nr_anon_transparent_hugepages",
"nr_free_cma",
+ "nr_swapcache",
/* enum writeback_stat_item counters */
"nr_dirty_threshold",
@@ -926,10 +927,10 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
"Unmovable",
"Movable",
"Reclaimable",
- "HighAtomic",
#ifdef CONFIG_CMA
"CMA",
#endif
+ "HighAtomic",
#ifdef CONFIG_MEMORY_ISOLATION
"Isolate",
#endif