summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/arm/msm/clock-controller.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt6
-rw-r--r--Documentation/devicetree/bindings/pci/msm_pcie.txt10
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-qpnp.txt23
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-sbc.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-cdp.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-liquid.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-mtp.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi89
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi24
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-coresight.dtsi81
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi21
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi13
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt.dtsi57
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig6
-rw-r--r--arch/arm64/configs/msmcortex_defconfig6
-rw-r--r--arch/arm64/include/asm/debugv8.h229
-rw-r--r--arch/arm64/include/asm/etmv4x.h385
-rw-r--r--drivers/char/adsprpc.c40
-rw-r--r--drivers/char/adsprpc_compat.c22
-rw-r--r--drivers/char/adsprpc_shared.h2
-rw-r--r--drivers/char/diag/diag_dci.c24
-rw-r--r--drivers/char/diag/diag_dci.h1
-rw-r--r--drivers/char/diag/diagchar_core.c44
-rw-r--r--drivers/char/diag/diagfwd_cntl.c28
-rw-r--r--drivers/clk/msm/clock-gcc-cobalt.c17
-rw-r--r--drivers/clk/msm/clock-gpu-cobalt.c119
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c55
-rw-r--r--drivers/cpuidle/lpm-levels.c2
-rw-r--r--drivers/crypto/Kconfig12
-rw-r--r--drivers/extcon/extcon.c3
-rw-r--r--drivers/gpu/msm/kgsl_pwrctrl.c24
-rw-r--r--drivers/input/qpnp-power-on.c2
-rw-r--r--drivers/media/platform/msm/vidc/hfi_packetization.c7
-rw-r--r--drivers/media/platform/msm/vidc/msm_vdec.c32
-rw-r--r--drivers/media/platform/msm/vidc/msm_venc.c23
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.c37
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.h2
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi.h6
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi_api.h2
-rw-r--r--drivers/pci/host/pci-msm.c48
-rw-r--r--drivers/platform/msm/ipa/ipa_api.c312
-rw-r--r--drivers/platform/msm/ipa/ipa_api.h63
-rw-r--r--drivers/platform/msm/ipa/ipa_clients/Makefile4
-rw-r--r--drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c2589
-rw-r--r--drivers/platform/msm/ipa/ipa_common_i.h189
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_i.h108
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c1658
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c1
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_uc.c16
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c36
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c16
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_utils.c56
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa.c18
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_client.c6
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dma.c6
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dp.c8
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_flt.c14
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c12
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h114
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c2783
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_rt.c14
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_uc.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c11
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_utils.c50
-rw-r--r--drivers/platform/msm/mhi_dev/mhi.c2
-rw-r--r--drivers/platform/msm/mhi_dev/mhi_sm.c2
-rw-r--r--drivers/power/qcom-charger/qpnp-smb2.c9
-rw-r--r--drivers/pwm/pwm-qpnp.c137
-rw-r--r--drivers/soc/qcom/Kconfig12
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/icnss.c308
-rw-r--r--drivers/soc/qcom/jtag-fuse.c209
-rw-r--r--drivers/soc/qcom/jtagv8-etm.c1722
-rw-r--r--drivers/soc/qcom/jtagv8.c1015
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr.c10
-rw-r--r--drivers/soc/qcom/qdsp6v2/msm_audio_ion.c24
-rw-r--r--drivers/soc/qcom/socinfo.c2
-rw-r--r--drivers/soc/qcom/watchdog_v2.c128
-rw-r--r--drivers/usb/dwc3/debugfs.c12
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_host.c9
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_phy.h23
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_phy_v3.c111
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_formats.h19
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp.c8
-rw-r--r--drivers/video/fbdev/msm/mdss_wb.c15
-rw-r--r--drivers/video/fbdev/msm/msm_mdss_io_8974.c151
-rw-r--r--include/linux/extcon.h3
-rw-r--r--include/linux/ipa.h142
-rw-r--r--include/linux/ipa_mhi.h161
-rw-r--r--include/linux/trace_events.h13
-rw-r--r--include/soc/qcom/icnss.h4
-rw-r--r--include/trace/trace_events.h3
-rw-r--r--include/uapi/linux/v4l2-controls.h6
-rw-r--r--include/uapi/media/msm_vidc.h20
-rw-r--r--kernel/trace/trace.c13
-rw-r--r--kernel/trace/trace_events.c5
-rw-r--r--kernel/trace/trace_syscalls.c4
-rw-r--r--kernel/trace/trace_uprobe.c2
-rw-r--r--sound/soc/codecs/wcd_cpe_core.c29
-rw-r--r--sound/soc/msm/qdsp6v2/audio_cal_utils.c2
-rw-r--r--sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c16
-rwxr-xr-xsound/soc/msm/qdsp6v2/msm-compress-q6-v2.c2
-rw-r--r--sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c2
-rw-r--r--sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c4
-rw-r--r--sound/soc/msm/qdsp6v2/msm-dts-eagle.c32
-rw-r--r--sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.c4
-rw-r--r--sound/soc/msm/qdsp6v2/msm-lsm-client.c22
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c8
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c6
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c4
-rw-r--r--sound/soc/msm/qdsp6v2/q6adm.c22
-rw-r--r--sound/soc/msm/qdsp6v2/q6afe.c26
-rw-r--r--sound/soc/msm/qdsp6v2/q6asm.c78
-rw-r--r--sound/soc/msm/qdsp6v2/q6core.c12
-rw-r--r--sound/soc/msm/qdsp6v2/q6lsm.c20
-rw-r--r--sound/soc/msm/qdsp6v2/q6voice.c26
-rw-r--r--sound/soc/msm/qdsp6v2/rtac.c24
125 files changed, 8875 insertions, 5390 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/clock-controller.txt b/Documentation/devicetree/bindings/arm/msm/clock-controller.txt
index 7e421d3153db..c6fcf937ec3f 100644
--- a/Documentation/devicetree/bindings/arm/msm/clock-controller.txt
+++ b/Documentation/devicetree/bindings/arm/msm/clock-controller.txt
@@ -64,6 +64,7 @@ Required properties:
"qcom,gcc-mdm9607"
"qcom,cc-debug-mdm9607"
"qcom,gcc-cobalt"
+ "qcom,gcc-cobalt-v2"
"qcom,gcc-hamster"
"qcom,cc-debug-cobalt"
"qcom,gpucc-cobalt"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
index e6fbe23524d0..56559a69eb46 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
@@ -22,6 +22,12 @@ Required properties:
- interrupts : should contain bark and bite irq numbers
- qcom,pet-time : Non zero time interval at which watchdog should be pet in ms.
- qcom,bark-time : Non zero timeout value for a watchdog bark in ms.
+- qcom,userspace-watchdog :
+ (boolean) Allow enabling the userspace-watchdog feature. This feature
+ requires userspace to pet the watchdog every qcom,pet-time interval
+ in addition to the existing kernel-level checks.
+ This feature is supported through device sysfs files.
+
Optional properties:
diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt
index c8e634617edb..4b5a6b4af789 100644
--- a/Documentation/devicetree/bindings/pci/msm_pcie.txt
+++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt
@@ -75,6 +75,9 @@ Optional Properties:
- qcom,pcie-phy-ver: version of PCIe PHY.
- qcom,phy-sequence: The initialization sequence to bring up the PCIe PHY.
Should be specified in groups (offset, value, delay).
+ - qcom,port-phy-sequence: The initialization sequence to bring up the
+ PCIe port PHY.
+ Should be specified in groups (offset, value, delay).
- qcom,use-19p2mhz-aux-clk: The frequency of PCIe AUX clock is 19.2MHz.
- qcom,ep-wakeirq: The endpoint will issue wake signal when it is up, and the
root complex has the capability to enumerate the endpoint for this case.
@@ -197,6 +200,13 @@ Example:
0x15c 0x06 0x00
0x090 0x01 0x00
0x808 0x03 0x00>;
+ qcom,port-phy-sequence = <0x804 0x01 0x00
+ 0x034 0x14 0x00
+ 0x138 0x30 0x00
+ 0x048 0x0f 0x00
+ 0x15c 0x06 0x00
+ 0x090 0x01 0x00
+ 0x808 0x03 0x00>;
perst-gpio = <&msmgpio 70 0>;
wake-gpio = <&msmgpio 69 0>;
clkreq-gpio = <&msmgpio 68 0>;
diff --git a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
index 52a4bc81b4c1..c784a01d6411 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
@@ -33,14 +33,21 @@ Optional device bindings:
- qcom,channel-owner: A string value to supply owner information.
- qcom,mode-select: 0 = PWM mode
1 = LPG mode
-- qcom,lpg-dtest-line: indicates which DTEST line to be configured for LPG
- output. Possible values are 1, 2, 3 and 4.
+- qcom,dtest-line: indicates which DTEST line to be configured for LPG
+ or PWM output. For LPG subtypes, possible values are 1,
+ 2, 3 and 4. For PWM subtype, possibe values are 1 and 2.
- qcom,dtest-output: indicates the output configuration for DTEST line.
- 0 = Disabled
- 1 = LPG output low
- 2 = LPG output high
- 3,4,5 = DTEST line specific configuration
- 6,7 = Not used
+ For LPG subtypes, possible output values are:
+ 0 = Disabled
+ 1 = LPG output low
+ 2 = LPG output high
+ 3,4,5 = DTEST line specific configuration
+ 6,7 = Not used
+ For PWM subtype, possible output values are:
+ 0 = Disabled
+ 1 = pwm_out for DTEST1 or reserved
+ 2 = pwm_out for DTEST2 or reserved
+ 3 = Not used
If this binding is specified along with the required bindings of PWM/LPG then
in addition to configure PWM/LPG the qpnp-pwm driver also enables the feature
at the probe time. In the case where the binding is not specified the qpnp-pwm
@@ -181,7 +188,7 @@ Example:
qcom,ramp-index = <1>;
qcom,force-pwm-size = <9>;
qcom,period = <6000000>;
- qcom,lpg-dtest-line = <3>;
+ qcom,dtest-line = <3>;
qcom,dtest-output = <1>;
status = "okay";
diff --git a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
index 13f81d72b4e1..9083c89802f3 100644
--- a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
+++ b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
@@ -678,7 +678,7 @@
&spmi_bus {
qcom,pm8994@1 {
pwm@b100 {
- qcom,lpg-dtest-line = <4>;
+ qcom,dtest-line = <4>;
qcom,dtest-output = <1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi b/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi
index 79af62efc7d4..a2085945ac33 100644
--- a/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi
+++ b/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -648,7 +648,7 @@
&spmi_bus {
qcom,pm8994@1 {
pwm@b100 {
- qcom,lpg-dtest-line = <4>;
+ qcom,dtest-line = <4>;
qcom,dtest-output = <1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/apq8096-sbc.dtsi b/arch/arm/boot/dts/qcom/apq8096-sbc.dtsi
index 2c880ef9d41b..4e7379d9e164 100644
--- a/arch/arm/boot/dts/qcom/apq8096-sbc.dtsi
+++ b/arch/arm/boot/dts/qcom/apq8096-sbc.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -615,7 +615,7 @@
&spmi_bus {
qcom,pm8994@1 {
pwm@b100 {
- qcom,lpg-dtest-line = <4>;
+ qcom,dtest-line = <4>;
qcom,dtest-output = <1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi
index 51bb308d9832..4855da387e21 100644
--- a/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi
@@ -735,7 +735,7 @@
&pmi8994_pwm_4 {
qcom,channel-owner = "lcd_bl";
- qcom,lpg-dtest-line = <4>;
+ qcom,dtest-line = <4>;
qcom,dtest-output = <1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi b/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi
index a1376fe1b454..33397e3e4762 100644
--- a/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -335,7 +335,7 @@
&pmi8994_pwm_4 {
qcom,channel-owner = "lcd_bl";
- qcom,lpg-dtest-line = <4>;
+ qcom,dtest-line = <4>;
qcom,dtest-output = <1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
index 36af9f431cad..96279288d336 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
@@ -590,7 +590,7 @@
&pmi8994_pwm_4 {
qcom,channel-owner = "lcd_bl";
- qcom,lpg-dtest-line = <4>;
+ qcom,dtest-line = <4>;
qcom,dtest-output = <1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi
index 044bd1a5e510..a17fc360e2d0 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi
@@ -420,65 +420,72 @@
vfe1-vdd-supply = <&gdsc_vfe1>;
qcom,vdd-names = "camss-vdd", "vfe0-vdd",
"vfe1-vdd";
- clocks = <&clock_mmss clk_mmss_camss_top_ahb_clk>,
+ qcom,clock-cntl-support;
+ clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ <&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_camss_ahb_clk>,
+ <&clock_mmss clk_mmss_camss_top_ahb_clk>,
<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
<&clock_mmss clk_csi0_clk_src>,
- <&clock_mmss clk_mmss_camss_csi0_clk>,
- <&clock_mmss clk_mmss_camss_csi0rdi_clk>,
- <&clock_mmss clk_mmss_camss_csi0pix_clk>,
<&clock_mmss clk_csi1_clk_src>,
- <&clock_mmss clk_mmss_camss_csi1_clk>,
- <&clock_mmss clk_mmss_camss_csi1rdi_clk>,
- <&clock_mmss clk_mmss_camss_csi1pix_clk>,
<&clock_mmss clk_csi2_clk_src>,
- <&clock_mmss clk_mmss_camss_csi2_clk>,
- <&clock_mmss clk_mmss_camss_csi2rdi_clk>,
- <&clock_mmss clk_mmss_camss_csi2pix_clk>,
<&clock_mmss clk_csi3_clk_src>,
- <&clock_mmss clk_mmss_camss_csi3_clk>,
+ <&clock_mmss clk_mmss_camss_csi0rdi_clk>,
+ <&clock_mmss clk_mmss_camss_csi1rdi_clk>,
+ <&clock_mmss clk_mmss_camss_csi2rdi_clk>,
<&clock_mmss clk_mmss_camss_csi3rdi_clk>,
+ <&clock_mmss clk_mmss_camss_csi0pix_clk>,
+ <&clock_mmss clk_mmss_camss_csi1pix_clk>,
+ <&clock_mmss clk_mmss_camss_csi2pix_clk>,
<&clock_mmss clk_mmss_camss_csi3pix_clk>,
- <&clock_mmss clk_vfe0_clk_src>,
+ <&clock_mmss clk_mmss_camss_csi0_clk>,
+ <&clock_mmss clk_mmss_camss_csi1_clk>,
+ <&clock_mmss clk_mmss_camss_csi2_clk>,
+ <&clock_mmss clk_mmss_camss_csi3_clk>,
<&clock_mmss clk_mmss_camss_vfe0_clk>,
+ <&clock_mmss clk_vfe0_clk_src>,
<&clock_mmss clk_mmss_camss_csi_vfe0_clk>,
- <&clock_mmss clk_vfe1_clk_src>,
<&clock_mmss clk_mmss_camss_vfe1_clk>,
+ <&clock_mmss clk_vfe1_clk_src>,
<&clock_mmss clk_mmss_camss_csi_vfe1_clk>;
- clock-names = "camss_top_ahb_clk",
- "camss_ahb_clk", "ispif_ahb_clk",
- "csi0_src_clk", "csi0_clk",
- "csi0_pix_clk", "csi0_rdi_clk",
- "csi1_src_clk", "csi1_clk",
- "csi1_pix_clk", "csi1_rdi_clk",
- "csi2_src_clk", "csi2_clk",
- "csi2_pix_clk", "csi2_rdi_clk",
- "csi3_src_clk", "csi3_clk",
- "csi3_pix_clk", "csi3_rdi_clk",
- "vfe0_clk_src", "camss_vfe_vfe0_clk",
- "camss_csi_vfe0_clk",
- "vfe1_clk_src", "camss_vfe_vfe1_clk",
- "camss_csi_vfe1_clk";
- qcom,clock-rates = <0 0 0
+ clock-names = "mnoc_maxi_clk", "mnoc_ahb_clk",
+ "camss_ahb_clk",
+ "camss_top_ahb_clk", "ispif_ahb_clk",
+ "csi0_src_clk", "csi1_src_clk",
+ "csi2_src_clk", "csi3_src_clk",
+ "csi0_rdi_clk", "csi1_rdi_clk",
+ "csi2_rdi_clk", "csi3_rdi_clk",
+ "csi0_pix_clk", "csi1_pix_clk",
+ "csi2_pix_clk", "csi3_pix_clk",
+ "camss_csi0_clk", "camss_csi1_clk",
+ "camss_csi2_clk", "camss_csi3_clk",
+ "camss_vfe_vfe0_clk",
+ "vfe0_clk_src", "camss_csi_vfe0_clk",
+ "camss_vfe_vfe1_clk",
+ "vfe1_clk_src", "camss_csi_vfe1_clk";
+ qcom,clock-rates = <0 0 0 0 0
0 0 0 0
0 0 0 0
0 0 0 0
0 0 0 0
0 0 0
0 0 0>;
- qcom,clock-control = "NO_SET_RATE", "NO_SET_RATE",
- "NO_SET_RATE", "INIT_RATE",
- "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
- "INIT_RATE",
- "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
- "INIT_RATE",
- "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
- "INIT_RATE",
- "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
- "INIT_RATE",
- "NO_SET_RATE", "NO_SET_RATE", "INIT_RATE",
- "NO_SET_RATE", "NO_SET_RATE";
- status = "disabled";
+ qcom,clock-control = "INIT_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE",
+ "INIT_RATE", "INIT_RATE",
+ "INIT_RATE", "INIT_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE",
+ "INIT_RATE", "NO_SET_RATE",
+ "NO_SET_RATE",
+ "INIT_RATE", "NO_SET_RATE";
+ status = "ok";
};
vfe0: qcom,vfe0@ca10000 {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
index 29429546ac62..4173152f7b8c 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
@@ -175,6 +175,30 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
+&dsi_dual_nt35597_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_nt35597_dsc_video {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_nt35597_dsc_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
&pmicobalt_haptics {
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-coresight.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-coresight.dtsi
index c6367c05775c..53f415153a21 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-coresight.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-coresight.dtsi
@@ -165,6 +165,14 @@
};
};
port@1 {
+ reg = <0>;
+ funnel_in0_in_rpm_etm0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&rpm_etm0_out_funnel_in0>;
+ };
+ };
+ port@2 {
reg = <3>;
funnel_in0_in_funnel_spss: endpoint {
slave-mode;
@@ -172,7 +180,7 @@
<&funnel_spss_out_funnel_in0>;
};
};
- port@2 {
+ port@3 {
reg = <6>;
funnel_in0_in_funnel_qatb: endpoint {
slave-mode;
@@ -180,7 +188,7 @@
<&funnel_qatb_out_funnel_in0>;
};
};
- port@3 {
+ port@4 {
reg = <7>;
funnel_in0_in_stm: endpoint {
slave-mode;
@@ -231,6 +239,22 @@
};
};
port@3 {
+ reg = <4>;
+ funnel_in1_in_audio_etm0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&audio_etm0_out_funnel_in1>;
+ };
+ };
+ port@4 {
+ reg = <5>;
+ funnel_in1_in_modem_etm0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&modem_etm0_out_funnel_in1>;
+ };
+ };
+ port@5 {
reg = <6>;
funnel_in1_in_funnel_apss_merg: endpoint {
slave-mode;
@@ -598,6 +622,10 @@
clocks = <&clock_gcc clk_qdss_clk>,
<&clock_gcc clk_qdss_a_clk>;
clock-names = "core_clk", "core_a_clk";
+
+ qcom,cti-gpio-trigout = <4>;
+ pinctrl-names = "cti-trigout-pctrl";
+ pinctrl-0 = <&trigout_a>;
};
cti3: cti@6013000 {
@@ -1396,4 +1424,53 @@
qcom,hwevent-clks = "core_mmss_clk";
};
+
+ csr: csr@6001000 {
+ compatible = "qcom,coresight-csr";
+ reg = <0x6001000 0x1000>;
+ reg-names = "csr-base";
+
+ coresight-name = "coresight-csr";
+
+ qcom,blk-size = <1>;
+ };
+
+ modem_etm0 {
+ compatible = "qcom,coresight-remote-etm";
+
+ coresight-name = "coresight-modem-etm0";
+ qcom,inst-id = <2>;
+
+ port{
+ modem_etm0_out_funnel_in1: endpoint {
+ remote-endpoint = <&funnel_in1_in_modem_etm0>;
+ };
+ };
+ };
+
+ audio_etm0 {
+ compatible = "qcom,coresight-remote-etm";
+
+ coresight-name = "coresight-audio-etm0";
+ qcom,inst-id = <5>;
+
+ port{
+ audio_etm0_out_funnel_in1: endpoint {
+ remote-endpoint = <&funnel_in1_in_audio_etm0>;
+ };
+ };
+ };
+
+ rpm_etm0 {
+ compatible = "qcom,coresight-remote-etm";
+
+ coresight-name = "coresight-rpm-etm0";
+ qcom,inst-id = <4>;
+
+ port{
+ rpm_etm0_out_funnel_in0: endpoint {
+ remote-endpoint = <&funnel_in0_in_rpm_etm0>;
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
index 1200282be589..3eefd37c4a58 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
@@ -13,6 +13,9 @@
#include "dsi-panel-sim-video.dtsi"
#include "dsi-panel-sim-dualmipi-video.dtsi"
#include "dsi-panel-nt35597-dualmipi-wqxga-video.dtsi"
+#include "dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi"
+#include "dsi-panel-nt35597-dsc-wqxga-video.dtsi"
+#include "dsi-panel-nt35597-dsc-wqxga-cmd.dtsi"
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -70,3 +73,21 @@
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
};
+
+&dsi_dual_nt35597_cmd {
+ qcom,mdss-dsi-panel-timings = [00 1a 04 06 0a 0a 05 06 05 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x0d>;
+ qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
+
+&dsi_nt35597_dsc_video {
+ qcom,mdss-dsi-panel-timings = [00 12 03 04 07 07 04 04 03 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x0b>;
+ qcom,mdss-dsi-t-clk-pre = <0x24>;
+};
+
+&dsi_nt35597_dsc_cmd {
+ qcom,mdss-dsi-panel-timings = [00 12 03 04 07 07 04 04 03 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x0b>;
+ qcom,mdss-dsi-t-clk-pre = <0x24>;
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
index 2523ce88803f..29f02486398e 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
@@ -1479,5 +1479,18 @@
output-low;
};
};
+
+ trigout_a: trigout_a {
+ mux {
+ pins = "gpio58";
+ function = "qdss_cti1_a";
+ };
+
+ config {
+ pins = "gpio58";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
index be83a130e357..c8dce43ba5a5 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
@@ -23,3 +23,6 @@
qcom,msm-id = <292 0x20000>;
};
+&clock_gcc {
+ compatible = "qcom,gcc-cobalt-v2";
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index 52f0d3249859..b0a5970bd93e 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -2091,6 +2091,63 @@
hyplog-size-offset = <0x414>; /* 0x066BFB34 */
};
+ qcom_crypto: qcrypto@1DE0000 {
+ compatible = "qcom,qcrypto";
+ reg = <0x1DE0000 0x20000>,
+ <0x1DC4000 0x24000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 206 0>;
+ qcom,bam-pipe-pair = <2>;
+ qcom,ce-hw-instance = <0>;
+ qcom,ce-device = <0>;
+ qcom,bam-ee = <0>;
+ qcom,ce-hw-shared;
+ qcom,clk-mgmt-sus-res;
+ qcom,msm-bus,name = "qcrypto-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <55 512 0 0>,
+ <55 512 3936000 393600>;
+ clock-names = "core_clk_src", "core_clk",
+ "iface_clk", "bus_clk";
+ clocks = <&clock_gcc clk_qcrypto_ce1_clk>,
+ <&clock_gcc clk_qcrypto_ce1_clk>,
+ <&clock_gcc clk_gcc_ce1_ahb_m_clk>,
+ <&clock_gcc clk_gcc_ce1_axi_m_clk>;
+ qcom,ce-opp-freq = <171430000>;
+ qcom,use-sw-aes-cbc-ecb-ctr-algo;
+ qcom,use-sw-aes-xts-algo;
+ qcom,use-sw-aes-ccm-algo;
+ qcom,use-sw-ahash-algo;
+ };
+
+ qcom_cedev: qcedev@1DE0000{
+ compatible = "qcom,qcedev";
+ reg = <0x1DE0000 0x20000>,
+ <0x1DC4000 0x24000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 206 0>;
+ qcom,bam-pipe-pair = <1>;
+ qcom,ce-hw-instance = <0>;
+ qcom,ce-device = <0>;
+ qcom,ce-hw-shared;
+ qcom,bam-ee = <0>;
+ qcom,msm-bus,name = "qcedev-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <55 512 0 0>,
+ <55 512 3936000 393600>;
+ clock-names = "core_clk_src", "core_clk",
+ "iface_clk", "bus_clk";
+ clocks = <&clock_gcc clk_qcedev_ce1_clk>,
+ <&clock_gcc clk_qcedev_ce1_clk>,
+ <&clock_gcc clk_gcc_ce1_ahb_m_clk>,
+ <&clock_gcc clk_gcc_ce1_axi_m_clk>;
+ qcom,ce-opp-freq = <171430000>;
+ };
+
mitigation_profile0: qcom,limit_info-0 {
qcom,temperature-sensor = <&sensor_information1>;
qcom,hotplug-mitigation-enable;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5e20e3293a53..706129c86096 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -74,7 +74,6 @@ config ARM64
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GENERIC_DMA_COHERENT
- select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MEMBLOCK
select HAVE_PATA_PLATFORM
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 0f75fab3833b..f0a81fa39e30 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -273,6 +273,8 @@ CONFIG_PPPOLAC=y
CONFIG_PPPOPNS=y
CONFIG_USB_USBNET=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
CONFIG_CLD_LL_CORE=y
CONFIG_QPNP_POWER_ON=y
CONFIG_INPUT_EVDEV=y
@@ -577,6 +579,10 @@ CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index c45cc64cd1e5..8eec686715a4 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -273,6 +273,8 @@ CONFIG_PPP_MPPE=y
CONFIG_PPPOLAC=y
CONFIG_PPPOPNS=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
CONFIG_CLD_LL_CORE=y
CONFIG_QPNP_POWER_ON=y
CONFIG_INPUT_EVDEV=y
@@ -656,6 +658,10 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/include/asm/debugv8.h b/arch/arm64/include/asm/debugv8.h
new file mode 100644
index 000000000000..6a2538279f39
--- /dev/null
+++ b/arch/arm64/include/asm/debugv8.h
@@ -0,0 +1,229 @@
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_DEBUGV8_H
+#define __ASM_DEBUGV8_H
+
+#include <linux/types.h>
+
+/* 32 bit register reads for aarch 64 bit */
+#define dbg_readl(reg) RSYSL_##reg()
+/* 64 bit register reads for aarch 64 bit */
+#define dbg_readq(reg) RSYSQ_##reg()
+/* 32 and 64 bit register writes for aarch 64 bit */
+#define dbg_write(val, reg) WSYS_##reg(val)
+
+#define MRSL(reg) \
+({ \
+uint32_t val; \
+asm volatile("mrs %0, "#reg : "=r" (val)); \
+val; \
+})
+
+#define MRSQ(reg) \
+({ \
+uint64_t val; \
+asm volatile("mrs %0, "#reg : "=r" (val)); \
+val; \
+})
+
+#define MSR(val, reg) \
+({ \
+asm volatile("msr "#reg", %0" : : "r" (val)); \
+})
+
+/*
+ * Debug Feature Register
+ *
+ * Read only
+ */
+#define RSYSQ_ID_AA64DFR0_EL1() MRSQ(ID_AA64DFR0_EL1)
+
+/*
+ * Debug Registers
+ *
+ * Available only in DBGv8
+ *
+ * Read only
+ * MDCCSR_EL0, MDRAR_EL1, OSLSR_EL1, DBGDTRRX_EL0, DBGAUTHSTATUS_EL1
+ *
+ * Write only
+ * DBGDTRTX_EL0, OSLAR_EL1
+ */
+/* 32 bit registers */
+#define RSYSL_DBGDTRRX_EL0() MRSL(DBGDTRRX_EL0)
+#define RSYSL_MDCCSR_EL0() MRSL(MDCCSR_EL0)
+#define RSYSL_MDSCR_EL1() MRSL(MDSCR_EL1)
+#define RSYSL_OSDTRRX_EL1() MRSL(OSDTRRX_EL1)
+#define RSYSL_OSDTRTX_EL1() MRSL(OSDTRTX_EL1)
+#define RSYSL_OSDLR_EL1() MRSL(OSDLR_EL1)
+#define RSYSL_OSLSR_EL1() MRSL(OSLSR_EL1)
+#define RSYSL_MDCCINT_EL1() MRSL(MDCCINT_EL1)
+#define RSYSL_OSECCR_EL1() MRSL(OSECCR_EL1)
+#define RSYSL_DBGPRCR_EL1() MRSL(DBGPRCR_EL1)
+#define RSYSL_DBGBCR0_EL1() MRSL(DBGBCR0_EL1)
+#define RSYSL_DBGBCR1_EL1() MRSL(DBGBCR1_EL1)
+#define RSYSL_DBGBCR2_EL1() MRSL(DBGBCR2_EL1)
+#define RSYSL_DBGBCR3_EL1() MRSL(DBGBCR3_EL1)
+#define RSYSL_DBGBCR4_EL1() MRSL(DBGBCR4_EL1)
+#define RSYSL_DBGBCR5_EL1() MRSL(DBGBCR5_EL1)
+#define RSYSL_DBGBCR6_EL1() MRSL(DBGBCR6_EL1)
+#define RSYSL_DBGBCR7_EL1() MRSL(DBGBCR7_EL1)
+#define RSYSL_DBGBCR8_EL1() MRSL(DBGBCR8_EL1)
+#define RSYSL_DBGBCR9_EL1() MRSL(DBGBCR9_EL1)
+#define RSYSL_DBGBCR10_EL1() MRSL(DBGBCR10_EL1)
+#define RSYSL_DBGBCR11_EL1() MRSL(DBGBCR11_EL1)
+#define RSYSL_DBGBCR12_EL1() MRSL(DBGBCR12_EL1)
+#define RSYSL_DBGBCR13_EL1() MRSL(DBGBCR13_EL1)
+#define RSYSL_DBGBCR14_EL1() MRSL(DBGBCR14_EL1)
+#define RSYSL_DBGBCR15_EL1() MRSL(DBGBCR15_EL1)
+#define RSYSL_DBGWCR0_EL1() MRSL(DBGWCR0_EL1)
+#define RSYSL_DBGWCR1_EL1() MRSL(DBGWCR1_EL1)
+#define RSYSL_DBGWCR2_EL1() MRSL(DBGWCR2_EL1)
+#define RSYSL_DBGWCR3_EL1() MRSL(DBGWCR3_EL1)
+#define RSYSL_DBGWCR4_EL1() MRSL(DBGWCR4_EL1)
+#define RSYSL_DBGWCR5_EL1() MRSL(DBGWCR5_EL1)
+#define RSYSL_DBGWCR6_EL1() MRSL(DBGWCR6_EL1)
+#define RSYSL_DBGWCR7_EL1() MRSL(DBGWCR7_EL1)
+#define RSYSL_DBGWCR8_EL1() MRSL(DBGWCR8_EL1)
+#define RSYSL_DBGWCR9_EL1() MRSL(DBGWCR9_EL1)
+#define RSYSL_DBGWCR10_EL1() MRSL(DBGWCR10_EL1)
+#define RSYSL_DBGWCR11_EL1() MRSL(DBGWCR11_EL1)
+#define RSYSL_DBGWCR12_EL1() MRSL(DBGWCR12_EL1)
+#define RSYSL_DBGWCR13_EL1() MRSL(DBGWCR13_EL1)
+#define RSYSL_DBGWCR14_EL1() MRSL(DBGWCR14_EL1)
+#define RSYSL_DBGWCR15_EL1() MRSL(DBGWCR15_EL1)
+#define RSYSL_DBGCLAIMSET_EL1() MRSL(DBGCLAIMSET_EL1)
+#define RSYSL_DBGCLAIMCLR_EL1() MRSL(DBGCLAIMCLR_EL1)
+#define RSYSL_DBGAUTHSTATUS_EL1() MRSL(DBGAUTHSTATUS_EL1)
+#define RSYSL_DBGVCR32_EL2() MRSL(DBGVCR32_EL2)
+#define RSYSL_MDCR_EL2() MRSL(MDCR_EL2)
+#define RSYSL_MDCR_EL3() MRSL(MDCR_EL3)
+/* 64 bit registers */
+#define RSYSQ_DBGDTR_EL0() MRSQ(DBGDTR_EL0)
+#define RSYSQ_MDRAR_EL1() MRSQ(MDRAR_EL1)
+#define RSYSQ_DBGBVR0_EL1() MRSQ(DBGBVR0_EL1)
+#define RSYSQ_DBGBVR1_EL1() MRSQ(DBGBVR1_EL1)
+#define RSYSQ_DBGBVR2_EL1() MRSQ(DBGBVR2_EL1)
+#define RSYSQ_DBGBVR3_EL1() MRSQ(DBGBVR3_EL1)
+#define RSYSQ_DBGBVR4_EL1() MRSQ(DBGBVR4_EL1)
+#define RSYSQ_DBGBVR5_EL1() MRSQ(DBGBVR5_EL1)
+#define RSYSQ_DBGBVR6_EL1() MRSQ(DBGBVR6_EL1)
+#define RSYSQ_DBGBVR7_EL1() MRSQ(DBGBVR7_EL1)
+#define RSYSQ_DBGBVR8_EL1() MRSQ(DBGBVR8_EL1)
+#define RSYSQ_DBGBVR9_EL1() MRSQ(DBGBVR9_EL1)
+#define RSYSQ_DBGBVR10_EL1() MRSQ(DBGBVR10_EL1)
+#define RSYSQ_DBGBVR11_EL1() MRSQ(DBGBVR11_EL1)
+#define RSYSQ_DBGBVR12_EL1() MRSQ(DBGBVR12_EL1)
+#define RSYSQ_DBGBVR13_EL1() MRSQ(DBGBVR13_EL1)
+#define RSYSQ_DBGBVR14_EL1() MRSQ(DBGBVR14_EL1)
+#define RSYSQ_DBGBVR15_EL1() MRSQ(DBGBVR15_EL1)
+#define RSYSQ_DBGWVR0_EL1() MRSQ(DBGWVR0_EL1)
+#define RSYSQ_DBGWVR1_EL1() MRSQ(DBGWVR1_EL1)
+#define RSYSQ_DBGWVR2_EL1() MRSQ(DBGWVR2_EL1)
+#define RSYSQ_DBGWVR3_EL1() MRSQ(DBGWVR3_EL1)
+#define RSYSQ_DBGWVR4_EL1() MRSQ(DBGWVR4_EL1)
+#define RSYSQ_DBGWVR5_EL1() MRSQ(DBGWVR5_EL1)
+#define RSYSQ_DBGWVR6_EL1() MRSQ(DBGWVR6_EL1)
+#define RSYSQ_DBGWVR7_EL1() MRSQ(DBGWVR7_EL1)
+#define RSYSQ_DBGWVR8_EL1() MRSQ(DBGWVR8_EL1)
+#define RSYSQ_DBGWVR9_EL1() MRSQ(DBGWVR9_EL1)
+#define RSYSQ_DBGWVR10_EL1() MRSQ(DBGWVR10_EL1)
+#define RSYSQ_DBGWVR11_EL1() MRSQ(DBGWVR11_EL1)
+#define RSYSQ_DBGWVR12_EL1() MRSQ(DBGWVR12_EL1)
+#define RSYSQ_DBGWVR13_EL1() MRSQ(DBGWVR13_EL1)
+#define RSYSQ_DBGWVR14_EL1() MRSQ(DBGWVR14_EL1)
+#define RSYSQ_DBGWVR15_EL1() MRSQ(DBGWVR15_EL1)
+
+/* 32 bit registers */
+#define WSYS_DBGDTRTX_EL0(val) MSR(val, DBGDTRTX_EL0)
+#define WSYS_MDCCINT_EL1(val) MSR(val, MDCCINT_EL1)
+#define WSYS_MDSCR_EL1(val) MSR(val, MDSCR_EL1)
+#define WSYS_OSDTRRX_EL1(val) MSR(val, OSDTRRX_EL1)
+#define WSYS_OSDTRTX_EL1(val) MSR(val, OSDTRTX_EL1)
+#define WSYS_OSDLR_EL1(val) MSR(val, OSDLR_EL1)
+#define WSYS_OSECCR_EL1(val) MSR(val, OSECCR_EL1)
+#define WSYS_DBGPRCR_EL1(val) MSR(val, DBGPRCR_EL1)
+#define WSYS_DBGBCR0_EL1(val) MSR(val, DBGBCR0_EL1)
+#define WSYS_DBGBCR1_EL1(val) MSR(val, DBGBCR1_EL1)
+#define WSYS_DBGBCR2_EL1(val) MSR(val, DBGBCR2_EL1)
+#define WSYS_DBGBCR3_EL1(val) MSR(val, DBGBCR3_EL1)
+#define WSYS_DBGBCR4_EL1(val) MSR(val, DBGBCR4_EL1)
+#define WSYS_DBGBCR5_EL1(val) MSR(val, DBGBCR5_EL1)
+#define WSYS_DBGBCR6_EL1(val) MSR(val, DBGBCR6_EL1)
+#define WSYS_DBGBCR7_EL1(val) MSR(val, DBGBCR7_EL1)
+#define WSYS_DBGBCR8_EL1(val) MSR(val, DBGBCR8_EL1)
+#define WSYS_DBGBCR9_EL1(val) MSR(val, DBGBCR9_EL1)
+#define WSYS_DBGBCR10_EL1(val) MSR(val, DBGBCR10_EL1)
+#define WSYS_DBGBCR11_EL1(val) MSR(val, DBGBCR11_EL1)
+#define WSYS_DBGBCR12_EL1(val) MSR(val, DBGBCR12_EL1)
+#define WSYS_DBGBCR13_EL1(val) MSR(val, DBGBCR13_EL1)
+#define WSYS_DBGBCR14_EL1(val) MSR(val, DBGBCR14_EL1)
+#define WSYS_DBGBCR15_EL1(val) MSR(val, DBGBCR15_EL1)
+#define WSYS_DBGWCR0_EL1(val) MSR(val, DBGWCR0_EL1)
+#define WSYS_DBGWCR1_EL1(val) MSR(val, DBGWCR1_EL1)
+#define WSYS_DBGWCR2_EL1(val) MSR(val, DBGWCR2_EL1)
+#define WSYS_DBGWCR3_EL1(val) MSR(val, DBGWCR3_EL1)
+#define WSYS_DBGWCR4_EL1(val) MSR(val, DBGWCR4_EL1)
+#define WSYS_DBGWCR5_EL1(val) MSR(val, DBGWCR5_EL1)
+#define WSYS_DBGWCR6_EL1(val) MSR(val, DBGWCR6_EL1)
+#define WSYS_DBGWCR7_EL1(val) MSR(val, DBGWCR7_EL1)
+#define WSYS_DBGWCR8_EL1(val) MSR(val, DBGWCR8_EL1)
+#define WSYS_DBGWCR9_EL1(val) MSR(val, DBGWCR9_EL1)
+#define WSYS_DBGWCR10_EL1(val) MSR(val, DBGWCR10_EL1)
+#define WSYS_DBGWCR11_EL1(val) MSR(val, DBGWCR11_EL1)
+#define WSYS_DBGWCR12_EL1(val) MSR(val, DBGWCR12_EL1)
+#define WSYS_DBGWCR13_EL1(val) MSR(val, DBGWCR13_EL1)
+#define WSYS_DBGWCR14_EL1(val) MSR(val, DBGWCR14_EL1)
+#define WSYS_DBGWCR15_EL1(val) MSR(val, DBGWCR15_EL1)
+#define WSYS_DBGCLAIMSET_EL1(val) MSR(val, DBGCLAIMSET_EL1)
+#define WSYS_DBGCLAIMCLR_EL1(val) MSR(val, DBGCLAIMCLR_EL1)
+#define WSYS_OSLAR_EL1(val) MSR(val, OSLAR_EL1)
+#define WSYS_DBGVCR32_EL2(val) MSR(val, DBGVCR32_EL2)
+#define WSYS_MDCR_EL2(val) MSR(val, MDCR_EL2)
+#define WSYS_MDCR_EL3(val) MSR(val, MDCR_EL3)
+/* 64 bit registers */
+#define WSYS_DBGDTR_EL0(val) MSR(val, DBGDTR_EL0)
+#define WSYS_DBGBVR0_EL1(val) MSR(val, DBGBVR0_EL1)
+#define WSYS_DBGBVR1_EL1(val) MSR(val, DBGBVR1_EL1)
+#define WSYS_DBGBVR2_EL1(val) MSR(val, DBGBVR2_EL1)
+#define WSYS_DBGBVR3_EL1(val) MSR(val, DBGBVR3_EL1)
+#define WSYS_DBGBVR4_EL1(val) MSR(val, DBGBVR4_EL1)
+#define WSYS_DBGBVR5_EL1(val) MSR(val, DBGBVR5_EL1)
+#define WSYS_DBGBVR6_EL1(val) MSR(val, DBGBVR6_EL1)
+#define WSYS_DBGBVR7_EL1(val) MSR(val, DBGBVR7_EL1)
+#define WSYS_DBGBVR8_EL1(val) MSR(val, DBGBVR8_EL1)
+#define WSYS_DBGBVR9_EL1(val) MSR(val, DBGBVR9_EL1)
+#define WSYS_DBGBVR10_EL1(val) MSR(val, DBGBVR10_EL1)
+#define WSYS_DBGBVR11_EL1(val) MSR(val, DBGBVR11_EL1)
+#define WSYS_DBGBVR12_EL1(val) MSR(val, DBGBVR12_EL1)
+#define WSYS_DBGBVR13_EL1(val) MSR(val, DBGBVR13_EL1)
+#define WSYS_DBGBVR14_EL1(val) MSR(val, DBGBVR14_EL1)
+#define WSYS_DBGBVR15_EL1(val) MSR(val, DBGBVR15_EL1)
+#define WSYS_DBGWVR0_EL1(val) MSR(val, DBGWVR0_EL1)
+#define WSYS_DBGWVR1_EL1(val) MSR(val, DBGWVR1_EL1)
+#define WSYS_DBGWVR2_EL1(val) MSR(val, DBGWVR2_EL1)
+#define WSYS_DBGWVR3_EL1(val) MSR(val, DBGWVR3_EL1)
+#define WSYS_DBGWVR4_EL1(val) MSR(val, DBGWVR4_EL1)
+#define WSYS_DBGWVR5_EL1(val) MSR(val, DBGWVR5_EL1)
+#define WSYS_DBGWVR6_EL1(val) MSR(val, DBGWVR6_EL1)
+#define WSYS_DBGWVR7_EL1(val) MSR(val, DBGWVR7_EL1)
+#define WSYS_DBGWVR8_EL1(val) MSR(val, DBGWVR8_EL1)
+#define WSYS_DBGWVR9_EL1(val) MSR(val, DBGWVR9_EL1)
+#define WSYS_DBGWVR10_EL1(val) MSR(val, DBGWVR10_EL1)
+#define WSYS_DBGWVR11_EL1(val) MSR(val, DBGWVR11_EL1)
+#define WSYS_DBGWVR12_EL1(val) MSR(val, DBGWVR12_EL1)
+#define WSYS_DBGWVR13_EL1(val) MSR(val, DBGWVR13_EL1)
+#define WSYS_DBGWVR14_EL1(val) MSR(val, DBGWVR14_EL1)
+#define WSYS_DBGWVR15_EL1(val) MSR(val, DBGWVR15_EL1)
+
+#endif
diff --git a/arch/arm64/include/asm/etmv4x.h b/arch/arm64/include/asm/etmv4x.h
new file mode 100644
index 000000000000..91239f779587
--- /dev/null
+++ b/arch/arm64/include/asm/etmv4x.h
@@ -0,0 +1,385 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ETMV4X_H
+#define __ASM_ETMV4X_H
+
+#include <linux/types.h>
+
+/* 32 bit register reads for AArch64 */
+#define trc_readl(reg) RSYSL_##reg()
+/* 64 bit register reads for AArch64 */
+#define trc_readq(reg) RSYSQ_##reg()
+/* 32 and 64 bit register writes for AArch64 */
+#define trc_write(val, reg) WSYS_##reg(val)
+
+#define MRSL(op0, op1, crn, crm, op2) \
+({ \
+uint32_t val; \
+asm volatile("mrs %0, S"#op0"_"#op1"_"#crn"_"#crm"_"#op2 : "=r" (val)); \
+val; \
+})
+
+#define MRSQ(op0, op1, crn, crm, op2) \
+({ \
+uint64_t val; \
+asm volatile("mrs %0, S"#op0"_"#op1"_"#crn"_"#crm"_"#op2 : "=r" (val)); \
+val; \
+})
+
+#define MSR(val, op0, op1, crn, crm, op2) \
+({ \
+asm volatile("msr S"#op0"_"#op1"_"#crn"_"#crm"_"#op2", %0" : : "r" (val)); \
+})
+
+/* Clock and Power Management Register */
+#define RSYSL_CPMR_EL1() MRSL(3, 7, c15, c0, 5)
+#define WSYS_CPMR_EL1(val) MSR(val, 3, 7, c15, c0, 5)
+
+/*
+ * ETMv4 Registers
+ *
+ * Read only
+ * ETMAUTHSTATUS, ETMDEVARCH, ETMDEVID, ETMIDRn[0-13], ETMOSLSR, ETMSTATR
+ *
+ * Write only
+ * ETMOSLAR
+ */
+/* 32 bit registers */
+#define RSYSL_ETMAUTHSTATUS() MRSL(2, 1, c7, c14, 6)
+#define RSYSL_ETMAUXCTLR() MRSL(2, 1, c0, c6, 0)
+#define RSYSL_ETMCCCTLR() MRSL(2, 1, c0, c14, 0)
+#define RSYSL_ETMCIDCCTLR0() MRSL(2, 1, c3, c0, 2)
+#define RSYSL_ETMCNTCTLR0() MRSL(2, 1, c0, c4, 5)
+#define RSYSL_ETMCNTCTLR1() MRSL(2, 1, c0, c5, 5)
+#define RSYSL_ETMCNTCTLR2() MRSL(2, 1, c0, c6, 5)
+#define RSYSL_ETMCNTCTLR3() MRSL(2, 1, c0, c7, 5)
+#define RSYSL_ETMCNTRLDVR0() MRSL(2, 1, c0, c0, 5)
+#define RSYSL_ETMCNTRLDVR1() MRSL(2, 1, c0, c1, 5)
+#define RSYSL_ETMCNTRLDVR2() MRSL(2, 1, c0, c2, 5)
+#define RSYSL_ETMCNTRLDVR3() MRSL(2, 1, c0, c3, 5)
+#define RSYSL_ETMCNTVR0() MRSL(2, 1, c0, c8, 5)
+#define RSYSL_ETMCNTVR1() MRSL(2, 1, c0, c9, 5)
+#define RSYSL_ETMCNTVR2() MRSL(2, 1, c0, c10, 5)
+#define RSYSL_ETMCNTVR3() MRSL(2, 1, c0, c11, 5)
+#define RSYSL_ETMCONFIGR() MRSL(2, 1, c0, c4, 0)
+#define RSYSL_ETMDEVARCH() MRSL(2, 1, c7, c15, 6)
+#define RSYSL_ETMDEVID() MRSL(2, 1, c7, c2, 7)
+#define RSYSL_ETMEVENTCTL0R() MRSL(2, 1, c0, c8, 0)
+#define RSYSL_ETMEVENTCTL1R() MRSL(2, 1, c0, c9, 0)
+#define RSYSL_ETMEXTINSELR() MRSL(2, 1, c0, c8, 4)
+#define RSYSL_ETMIDR0() MRSL(2, 1, c0, c8, 7)
+#define RSYSL_ETMIDR1() MRSL(2, 1, c0, c9, 7)
+#define RSYSL_ETMIDR10() MRSL(2, 1, c0, c2, 6)
+#define RSYSL_ETMIDR11() MRSL(2, 1, c0, c3, 6)
+#define RSYSL_ETMIDR12() MRSL(2, 1, c0, c4, 6)
+#define RSYSL_ETMIDR13() MRSL(2, 1, c0, c5, 6)
+#define RSYSL_ETMIDR2() MRSL(2, 1, c0, c10, 7)
+#define RSYSL_ETMIDR3() MRSL(2, 1, c0, c11, 7)
+#define RSYSL_ETMIDR4() MRSL(2, 1, c0, c12, 7)
+#define RSYSL_ETMIDR5() MRSL(2, 1, c0, c13, 7)
+#define RSYSL_ETMIDR6() MRSL(2, 1, c0, c14, 7)
+#define RSYSL_ETMIDR7() MRSL(2, 1, c0, c15, 7)
+#define RSYSL_ETMIDR8() MRSL(2, 1, c0, c0, 6)
+#define RSYSL_ETMIDR9() MRSL(2, 1, c0, c1, 6)
+#define RSYSL_ETMIMSPEC0() MRSL(2, 1, c0, c0, 7)
+#define RSYSL_ETMOSLSR() MRSL(2, 1, c1, c1, 4)
+#define RSYSL_ETMPRGCTLR() MRSL(2, 1, c0, c1, 0)
+#define RSYSL_ETMRSCTLR10() MRSL(2, 1, c1, c10, 0)
+#define RSYSL_ETMRSCTLR11() MRSL(2, 1, c1, c11, 0)
+#define RSYSL_ETMRSCTLR12() MRSL(2, 1, c1, c12, 0)
+#define RSYSL_ETMRSCTLR13() MRSL(2, 1, c1, c13, 0)
+#define RSYSL_ETMRSCTLR14() MRSL(2, 1, c1, c14, 0)
+#define RSYSL_ETMRSCTLR15() MRSL(2, 1, c1, c15, 0)
+#define RSYSL_ETMRSCTLR2() MRSL(2, 1, c1, c2, 0)
+#define RSYSL_ETMRSCTLR3() MRSL(2, 1, c1, c3, 0)
+#define RSYSL_ETMRSCTLR4() MRSL(2, 1, c1, c4, 0)
+#define RSYSL_ETMRSCTLR5() MRSL(2, 1, c1, c5, 0)
+#define RSYSL_ETMRSCTLR6() MRSL(2, 1, c1, c6, 0)
+#define RSYSL_ETMRSCTLR7() MRSL(2, 1, c1, c7, 0)
+#define RSYSL_ETMRSCTLR8() MRSL(2, 1, c1, c8, 0)
+#define RSYSL_ETMRSCTLR9() MRSL(2, 1, c1, c9, 0)
+#define RSYSL_ETMRSCTLR16() MRSL(2, 1, c1, c0, 1)
+#define RSYSL_ETMRSCTLR17() MRSL(2, 1, c1, c1, 1)
+#define RSYSL_ETMRSCTLR18() MRSL(2, 1, c1, c2, 1)
+#define RSYSL_ETMRSCTLR19() MRSL(2, 1, c1, c3, 1)
+#define RSYSL_ETMRSCTLR20() MRSL(2, 1, c1, c4, 1)
+#define RSYSL_ETMRSCTLR21() MRSL(2, 1, c1, c5, 1)
+#define RSYSL_ETMRSCTLR22() MRSL(2, 1, c1, c6, 1)
+#define RSYSL_ETMRSCTLR23() MRSL(2, 1, c1, c7, 1)
+#define RSYSL_ETMRSCTLR24() MRSL(2, 1, c1, c8, 1)
+#define RSYSL_ETMRSCTLR25() MRSL(2, 1, c1, c9, 1)
+#define RSYSL_ETMRSCTLR26() MRSL(2, 1, c1, c10, 1)
+#define RSYSL_ETMRSCTLR27() MRSL(2, 1, c1, c11, 1)
+#define RSYSL_ETMRSCTLR28() MRSL(2, 1, c1, c12, 1)
+#define RSYSL_ETMRSCTLR29() MRSL(2, 1, c1, c13, 1)
+#define RSYSL_ETMRSCTLR30() MRSL(2, 1, c1, c14, 1)
+#define RSYSL_ETMRSCTLR31() MRSL(2, 1, c1, c15, 1)
+#define RSYSL_ETMSEQEVR0() MRSL(2, 1, c0, c0, 4)
+#define RSYSL_ETMSEQEVR1() MRSL(2, 1, c0, c1, 4)
+#define RSYSL_ETMSEQEVR2() MRSL(2, 1, c0, c2, 4)
+#define RSYSL_ETMSEQRSTEVR() MRSL(2, 1, c0, c6, 4)
+#define RSYSL_ETMSEQSTR() MRSL(2, 1, c0, c7, 4)
+#define RSYSL_ETMSTALLCTLR() MRSL(2, 1, c0, c11, 0)
+#define RSYSL_ETMSTATR() MRSL(2, 1, c0, c3, 0)
+#define RSYSL_ETMSYNCPR() MRSL(2, 1, c0, c13, 0)
+#define RSYSL_ETMTRACEIDR() MRSL(2, 1, c0, c0, 1)
+#define RSYSL_ETMTSCTLR() MRSL(2, 1, c0, c12, 0)
+#define RSYSL_ETMVICTLR() MRSL(2, 1, c0, c0, 2)
+#define RSYSL_ETMVIIECTLR() MRSL(2, 1, c0, c1, 2)
+#define RSYSL_ETMVISSCTLR() MRSL(2, 1, c0, c2, 2)
+#define RSYSL_ETMSSCCR0() MRSL(2, 1, c1, c0, 2)
+#define RSYSL_ETMSSCCR1() MRSL(2, 1, c1, c1, 2)
+#define RSYSL_ETMSSCCR2() MRSL(2, 1, c1, c2, 2)
+#define RSYSL_ETMSSCCR3() MRSL(2, 1, c1, c3, 2)
+#define RSYSL_ETMSSCCR4() MRSL(2, 1, c1, c4, 2)
+#define RSYSL_ETMSSCCR5() MRSL(2, 1, c1, c5, 2)
+#define RSYSL_ETMSSCCR6() MRSL(2, 1, c1, c6, 2)
+#define RSYSL_ETMSSCCR7() MRSL(2, 1, c1, c7, 2)
+#define RSYSL_ETMSSCSR0() MRSL(2, 1, c1, c8, 2)
+#define RSYSL_ETMSSCSR1() MRSL(2, 1, c1, c9, 2)
+#define RSYSL_ETMSSCSR2() MRSL(2, 1, c1, c10, 2)
+#define RSYSL_ETMSSCSR3() MRSL(2, 1, c1, c11, 2)
+#define RSYSL_ETMSSCSR4() MRSL(2, 1, c1, c12, 2)
+#define RSYSL_ETMSSCSR5() MRSL(2, 1, c1, c13, 2)
+#define RSYSL_ETMSSCSR6() MRSL(2, 1, c1, c14, 2)
+#define RSYSL_ETMSSCSR7() MRSL(2, 1, c1, c15, 2)
+#define RSYSL_ETMSSPCICR0() MRSL(2, 1, c1, c0, 3)
+#define RSYSL_ETMSSPCICR1() MRSL(2, 1, c1, c1, 3)
+#define RSYSL_ETMSSPCICR2() MRSL(2, 1, c1, c2, 3)
+#define RSYSL_ETMSSPCICR3() MRSL(2, 1, c1, c3, 3)
+#define RSYSL_ETMSSPCICR4() MRSL(2, 1, c1, c4, 3)
+#define RSYSL_ETMSSPCICR5() MRSL(2, 1, c1, c5, 3)
+#define RSYSL_ETMSSPCICR6() MRSL(2, 1, c1, c6, 3)
+#define RSYSL_ETMSSPCICR7() MRSL(2, 1, c1, c7, 3)
+
+/* 64 bit registers */
+#define RSYSQ_ETMACATR0() MRSQ(2, 1, c2, c0, 2)
+#define RSYSQ_ETMACATR1() MRSQ(2, 1, c2, c2, 2)
+#define RSYSQ_ETMACATR2() MRSQ(2, 1, c2, c4, 2)
+#define RSYSQ_ETMACATR3() MRSQ(2, 1, c2, c6, 2)
+#define RSYSQ_ETMACATR4() MRSQ(2, 1, c2, c8, 2)
+#define RSYSQ_ETMACATR5() MRSQ(2, 1, c2, c10, 2)
+#define RSYSQ_ETMACATR6() MRSQ(2, 1, c2, c12, 2)
+#define RSYSQ_ETMACATR7() MRSQ(2, 1, c2, c14, 2)
+#define RSYSQ_ETMACATR8() MRSQ(2, 1, c2, c0, 3)
+#define RSYSQ_ETMACATR9() MRSQ(2, 1, c2, c2, 3)
+#define RSYSQ_ETMACATR10() MRSQ(2, 1, c2, c4, 3)
+#define RSYSQ_ETMACATR11() MRSQ(2, 1, c2, c6, 3)
+#define RSYSQ_ETMACATR12() MRSQ(2, 1, c2, c8, 3)
+#define RSYSQ_ETMACATR13() MRSQ(2, 1, c2, c10, 3)
+#define RSYSQ_ETMACATR14() MRSQ(2, 1, c2, c12, 3)
+#define RSYSQ_ETMACATR15() MRSQ(2, 1, c2, c14, 3)
+#define RSYSQ_ETMCIDCVR0() MRSQ(2, 1, c3, c0, 0)
+#define RSYSQ_ETMCIDCVR1() MRSQ(2, 1, c3, c2, 0)
+#define RSYSQ_ETMCIDCVR2() MRSQ(2, 1, c3, c4, 0)
+#define RSYSQ_ETMCIDCVR3() MRSQ(2, 1, c3, c6, 0)
+#define RSYSQ_ETMCIDCVR4() MRSQ(2, 1, c3, c8, 0)
+#define RSYSQ_ETMCIDCVR5() MRSQ(2, 1, c3, c10, 0)
+#define RSYSQ_ETMCIDCVR6() MRSQ(2, 1, c3, c12, 0)
+#define RSYSQ_ETMCIDCVR7() MRSQ(2, 1, c3, c14, 0)
+#define RSYSQ_ETMACVR0() MRSQ(2, 1, c2, c0, 0)
+#define RSYSQ_ETMACVR1() MRSQ(2, 1, c2, c2, 0)
+#define RSYSQ_ETMACVR2() MRSQ(2, 1, c2, c4, 0)
+#define RSYSQ_ETMACVR3() MRSQ(2, 1, c2, c6, 0)
+#define RSYSQ_ETMACVR4() MRSQ(2, 1, c2, c8, 0)
+#define RSYSQ_ETMACVR5() MRSQ(2, 1, c2, c10, 0)
+#define RSYSQ_ETMACVR6() MRSQ(2, 1, c2, c12, 0)
+#define RSYSQ_ETMACVR7() MRSQ(2, 1, c2, c14, 0)
+#define RSYSQ_ETMACVR8() MRSQ(2, 1, c2, c0, 1)
+#define RSYSQ_ETMACVR9() MRSQ(2, 1, c2, c2, 1)
+#define RSYSQ_ETMACVR10() MRSQ(2, 1, c2, c4, 1)
+#define RSYSQ_ETMACVR11() MRSQ(2, 1, c2, c6, 1)
+#define RSYSQ_ETMACVR12() MRSQ(2, 1, c2, c8, 1)
+#define RSYSQ_ETMACVR13() MRSQ(2, 1, c2, c10, 1)
+#define RSYSQ_ETMACVR14() MRSQ(2, 1, c2, c12, 1)
+#define RSYSQ_ETMACVR15() MRSQ(2, 1, c2, c14, 1)
+#define RSYSQ_ETMVMIDCVR0() MRSQ(2, 1, c3, c0, 1)
+#define RSYSQ_ETMVMIDCVR1() MRSQ(2, 1, c3, c2, 1)
+#define RSYSQ_ETMVMIDCVR2() MRSQ(2, 1, c3, c4, 1)
+#define RSYSQ_ETMVMIDCVR3() MRSQ(2, 1, c3, c6, 1)
+#define RSYSQ_ETMVMIDCVR4() MRSQ(2, 1, c3, c8, 1)
+#define RSYSQ_ETMVMIDCVR5() MRSQ(2, 1, c3, c10, 1)
+#define RSYSQ_ETMVMIDCVR6() MRSQ(2, 1, c3, c12, 1)
+#define RSYSQ_ETMVMIDCVR7() MRSQ(2, 1, c3, c14, 1)
+#define RSYSQ_ETMDVCVR0() MRSQ(2, 1, c2, c0, 4)
+#define RSYSQ_ETMDVCVR1() MRSQ(2, 1, c2, c4, 4)
+#define RSYSQ_ETMDVCVR2() MRSQ(2, 1, c2, c8, 4)
+#define RSYSQ_ETMDVCVR3() MRSQ(2, 1, c2, c12, 4)
+#define RSYSQ_ETMDVCVR4() MRSQ(2, 1, c2, c0, 5)
+#define RSYSQ_ETMDVCVR5() MRSQ(2, 1, c2, c4, 5)
+#define RSYSQ_ETMDVCVR6() MRSQ(2, 1, c2, c8, 5)
+#define RSYSQ_ETMDVCVR7() MRSQ(2, 1, c2, c12, 5)
+#define RSYSQ_ETMDVCMR0() MRSQ(2, 1, c2, c0, 6)
+#define RSYSQ_ETMDVCMR1() MRSQ(2, 1, c2, c4, 6)
+#define RSYSQ_ETMDVCMR2() MRSQ(2, 1, c2, c8, 6)
+#define RSYSQ_ETMDVCMR3() MRSQ(2, 1, c2, c12, 6)
+#define RSYSQ_ETMDVCMR4() MRSQ(2, 1, c2, c0, 7)
+#define RSYSQ_ETMDVCMR5() MRSQ(2, 1, c2, c4, 7)
+#define RSYSQ_ETMDVCMR6() MRSQ(2, 1, c2, c8, 7)
+#define RSYSQ_ETMDVCMR7() MRSQ(2, 1, c2, c12, 7)
+
+/* 32 and 64 bit registers */
+#define WSYS_ETMAUXCTLR(val) MSR(val, 2, 1, c0, c6, 0)
+#define WSYS_ETMACATR0(val) MSR(val, 2, 1, c2, c0, 2)
+#define WSYS_ETMACATR1(val) MSR(val, 2, 1, c2, c2, 2)
+#define WSYS_ETMACATR2(val) MSR(val, 2, 1, c2, c4, 2)
+#define WSYS_ETMACATR3(val) MSR(val, 2, 1, c2, c6, 2)
+#define WSYS_ETMACATR4(val) MSR(val, 2, 1, c2, c8, 2)
+#define WSYS_ETMACATR5(val) MSR(val, 2, 1, c2, c10, 2)
+#define WSYS_ETMACATR6(val) MSR(val, 2, 1, c2, c12, 2)
+#define WSYS_ETMACATR7(val) MSR(val, 2, 1, c2, c14, 2)
+#define WSYS_ETMACATR8(val) MSR(val, 2, 1, c2, c0, 3)
+#define WSYS_ETMACATR9(val) MSR(val, 2, 1, c2, c2, 3)
+#define WSYS_ETMACATR10(val) MSR(val, 2, 1, c2, c4, 3)
+#define WSYS_ETMACATR11(val) MSR(val, 2, 1, c2, c6, 3)
+#define WSYS_ETMACATR12(val) MSR(val, 2, 1, c2, c8, 3)
+#define WSYS_ETMACATR13(val) MSR(val, 2, 1, c2, c10, 3)
+#define WSYS_ETMACATR14(val) MSR(val, 2, 1, c2, c12, 3)
+#define WSYS_ETMACATR15(val) MSR(val, 2, 1, c2, c14, 3)
+#define WSYS_ETMACVR0(val) MSR(val, 2, 1, c2, c0, 0)
+#define WSYS_ETMACVR1(val) MSR(val, 2, 1, c2, c2, 0)
+#define WSYS_ETMACVR2(val) MSR(val, 2, 1, c2, c4, 0)
+#define WSYS_ETMACVR3(val) MSR(val, 2, 1, c2, c6, 0)
+#define WSYS_ETMACVR4(val) MSR(val, 2, 1, c2, c8, 0)
+#define WSYS_ETMACVR5(val) MSR(val, 2, 1, c2, c10, 0)
+#define WSYS_ETMACVR6(val) MSR(val, 2, 1, c2, c12, 0)
+#define WSYS_ETMACVR7(val) MSR(val, 2, 1, c2, c14, 0)
+#define WSYS_ETMACVR8(val) MSR(val, 2, 1, c2, c0, 1)
+#define WSYS_ETMACVR9(val) MSR(val, 2, 1, c2, c2, 1)
+#define WSYS_ETMACVR10(val) MSR(val, 2, 1, c2, c4, 1)
+#define WSYS_ETMACVR11(val) MSR(val, 2, 1, c2, c6, 1)
+#define WSYS_ETMACVR12(val) MSR(val, 2, 1, c2, c8, 1)
+#define WSYS_ETMACVR13(val) MSR(val, 2, 1, c2, c10, 1)
+#define WSYS_ETMACVR14(val) MSR(val, 2, 1, c2, c12, 1)
+#define WSYS_ETMACVR15(val) MSR(val, 2, 1, c2, c14, 1)
+#define WSYS_ETMCCCTLR(val) MSR(val, 2, 1, c0, c14, 0)
+#define WSYS_ETMCIDCCTLR0(val) MSR(val, 2, 1, c3, c0, 2)
+#define WSYS_ETMCIDCVR0(val) MSR(val, 2, 1, c3, c0, 0)
+#define WSYS_ETMCIDCVR1(val) MSR(val, 2, 1, c3, c2, 0)
+#define WSYS_ETMCIDCVR2(val) MSR(val, 2, 1, c3, c4, 0)
+#define WSYS_ETMCIDCVR3(val) MSR(val, 2, 1, c3, c6, 0)
+#define WSYS_ETMCIDCVR4(val) MSR(val, 2, 1, c3, c8, 0)
+#define WSYS_ETMCIDCVR5(val) MSR(val, 2, 1, c3, c10, 0)
+#define WSYS_ETMCIDCVR6(val) MSR(val, 2, 1, c3, c12, 0)
+#define WSYS_ETMCIDCVR7(val) MSR(val, 2, 1, c3, c14, 0)
+#define WSYS_ETMCNTCTLR0(val) MSR(val, 2, 1, c0, c4, 5)
+#define WSYS_ETMCNTCTLR1(val) MSR(val, 2, 1, c0, c5, 5)
+#define WSYS_ETMCNTCTLR2(val) MSR(val, 2, 1, c0, c6, 5)
+#define WSYS_ETMCNTCTLR3(val) MSR(val, 2, 1, c0, c7, 5)
+#define WSYS_ETMCNTRLDVR0(val) MSR(val, 2, 1, c0, c0, 5)
+#define WSYS_ETMCNTRLDVR1(val) MSR(val, 2, 1, c0, c1, 5)
+#define WSYS_ETMCNTRLDVR2(val) MSR(val, 2, 1, c0, c2, 5)
+#define WSYS_ETMCNTRLDVR3(val) MSR(val, 2, 1, c0, c3, 5)
+#define WSYS_ETMCNTVR0(val) MSR(val, 2, 1, c0, c8, 5)
+#define WSYS_ETMCNTVR1(val) MSR(val, 2, 1, c0, c9, 5)
+#define WSYS_ETMCNTVR2(val) MSR(val, 2, 1, c0, c10, 5)
+#define WSYS_ETMCNTVR3(val) MSR(val, 2, 1, c0, c11, 5)
+#define WSYS_ETMCONFIGR(val) MSR(val, 2, 1, c0, c4, 0)
+#define WSYS_ETMEVENTCTL0R(val) MSR(val, 2, 1, c0, c8, 0)
+#define WSYS_ETMEVENTCTL1R(val) MSR(val, 2, 1, c0, c9, 0)
+#define WSYS_ETMEXTINSELR(val) MSR(val, 2, 1, c0, c8, 4)
+#define WSYS_ETMIMSPEC0(val) MSR(val, 2, 1, c0, c0, 7)
+#define WSYS_ETMOSLAR(val) MSR(val, 2, 1, c1, c0, 4)
+#define WSYS_ETMPRGCTLR(val) MSR(val, 2, 1, c0, c1, 0)
+#define WSYS_ETMRSCTLR10(val) MSR(val, 2, 1, c1, c10, 0)
+#define WSYS_ETMRSCTLR11(val) MSR(val, 2, 1, c1, c11, 0)
+#define WSYS_ETMRSCTLR12(val) MSR(val, 2, 1, c1, c12, 0)
+#define WSYS_ETMRSCTLR13(val) MSR(val, 2, 1, c1, c13, 0)
+#define WSYS_ETMRSCTLR14(val) MSR(val, 2, 1, c1, c14, 0)
+#define WSYS_ETMRSCTLR15(val) MSR(val, 2, 1, c1, c15, 0)
+#define WSYS_ETMRSCTLR2(val) MSR(val, 2, 1, c1, c2, 0)
+#define WSYS_ETMRSCTLR3(val) MSR(val, 2, 1, c1, c3, 0)
+#define WSYS_ETMRSCTLR4(val) MSR(val, 2, 1, c1, c4, 0)
+#define WSYS_ETMRSCTLR5(val) MSR(val, 2, 1, c1, c5, 0)
+#define WSYS_ETMRSCTLR6(val) MSR(val, 2, 1, c1, c6, 0)
+#define WSYS_ETMRSCTLR7(val) MSR(val, 2, 1, c1, c7, 0)
+#define WSYS_ETMRSCTLR8(val) MSR(val, 2, 1, c1, c8, 0)
+#define WSYS_ETMRSCTLR9(val) MSR(val, 2, 1, c1, c9, 0)
+#define WSYS_ETMRSCTLR16(val) MSR(val, 2, 1, c1, c0, 1)
+#define WSYS_ETMRSCTLR17(val) MSR(val, 2, 1, c1, c1, 1)
+#define WSYS_ETMRSCTLR18(val) MSR(val, 2, 1, c1, c2, 1)
+#define WSYS_ETMRSCTLR19(val) MSR(val, 2, 1, c1, c3, 1)
+#define WSYS_ETMRSCTLR20(val) MSR(val, 2, 1, c1, c4, 1)
+#define WSYS_ETMRSCTLR21(val) MSR(val, 2, 1, c1, c5, 1)
+#define WSYS_ETMRSCTLR22(val) MSR(val, 2, 1, c1, c6, 1)
+#define WSYS_ETMRSCTLR23(val) MSR(val, 2, 1, c1, c7, 1)
+#define WSYS_ETMRSCTLR24(val) MSR(val, 2, 1, c1, c8, 1)
+#define WSYS_ETMRSCTLR25(val) MSR(val, 2, 1, c1, c9, 1)
+#define WSYS_ETMRSCTLR26(val) MSR(val, 2, 1, c1, c10, 1)
+#define WSYS_ETMRSCTLR27(val) MSR(val, 2, 1, c1, c11, 1)
+#define WSYS_ETMRSCTLR28(val) MSR(val, 2, 1, c1, c12, 1)
+#define WSYS_ETMRSCTLR29(val) MSR(val, 2, 1, c1, c13, 1)
+#define WSYS_ETMRSCTLR30(val) MSR(val, 2, 1, c1, c14, 1)
+#define WSYS_ETMRSCTLR31(val) MSR(val, 2, 1, c1, c15, 1)
+#define WSYS_ETMSEQEVR0(val) MSR(val, 2, 1, c0, c0, 4)
+#define WSYS_ETMSEQEVR1(val) MSR(val, 2, 1, c0, c1, 4)
+#define WSYS_ETMSEQEVR2(val) MSR(val, 2, 1, c0, c2, 4)
+#define WSYS_ETMSEQRSTEVR(val) MSR(val, 2, 1, c0, c6, 4)
+#define WSYS_ETMSEQSTR(val) MSR(val, 2, 1, c0, c7, 4)
+#define WSYS_ETMSTALLCTLR(val) MSR(val, 2, 1, c0, c11, 0)
+#define WSYS_ETMSYNCPR(val) MSR(val, 2, 1, c0, c13, 0)
+#define WSYS_ETMTRACEIDR(val) MSR(val, 2, 1, c0, c0, 1)
+#define WSYS_ETMTSCTLR(val) MSR(val, 2, 1, c0, c12, 0)
+#define WSYS_ETMVICTLR(val) MSR(val, 2, 1, c0, c0, 2)
+#define WSYS_ETMVIIECTLR(val) MSR(val, 2, 1, c0, c1, 2)
+#define WSYS_ETMVISSCTLR(val) MSR(val, 2, 1, c0, c2, 2)
+#define WSYS_ETMVMIDCVR0(val) MSR(val, 2, 1, c3, c0, 1)
+#define WSYS_ETMVMIDCVR1(val) MSR(val, 2, 1, c3, c2, 1)
+#define WSYS_ETMVMIDCVR2(val) MSR(val, 2, 1, c3, c4, 1)
+#define WSYS_ETMVMIDCVR3(val) MSR(val, 2, 1, c3, c6, 1)
+#define WSYS_ETMVMIDCVR4(val) MSR(val, 2, 1, c3, c8, 1)
+#define WSYS_ETMVMIDCVR5(val) MSR(val, 2, 1, c3, c10, 1)
+#define WSYS_ETMVMIDCVR6(val) MSR(val, 2, 1, c3, c12, 1)
+#define WSYS_ETMVMIDCVR7(val) MSR(val, 2, 1, c3, c14, 1)
+#define WSYS_ETMDVCVR0(val) MSR(val, 2, 1, c2, c0, 4)
+#define WSYS_ETMDVCVR1(val) MSR(val, 2, 1, c2, c4, 4)
+#define WSYS_ETMDVCVR2(val) MSR(val, 2, 1, c2, c8, 4)
+#define WSYS_ETMDVCVR3(val) MSR(val, 2, 1, c2, c12, 4)
+#define WSYS_ETMDVCVR4(val) MSR(val, 2, 1, c2, c0, 5)
+#define WSYS_ETMDVCVR5(val) MSR(val, 2, 1, c2, c4, 5)
+#define WSYS_ETMDVCVR6(val) MSR(val, 2, 1, c2, c8, 5)
+#define WSYS_ETMDVCVR7(val) MSR(val, 2, 1, c2, c12, 5)
+#define WSYS_ETMDVCMR0(val) MSR(val, 2, 1, c2, c0, 6)
+#define WSYS_ETMDVCMR1(val) MSR(val, 2, 1, c2, c4, 6)
+#define WSYS_ETMDVCMR2(val) MSR(val, 2, 1, c2, c8, 6)
+#define WSYS_ETMDVCMR3(val) MSR(val, 2, 1, c2, c12, 6)
+#define WSYS_ETMDVCMR4(val) MSR(val, 2, 1, c2, c0, 7)
+#define WSYS_ETMDVCMR5(val) MSR(val, 2, 1, c2, c4, 7)
+#define WSYS_ETMDVCMR6(val) MSR(val, 2, 1, c2, c8, 7)
+#define WSYS_ETMDVCMR7(val) MSR(val, 2, 1, c2, c12, 7)
+#define WSYS_ETMSSCCR0(val) MSR(val, 2, 1, c1, c0, 2)
+#define WSYS_ETMSSCCR1(val) MSR(val, 2, 1, c1, c1, 2)
+#define WSYS_ETMSSCCR2(val) MSR(val, 2, 1, c1, c2, 2)
+#define WSYS_ETMSSCCR3(val) MSR(val, 2, 1, c1, c3, 2)
+#define WSYS_ETMSSCCR4(val) MSR(val, 2, 1, c1, c4, 2)
+#define WSYS_ETMSSCCR5(val) MSR(val, 2, 1, c1, c5, 2)
+#define WSYS_ETMSSCCR6(val) MSR(val, 2, 1, c1, c6, 2)
+#define WSYS_ETMSSCCR7(val) MSR(val, 2, 1, c1, c7, 2)
+#define WSYS_ETMSSCSR0(val) MSR(val, 2, 1, c1, c8, 2)
+#define WSYS_ETMSSCSR1(val) MSR(val, 2, 1, c1, c9, 2)
+#define WSYS_ETMSSCSR2(val) MSR(val, 2, 1, c1, c10, 2)
+#define WSYS_ETMSSCSR3(val) MSR(val, 2, 1, c1, c11, 2)
+#define WSYS_ETMSSCSR4(val) MSR(val, 2, 1, c1, c12, 2)
+#define WSYS_ETMSSCSR5(val) MSR(val, 2, 1, c1, c13, 2)
+#define WSYS_ETMSSCSR6(val) MSR(val, 2, 1, c1, c14, 2)
+#define WSYS_ETMSSCSR7(val) MSR(val, 2, 1, c1, c15, 2)
+#define WSYS_ETMSSPCICR0(val) MSR(val, 2, 1, c1, c0, 3)
+#define WSYS_ETMSSPCICR1(val) MSR(val, 2, 1, c1, c1, 3)
+#define WSYS_ETMSSPCICR2(val) MSR(val, 2, 1, c1, c2, 3)
+#define WSYS_ETMSSPCICR3(val) MSR(val, 2, 1, c1, c3, 3)
+#define WSYS_ETMSSPCICR4(val) MSR(val, 2, 1, c1, c4, 3)
+#define WSYS_ETMSSPCICR5(val) MSR(val, 2, 1, c1, c5, 3)
+#define WSYS_ETMSSPCICR6(val) MSR(val, 2, 1, c1, c6, 3)
+#define WSYS_ETMSSPCICR7(val) MSR(val, 2, 1, c1, c7, 3)
+
+#endif
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 83e0e25f95e0..cbdda065c404 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -233,6 +233,7 @@ struct fastrpc_file {
int tgid;
int cid;
int ssrcount;
+ int pd;
struct fastrpc_apps *apps;
};
@@ -597,7 +598,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
int srcVM[1] = {VMID_HLOS};
int destVM[2] = {VMID_HLOS, vmid};
int destVMperm[2] = {PERM_READ | PERM_WRITE,
- PERM_READ | PERM_WRITE };
+ PERM_READ | PERM_WRITE | PERM_EXEC};
VERIFY(err, !hyp_assign_phys(map->phys,
buf_page_size(map->size),
@@ -669,7 +670,7 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
int srcVM[1] = {VMID_HLOS};
int destVM[2] = {VMID_HLOS, vmid};
int destVMperm[2] = {PERM_READ | PERM_WRITE,
- PERM_READ | PERM_WRITE};
+ PERM_READ | PERM_WRITE | PERM_EXEC};
VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
srcVM, 1, destVM, destVMperm, 2));
@@ -1241,7 +1242,7 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
msg->tid = current->pid;
if (kernel)
msg->pid = 0;
- msg->invoke.header.ctx = ptr_to_uint64(ctx);
+ msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
msg->invoke.header.handle = handle;
msg->invoke.header.sc = ctx->sc;
msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
@@ -1274,6 +1275,7 @@ static void fastrpc_smd_read_handler(int cid)
sizeof(rsp));
if (ret != sizeof(rsp))
break;
+ rsp.ctx = rsp.ctx & ~1;
context_notify_user(uint64_to_ptr(rsp.ctx), rsp.retval);
} while (ret == sizeof(rsp));
}
@@ -1394,6 +1396,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
ioctl.inv.pra = ra;
ioctl.fds = 0;
ioctl.attrs = 0;
+ fl->pd = 0;
VERIFY(err, !(err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
if (err)
@@ -1411,10 +1414,13 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
inbuf.pgid = current->tgid;
inbuf.namelen = strlen(current->comm) + 1;
inbuf.filelen = init->filelen;
- VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
+ fl->pd = 1;
+ if (init->filelen) {
+ VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
init->file, init->filelen, mflags, &file));
- if (err)
- goto bail;
+ if (err)
+ goto bail;
+ }
inbuf.pageslen = 1;
VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
init->mem, init->memlen, mflags, &mem));
@@ -1777,6 +1783,7 @@ void fastrpc_glink_notify_rx(void *handle, const void *priv,
int len = size;
while (len >= sizeof(*rsp) && rsp) {
+ rsp->ctx = rsp->ctx & ~1;
context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
rsp++;
len = len - sizeof(*rsp);
@@ -1987,6 +1994,17 @@ bail:
return err;
}
+static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
+{
+ int err = 0;
+
+ VERIFY(err, fl && fl->sctx);
+ if (err)
+ goto bail;
+ *info = (fl->sctx->smmu.enabled ? 1 : 0);
+bail:
+ return err;
+}
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
unsigned long ioctl_param)
@@ -2000,6 +2018,8 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
void *param = (char *)ioctl_param;
struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
int size = 0, err = 0;
+ uint32_t info;
+
p.inv.fds = 0;
p.inv.attrs = 0;
@@ -2054,6 +2074,14 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
break;
}
break;
+ case FASTRPC_IOCTL_GETINFO:
+ VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
+ if (err)
+ goto bail;
+ break;
case FASTRPC_IOCTL_INIT:
VERIFY(err, 0 == copy_from_user(&p.init, param,
sizeof(p.init)));
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index c5b3405ed644..1e5649a8d4c4 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -333,6 +333,26 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_INIT,
(unsigned long)init);
}
+ case FASTRPC_IOCTL_GETINFO:
+ {
+ compat_uptr_t __user *info32;
+ uint32_t __user *info;
+ compat_uint_t u;
+ long ret;
+
+ info32 = compat_ptr(arg);
+ VERIFY(err, NULL != (info = compat_alloc_user_space(
+ sizeof(*info))));
+ if (err)
+ return -EFAULT;
+ ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_GETINFO,
+ (unsigned long)info);
+ if (ret)
+ return ret;
+ err = get_user(u, info);
+ err |= put_user(u, info32);
+ return err;
+ }
case FASTRPC_IOCTL_SETMODE:
return filp->f_op->unlocked_ioctl(filp, cmd,
(unsigned long)compat_ptr(arg));
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index b1b7bfba4cf3..fe8257ad3db2 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -24,6 +24,8 @@
#define FASTRPC_IOCTL_INIT _IOWR('R', 6, struct fastrpc_ioctl_init)
#define FASTRPC_IOCTL_INVOKE_ATTRS \
_IOWR('R', 7, struct fastrpc_ioctl_invoke_attrs)
+#define FASTRPC_IOCTL_GETINFO _IOWR('R', 8, uint32_t)
+
#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
#define DEVICE_NAME "adsprpc-smd"
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 2a9ad0d95deb..ec952974f1d6 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1392,6 +1392,7 @@ void diag_dci_channel_open_work(struct work_struct *work)
* which log entries in the cumulative logs that need
* to be updated on the peripheral.
*/
+ mutex_lock(&driver->dci_mutex);
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl, track);
if (entry->client_info.token != DCI_LOCAL_PROC)
@@ -1403,6 +1404,7 @@ void diag_dci_channel_open_work(struct work_struct *work)
client_log_mask_ptr += 514;
}
}
+ mutex_unlock(&driver->dci_mutex);
mutex_lock(&dci_log_mask_mutex);
/* Update the appropriate dirty bits in the cumulative mask */
@@ -1448,11 +1450,20 @@ void diag_dci_notify_client(int peripheral_mask, int data, int proc)
continue;
if (entry->client_info.notification_list & peripheral_mask) {
info.si_signo = entry->client_info.signal_type;
- stat = send_sig_info(entry->client_info.signal_type,
- &info, entry->client);
- if (stat)
- pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
+ if (entry->client &&
+ entry->tgid == entry->client->tgid) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "entry tgid = %d, dci client tgid = %d\n",
+ entry->tgid, entry->client->tgid);
+ stat = send_sig_info(
+ entry->client_info.signal_type,
+ &info, entry->client);
+ if (stat)
+ pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
info.si_int, stat);
+ } else
+ pr_err("diag: client data is corrupted, signal data: 0x%x, stat: %d\n",
+ info.si_int, stat);
}
}
}
@@ -2763,6 +2774,7 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
mutex_lock(&driver->dci_mutex);
new_entry->client = current;
+ new_entry->tgid = current->tgid;
new_entry->client_info.notification_list =
reg_entry->notification_list;
new_entry->client_info.signal_type =
@@ -2888,8 +2900,6 @@ int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
if (!entry)
return DIAG_DCI_NOT_SUPPORTED;
- mutex_lock(&driver->dci_mutex);
-
token = entry->client_info.token;
/*
* Remove the entry from the list before freeing the buffers
@@ -3000,8 +3010,6 @@ int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
}
queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
- mutex_unlock(&driver->dci_mutex);
-
return DIAG_DCI_NO_ERROR;
}
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 406e43cd3395..c2a8ac1e3854 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -146,6 +146,7 @@ struct diag_dci_buf_peripheral_t {
};
struct diag_dci_client_tbl {
+ int tgid;
struct diag_dci_reg_tbl_t client_info;
struct task_struct *client;
unsigned char *dci_log_mask;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index c6e57f36fe3b..edc104acb777 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -421,9 +421,11 @@ static int diag_remove_client_entry(struct file *file)
* This will specially help in case of ungraceful exit of any DCI client
* This call will remove any pending registrations of such client
*/
+ mutex_lock(&driver->dci_mutex);
dci_entry = dci_lookup_client_entry_pid(current->tgid);
if (dci_entry)
diag_dci_deinit_client(dci_entry);
+ mutex_unlock(&driver->dci_mutex);
diag_close_logging_process(current->tgid);
@@ -1982,32 +1984,46 @@ long diagchar_compat_ioctl(struct file *filp,
mutex_unlock(&driver->dci_mutex);
return DIAG_DCI_NOT_SUPPORTED;
}
- mutex_unlock(&driver->dci_mutex);
result = diag_dci_deinit_client(dci_client);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_DCI_SUPPORT:
result = diag_ioctl_dci_support(ioarg);
break;
case DIAG_IOCTL_DCI_HEALTH_STATS:
+ mutex_lock(&driver->dci_mutex);
result = diag_ioctl_dci_health_stats(ioarg);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_DCI_LOG_STATUS:
+ mutex_lock(&driver->dci_mutex);
result = diag_ioctl_dci_log_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_DCI_EVENT_STATUS:
+ mutex_lock(&driver->dci_mutex);
result = diag_ioctl_dci_event_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_DCI_CLEAR_LOGS:
+ mutex_lock(&driver->dci_mutex);
if (copy_from_user((void *)&client_id, (void __user *)ioarg,
- sizeof(int)))
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
return -EFAULT;
+ }
result = diag_dci_clear_log_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+ mutex_lock(&driver->dci_mutex);
if (copy_from_user(&client_id, (void __user *)ioarg,
- sizeof(int)))
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
return -EFAULT;
+ }
result = diag_dci_clear_event_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_LSM_DEINIT:
result = diag_ioctl_lsm_deinit();
@@ -2029,7 +2045,9 @@ long diagchar_compat_ioctl(struct file *filp,
result = 1;
break;
case DIAG_IOCTL_VOTE_REAL_TIME:
+ mutex_lock(&driver->dci_mutex);
result = diag_ioctl_vote_real_time(ioarg);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_GET_REAL_TIME:
result = diag_ioctl_get_real_time(ioarg);
@@ -2091,32 +2109,44 @@ long diagchar_ioctl(struct file *filp,
mutex_unlock(&driver->dci_mutex);
return DIAG_DCI_NOT_SUPPORTED;
}
- mutex_unlock(&driver->dci_mutex);
result = diag_dci_deinit_client(dci_client);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_DCI_SUPPORT:
result = diag_ioctl_dci_support(ioarg);
break;
case DIAG_IOCTL_DCI_HEALTH_STATS:
+ mutex_lock(&driver->dci_mutex);
result = diag_ioctl_dci_health_stats(ioarg);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_DCI_LOG_STATUS:
+ mutex_lock(&driver->dci_mutex);
result = diag_ioctl_dci_log_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_DCI_EVENT_STATUS:
result = diag_ioctl_dci_event_status(ioarg);
break;
case DIAG_IOCTL_DCI_CLEAR_LOGS:
+ mutex_lock(&driver->dci_mutex);
if (copy_from_user((void *)&client_id, (void __user *)ioarg,
- sizeof(int)))
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
return -EFAULT;
+ }
result = diag_dci_clear_log_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+ mutex_lock(&driver->dci_mutex);
if (copy_from_user(&client_id, (void __user *)ioarg,
- sizeof(int)))
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
return -EFAULT;
+ }
result = diag_dci_clear_event_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_LSM_DEINIT:
result = diag_ioctl_lsm_deinit();
@@ -2138,7 +2168,9 @@ long diagchar_ioctl(struct file *filp,
result = 1;
break;
case DIAG_IOCTL_VOTE_REAL_TIME:
+ mutex_lock(&driver->dci_mutex);
result = diag_ioctl_vote_real_time(ioarg);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_GET_REAL_TIME:
result = diag_ioctl_get_real_time(ioarg);
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 5d2381069df1..594d3b1bf3b5 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -117,19 +117,31 @@ void diag_notify_md_client(uint8_t peripheral, int data)
if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE)
return;
+ mutex_lock(&driver->md_session_lock);
memset(&info, 0, sizeof(struct siginfo));
info.si_code = SI_QUEUE;
info.si_int = (PERIPHERAL_MASK(peripheral) | data);
info.si_signo = SIGCONT;
if (driver->md_session_map[peripheral] &&
- driver->md_session_map[peripheral]->task) {
- stat = send_sig_info(info.si_signo, &info,
- driver->md_session_map[peripheral]->task);
- if (stat)
- pr_err("diag: Err sending signal to memory device client, signal data: 0x%x, stat: %d\n",
- info.si_int, stat);
+ driver->md_session_map[peripheral]->task) {
+ if (driver->md_session_map[peripheral]->pid ==
+ driver->md_session_map[peripheral]->task->tgid) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "md_session %d pid = %d, md_session %d task tgid = %d\n",
+ peripheral,
+ driver->md_session_map[peripheral]->pid,
+ peripheral,
+ driver->md_session_map[peripheral]->task->tgid);
+ stat = send_sig_info(info.si_signo, &info,
+ driver->md_session_map[peripheral]->task);
+ if (stat)
+ pr_err("diag: Err sending signal to memory device client, signal data: 0x%x, stat: %d\n",
+ info.si_int, stat);
+ } else
+ pr_err("diag: md_session_map[%d] data is corrupted, signal data: 0x%x\n",
+ peripheral, info.si_int);
}
-
+ mutex_unlock(&driver->md_session_lock);
}
static void process_pd_status(uint8_t *buf, uint32_t len,
@@ -1087,7 +1099,7 @@ int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
driver->buffering_mode[peripheral].mode = params->mode;
driver->buffering_mode[peripheral].low_wm_val = params->low_wm_val;
driver->buffering_mode[peripheral].high_wm_val = params->high_wm_val;
- if (mode == DIAG_BUFFERING_MODE_STREAMING)
+ if (params->mode == DIAG_BUFFERING_MODE_STREAMING)
driver->buffering_flag[peripheral] = 0;
fail:
mutex_unlock(&driver->mode_lock);
diff --git a/drivers/clk/msm/clock-gcc-cobalt.c b/drivers/clk/msm/clock-gcc-cobalt.c
index e9af651e9deb..1718114c38a8 100644
--- a/drivers/clk/msm/clock-gcc-cobalt.c
+++ b/drivers/clk/msm/clock-gcc-cobalt.c
@@ -307,8 +307,8 @@ static struct clk_freq_tbl ftbl_blsp_qup_spi_apps_clk_src[] = {
F( 960000, cxo_clk_src, 10, 1, 2),
F( 4800000, cxo_clk_src, 4, 0, 0),
F( 9600000, cxo_clk_src, 2, 0, 0),
- F( 19200000, cxo_clk_src, 1, 0, 0),
F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 19200000, cxo_clk_src, 1, 0, 0),
F( 25000000, gpll0_out_main, 12, 1, 2),
F( 50000000, gpll0_out_main, 12, 0, 0),
F_END
@@ -2722,12 +2722,19 @@ static void msm_gcc_cobalt_v1_fixup(void)
gcc_qspi_ahb_clk.c.ops = &clk_ops_dummy;
}
+static void msm_gcc_cobalt_v2_fixup(void)
+{
+ qspi_ref_clk_src.c.ops = &clk_ops_dummy;
+ gcc_qspi_ref_clk.c.ops = &clk_ops_dummy;
+ gcc_qspi_ahb_clk.c.ops = &clk_ops_dummy;
+}
+
static int msm_gcc_cobalt_probe(struct platform_device *pdev)
{
struct resource *res;
u32 regval;
int ret;
- bool is_v1 = 0;
+ bool is_v1 = 0, is_v2 = 0;
ret = vote_bimc(&bimc_clk, INT_MAX);
if (ret < 0)
@@ -2780,6 +2787,11 @@ static int msm_gcc_cobalt_probe(struct platform_device *pdev)
if (is_v1)
msm_gcc_cobalt_v1_fixup();
+ is_v2 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,gcc-cobalt-v2");
+ if (is_v2)
+ msm_gcc_cobalt_v2_fixup();
+
ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_gcc_cobalt,
ARRAY_SIZE(msm_clocks_gcc_cobalt));
if (ret)
@@ -2808,6 +2820,7 @@ static int msm_gcc_cobalt_probe(struct platform_device *pdev)
static struct of_device_id msm_clock_gcc_match_table[] = {
{ .compatible = "qcom,gcc-cobalt" },
+ { .compatible = "qcom,gcc-cobalt-v2" },
{ .compatible = "qcom,gcc-hamster" },
{}
};
diff --git a/drivers/clk/msm/clock-gpu-cobalt.c b/drivers/clk/msm/clock-gpu-cobalt.c
index c210f63c6bb4..6e3ef394ed67 100644
--- a/drivers/clk/msm/clock-gpu-cobalt.c
+++ b/drivers/clk/msm/clock-gpu-cobalt.c
@@ -448,6 +448,71 @@ static struct mux_clk gpucc_gcc_dbg_clk = {
},
};
+static void enable_gfx_crc(void)
+{
+ u32 regval;
+
+ /* Set graphics clock at a safe frequency */
+ clk_set_rate(&gpucc_gfx3d_clk.c, gfx3d_clk_src.c.fmax[2]);
+ /* Turn on the GPU_CX GDSC */
+ regval = readl_relaxed(virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+ regval &= ~SW_COLLAPSE_MASK;
+ writel_relaxed(regval, virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+ /* Wait for 10usecs to let the GDSC turn ON */
+ mb();
+ udelay(10);
+ /* Turn on the Graphics rail */
+ if (regulator_enable(vdd_gpucc.regulator[0]))
+ pr_warn("Enabling the graphics rail during CRC sequence failed!\n");
+ /* Turn on the GPU_GX GDSC */
+ writel_relaxed(0x1, virt_base_gfx + GPU_GX_BCR);
+ /*
+ * BLK_ARES should be kept asserted for 1us before being de-asserted.
+ */
+ wmb();
+ udelay(1);
+ writel_relaxed(0x0, virt_base_gfx + GPU_GX_BCR);
+ regval = readl_relaxed(virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+ regval |= BIT(4);
+ writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+ /* Keep reset asserted for at-least 1us before continuing. */
+ wmb();
+ udelay(1);
+ regval &= ~BIT(4);
+ writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+ /* Make sure GMEM_RESET is de-asserted before continuing. */
+ wmb();
+ regval &= ~BIT(0);
+ writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+ /* All previous writes should be done at this point */
+ wmb();
+ regval = readl_relaxed(virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+ regval &= ~SW_COLLAPSE_MASK;
+ writel_relaxed(regval, virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+ /* Wait for 10usecs to let the GDSC turn ON */
+ mb();
+ udelay(10);
+ /* Enable the graphics clock */
+ clk_prepare_enable(&gpucc_gfx3d_clk.c);
+ /* Enabling MND RC in Bypass mode */
+ writel_relaxed(0x00015010, virt_base_gfx + CRC_MND_CFG_OFFSET);
+ writel_relaxed(0x00800000, virt_base_gfx + CRC_SID_FSM_OFFSET);
+ /* Wait for 16 cycles before continuing */
+ udelay(1);
+ clk_set_rate(&gpucc_gfx3d_clk.c,
+ gfx3d_clk_src.c.fmax[gfx3d_clk_src.c.num_fmax - 1]);
+ /* Disable the graphics clock */
+ clk_disable_unprepare(&gpucc_gfx3d_clk.c);
+ /* Turn off the gpu_cx and gpu_gx GDSCs */
+ regval = readl_relaxed(virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+ regval |= SW_COLLAPSE_MASK;
+ writel_relaxed(regval, virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+ regulator_disable(vdd_gpucc.regulator[0]);
+ regval = readl_relaxed(virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+ regval |= SW_COLLAPSE_MASK;
+ writel_relaxed(regval, virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+}
+
static struct mux_clk gfxcc_dbg_clk = {
.ops = &mux_reg_ops,
.en_mask = BIT(16),
@@ -595,7 +660,6 @@ int msm_gfxcc_cobalt_probe(struct platform_device *pdev)
struct device_node *of_node = pdev->dev.of_node;
int rc;
struct regulator *reg;
- u32 regval;
bool is_vq = 0;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
@@ -659,58 +723,7 @@ int msm_gfxcc_cobalt_probe(struct platform_device *pdev)
if (rc)
return rc;
- /* CRC ENABLE SEQUENCE */
- clk_set_rate(&gpucc_gfx3d_clk.c, gfx3d_clk_src.c.fmax[2]);
- /* Turn on the GPU_CX GDSC */
- regval = readl_relaxed(virt_base_gfx + GPU_CX_GDSCR_OFFSET);
- regval &= ~SW_COLLAPSE_MASK;
- writel_relaxed(regval, virt_base_gfx + GPU_CX_GDSCR_OFFSET);
- /* Wait for 10usecs to let the GDSC turn ON */
- mb();
- udelay(10);
- /* Turn on the GPU_GX GDSC */
- writel_relaxed(0x1, virt_base_gfx + GPU_GX_BCR);
- /*
- * BLK_ARES should be kept asserted for 1us before being de-asserted.
- */
- wmb();
- udelay(1);
- writel_relaxed(0x0, virt_base_gfx + GPU_GX_BCR);
- regval = readl_relaxed(virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
- regval |= BIT(4);
- writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
- /* Keep reset asserted for at-least 1us before continuing. */
- wmb();
- udelay(1);
- regval &= ~BIT(4);
- writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
- /* Make sure GMEM_RESET is de-asserted before continuing. */
- wmb();
- regval = readl_relaxed(virt_base_gfx + GPU_GX_GDSCR_OFFSET);
- regval &= ~SW_COLLAPSE_MASK;
- writel_relaxed(regval, virt_base_gfx + GPU_GX_GDSCR_OFFSET);
- /* Wait for 10usecs to let the GDSC turn ON */
- mb();
- udelay(10);
- /* Enable the graphics clock */
- clk_prepare_enable(&gpucc_gfx3d_clk.c);
- /* Enabling MND RC in Bypass mode */
- writel_relaxed(0x00015010, virt_base_gfx + CRC_MND_CFG_OFFSET);
- writel_relaxed(0x00800000, virt_base_gfx + CRC_SID_FSM_OFFSET);
- /* Wait for 16 cycles before continuing */
- udelay(1);
- clk_set_rate(&gpucc_gfx3d_clk.c,
- gfx3d_clk_src.c.fmax[gfx3d_clk_src.c.num_fmax - 1]);
- /* Disable the graphics clock */
- clk_disable_unprepare(&gpucc_gfx3d_clk.c);
- /* Turn off the gpu_cx and gpu_gx GDSCs */
- regval = readl_relaxed(virt_base_gfx + GPU_GX_GDSCR_OFFSET);
- regval |= SW_COLLAPSE_MASK;
- writel_relaxed(regval, virt_base_gfx + GPU_GX_GDSCR_OFFSET);
- regval = readl_relaxed(virt_base_gfx + GPU_CX_GDSCR_OFFSET);
- regval |= SW_COLLAPSE_MASK;
- writel_relaxed(regval, virt_base_gfx + GPU_CX_GDSCR_OFFSET);
- /* END OF CRC ENABLE SEQUENCE */
+ enable_gfx_crc();
/*
* Force periph logic to be ON since after NAP, the value of the perf
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c b/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
index 61351084bc48..e6153553e48a 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
@@ -106,7 +106,6 @@ struct dsi_pll_cobalt {
struct mdss_pll_resources *rsc;
struct dsi_pll_config pll_configuration;
struct dsi_pll_regs reg_setup;
- int bitclk_src_div;
};
static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
@@ -178,7 +177,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_cobalt *pll,
u32 frac;
u64 multiplier;
- target_freq = rsc->vco_current_rate / pll->bitclk_src_div;
+ target_freq = rsc->vco_current_rate;
pr_debug("target_freq = %llu\n", target_freq);
if (config->div_override) {
@@ -275,9 +274,11 @@ static void dsi_pll_commit(struct dsi_pll_cobalt *pll,
wmb();
}
-static int vco_cobalt_set_rate_sub(struct mdss_pll_resources *rsc)
+static int vco_cobalt_set_rate(struct clk *c, unsigned long rate)
{
int rc;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *rsc = vco->priv;
struct dsi_pll_cobalt *pll;
if (!rsc) {
@@ -285,11 +286,6 @@ static int vco_cobalt_set_rate_sub(struct mdss_pll_resources *rsc)
return -EINVAL;
}
- if (!rsc->vco_current_rate) {
- pr_debug("vco rate not configured yet\n");
- return 0;
- }
-
if (rsc->pll_on)
return 0;
@@ -299,6 +295,11 @@ static int vco_cobalt_set_rate_sub(struct mdss_pll_resources *rsc)
return -EINVAL;
}
+ pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+ rsc->vco_current_rate = rate;
+ rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
@@ -318,25 +319,7 @@ static int vco_cobalt_set_rate_sub(struct mdss_pll_resources *rsc)
mdss_pll_resource_enable(rsc, false);
- return rc;
-}
-
-static int vco_cobalt_set_rate(struct clk *c, unsigned long rate)
-{
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
- struct mdss_pll_resources *rsc = vco->priv;
-
- if (!rsc) {
- pr_err("pll resource not found\n");
- return -EINVAL;
- }
-
- pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
-
- rsc->vco_current_rate = rate;
- rsc->vco_ref_clk_rate = vco->ref_clk_rate;
-
- return vco_cobalt_set_rate_sub(rsc);
+ return 0;
}
static int dsi_pll_cobalt_lock_status(struct mdss_pll_resources *pll)
@@ -662,7 +645,7 @@ static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
reg_val = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
reg_val &= ~0x0F;
- reg_val |= (div - 1);
+ reg_val |= div;
MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
}
@@ -689,25 +672,13 @@ static int bit_clk_set_div(struct div_clk *clk, int div)
return rc;
}
- /*
- * Once the bit clock source divider is setup, we may need to
- * re-configure the rest of the VCO registers if this divider value
- * has changed.
- */
- if (pll->bitclk_src_div != div) {
- pll->bitclk_src_div = div;
- rc = vco_cobalt_set_rate_sub(rsc);
- if (rc)
- goto error;
- }
-
bit_clk_set_div_sub(rsc, div);
+ /* For slave PLL, this divider should always be set to 1 */
if (rsc->slave)
- bit_clk_set_div_sub(rsc->slave, div);
+ bit_clk_set_div_sub(rsc->slave, 1);
(void)mdss_pll_resource_enable(rsc, false);
-error:
return rc;
}
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 11cdf6fa53eb..e35d79acd5e8 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1005,7 +1005,6 @@ static int lpm_cpuidle_select(struct cpuidle_driver *drv,
if (idx < 0)
return -EPERM;
- trace_cpu_idle_rcuidle(idx, dev->cpu);
return idx;
}
@@ -1044,7 +1043,6 @@ exit:
sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
trace_cpu_idle_exit(idx, success);
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
end_time = ktime_to_ns(ktime_get()) - start_time;
dev->last_residency = do_div(end_time, 1000);
local_irq_enable();
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 3f712e9d1604..680d0b596970 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -372,26 +372,26 @@ config CRYPTO_DEV_QCRYPTO
config CRYPTO_DEV_QCOM_MSM_QCE
tristate "Qualcomm Crypto Engine (QCE) module"
- select CRYPTO_DEV_QCE50 if ARCH_APQ8084 || ARCH_MSM8916 || ARCH_MSM8994 || ARCH_MSM8996 || ARCH_MSM8992 || ARCH_MSMTITANIUM || ARCH_MSM8909
+ select CRYPTO_DEV_QCE50 if ARCH_APQ8084 || ARCH_MSM8916 || ARCH_MSM8994 || ARCH_MSM8996 || ARCH_MSM8992 || ARCH_MSMTITANIUM || ARCH_MSM8909 || ARCH_MSMCOBALT
default n
help
This driver supports Qualcomm Crypto Engine in MSM7x30, MSM8660
MSM8x55, MSM8960, MSM9615, MSM8916, MSM8994, MSM8996, FSM9900,
- MSMTITANINUM and APQ8084.
+ MSMTITANIUM, APQ8084 and MSMCOBALT.
To compile this driver as a module, choose M here: the
For MSM7x30 MSM8660 and MSM8x55 the module is called qce
For MSM8960, APQ8064 and MSM9615 the module is called qce40
- For MSM8974, MSM8916, MSM8994, MSM8996, MSM8992, MSMTITANIUM
- and APQ8084 the module is called qce50.
+ For MSM8974, MSM8916, MSM8994, MSM8996, MSM8992, MSMTITANIUM,
+ APQ8084 and MSMCOBALT the module is called qce50.
config CRYPTO_DEV_QCEDEV
tristate "QCEDEV Interface to CE module"
default n
help
This driver supports Qualcomm QCEDEV Crypto in MSM7x30, MSM8660,
- MSM8960, MSM9615, APQ8064, MSM8974, MSM8916, MSM8994, MSM8996
- and APQ8084. This exposes the interface to the QCE hardware
+ MSM8960, MSM9615, APQ8064, MSM8974, MSM8916, MSM8994, MSM8996,
+ APQ8084 and MSMCOBALT. This exposes the interface to the QCE hardware
accelerator via IOCTLs.
To compile this driver as a module, choose M here: the
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index ca1095b7b552..db8f0eeca7c9 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -66,6 +66,9 @@ static const char *extcon_name[] = {
/* connector orientation 0 - CC1, 1 - CC2 */
[EXTCON_USB_CC] = "USB-CC",
+ /* connector speed 0 - High Speed, 1 - Super Speed */
+ [EXTCON_USB_SPEED] = "USB-SPEED",
+
/* Display external connector */
[EXTCON_DISP_HDMI] = "HDMI",
[EXTCON_DISP_MHL] = "MHL",
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 08f7b29fe96a..15c10ad073e6 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -32,6 +32,7 @@
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3
#define KGSL_PWRFLAGS_RETENTION_ON 4
+#define KGSL_PWRFLAGS_NAP_OFF 5
#define UPDATE_BUSY_VAL 1000000
@@ -1053,6 +1054,21 @@ static ssize_t kgsl_pwrctrl_force_non_retention_on_store(struct device *dev,
KGSL_PWRFLAGS_RETENTION_ON);
}
+static ssize_t kgsl_pwrctrl_force_no_nap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_NAP_OFF);
+}
+
+static ssize_t kgsl_pwrctrl_force_no_nap_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return __force_on_store(dev, attr, buf, count,
+ KGSL_PWRFLAGS_NAP_OFF);
+}
+
static ssize_t kgsl_pwrctrl_bus_split_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1217,6 +1233,9 @@ static DEVICE_ATTR(popp, 0644, kgsl_popp_show, kgsl_popp_store);
static DEVICE_ATTR(force_non_retention_on, 0644,
kgsl_pwrctrl_force_non_retention_on_show,
kgsl_pwrctrl_force_non_retention_on_store);
+static DEVICE_ATTR(force_no_nap, 0644,
+ kgsl_pwrctrl_force_no_nap_show,
+ kgsl_pwrctrl_force_no_nap_store);
static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_gpuclk,
@@ -1235,6 +1254,7 @@ static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_force_bus_on,
&dev_attr_force_rail_on,
&dev_attr_force_non_retention_on,
+ &dev_attr_force_no_nap,
&dev_attr_bus_split,
&dev_attr_default_pwrlevel,
&dev_attr_popp,
@@ -2549,7 +2569,9 @@ void kgsl_active_count_put(struct kgsl_device *device)
BUG_ON(atomic_read(&device->active_cnt) == 0);
if (atomic_dec_and_test(&device->active_cnt)) {
- if (device->state == KGSL_STATE_ACTIVE &&
+ bool nap_on = !(device->pwrctrl.ctrl_flags &
+ BIT(KGSL_PWRFLAGS_NAP_OFF));
+ if (nap_on && device->state == KGSL_STATE_ACTIVE &&
device->requested_state == KGSL_STATE_NONE) {
kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
kgsl_schedule_work(&device->idle_check_ws);
diff --git a/drivers/input/qpnp-power-on.c b/drivers/input/qpnp-power-on.c
index 6f310a54d172..8c3c523c83de 100644
--- a/drivers/input/qpnp-power-on.c
+++ b/drivers/input/qpnp-power-on.c
@@ -348,7 +348,7 @@ int qpnp_pon_set_restart_reason(enum pon_restart_reason reason)
return 0;
rc = qpnp_pon_masked_write(pon, QPNP_PON_SOFT_RB_SPARE(pon),
- PON_MASK(7, 2), (reason << 2));
+ PON_MASK(7, 1), (reason << 1));
if (rc)
dev_err(&pon->pdev->dev,
"Unable to write to addr=%x, rc(%d)\n",
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 5216fd9c7b4a..f0ec8caac3dd 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -665,6 +665,13 @@ static int get_hfi_extradata_index(enum hal_extradata_id index)
case HAL_EXTRADATA_ROI_QP:
ret = HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA;
break;
+ case HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI:
+ ret =
+ HFI_PROPERTY_PARAM_VDEC_MASTERING_DISPLAY_COLOUR_SEI_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
+ ret = HFI_PROPERTY_PARAM_VDEC_CONTENT_LIGHT_LEVEL_SEI_EXTRADATA;
+ break;
default:
dprintk(VIDC_WARN, "Extradata index not found: %d\n", index);
break;
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index cd65c4d0a08b..54cb04fcc4f0 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -49,32 +49,6 @@ static const char *const mpeg_video_output_order[] = {
"Decode Order",
NULL
};
-static const char *const mpeg_video_vidc_extradata[] = {
- "Extradata none",
- "Extradata MB Quantization",
- "Extradata Interlace Video",
- "Extradata VC1 Framedisp",
- "Extradata VC1 Seqdisp",
- "Extradata timestamp",
- "Extradata S3D Frame Packing",
- "Extradata Frame Rate",
- "Extradata Panscan Window",
- "Extradata Recovery point SEI",
- "Extradata Closed Caption UD",
- "Extradata AFD UD",
- "Extradata Multislice info",
- "Extradata number of concealed MB",
- "Extradata metadata filler",
- "Extradata input crop",
- "Extradata digital zoom",
- "Extradata aspect ratio",
- "Extradata mpeg2 seqdisp",
- "Extradata stream userdata",
- "Extradata frame QP",
- "Extradata frame bits info",
- "Extradata VQZip SEI",
- "Extradata output crop",
-};
static const char *const mpeg_vidc_video_alloc_mode_type[] = {
"Buffer Allocation Static",
"Buffer Allocation Ring Buffer",
@@ -273,7 +247,7 @@ static struct msm_vidc_ctrl msm_vdec_ctrls[] = {
.name = "Extradata Type",
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
- .maximum = V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP,
+ .maximum = V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI,
.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
.menu_skip_mask = ~(
(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -297,7 +271,9 @@ static struct msm_vidc_ctrl msm_vdec_ctrls[] = {
(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_VQZIP_SEI) |
- (1 << V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP)
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI)
),
.qmenu = mpeg_video_vidc_extradata,
},
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 8c0df7ce08e1..870eaf3fb99c 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -157,29 +157,6 @@ static const char *const vp8_profile_level[] = {
"3.0",
};
-static const char *const mpeg_video_vidc_extradata[] = {
- "Extradata none",
- "Extradata MB Quantization",
- "Extradata Interlace Video",
- "Extradata VC1 Framedisp",
- "Extradata VC1 Seqdisp",
- "Extradata timestamp",
- "Extradata S3D Frame Packing",
- "Extradata Frame Rate",
- "Extradata Panscan Window",
- "Extradata Recovery point SEI",
- "Extradata Closed Caption UD",
- "Extradata AFD UD",
- "Extradata Multislice info",
- "Extradata number of concealed MB",
- "Extradata metadata filler",
- "Extradata input crop",
- "Extradata digital zoom",
- "Extradata aspect ratio",
- "Extradata macroblock metadata",
- "Extradata YUV Stats"
-};
-
static const char *const perf_level[] = {
"Nominal",
"Performance",
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 52919d7b8973..c3565bbd973c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -47,6 +47,37 @@
#define MAX_SUPPORTED_INSTANCES 16
+const char *const mpeg_video_vidc_extradata[] = {
+ "Extradata none",
+ "Extradata MB Quantization",
+ "Extradata Interlace Video",
+ "Extradata VC1 Framedisp",
+ "Extradata VC1 Seqdisp",
+ "Extradata timestamp",
+ "Extradata S3D Frame Packing",
+ "Extradata Frame Rate",
+ "Extradata Panscan Window",
+ "Extradata Recovery point SEI",
+ "Extradata Multislice info",
+ "Extradata number of concealed MB",
+ "Extradata metadata filler",
+ "Extradata input crop",
+ "Extradata digital zoom",
+ "Extradata aspect ratio",
+ "Extradata mpeg2 seqdisp",
+ "Extradata stream userdata",
+ "Extradata frame QP",
+ "Extradata frame bits info",
+ "Extradata LTR",
+ "Extradata macroblock metadata",
+ "Extradata VQZip SEI",
+ "Extradata YUV Stats",
+ "Extradata ROI QP",
+ "Extradata output crop",
+ "Extradata display colour SEI",
+ "Extradata light level SEI",
+};
+
struct getprop_buf {
struct list_head list;
void *data;
@@ -4652,6 +4683,12 @@ enum hal_extradata_id msm_comm_get_hal_extradata_index(
case V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP:
ret = HAL_EXTRADATA_OUTPUT_CROP;
break;
+ case V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI:
+ ret = HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
+ ret = HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI;
+ break;
default:
dprintk(VIDC_WARN, "Extradata not found: %d\n", index);
break;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 963a2365905a..337760508eb1 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -19,6 +19,8 @@ struct vb2_buf_entry {
struct vb2_buffer *vb;
};
+extern const char *const mpeg_video_vidc_extradata[];
+
enum load_calc_quirks {
LOAD_CALC_NO_QUIRKS = 0,
LOAD_CALC_IGNORE_TURBO_LOAD = 1 << 0,
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 6b7292cec6a8..8c4fa786a424 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -219,6 +219,12 @@ struct hfi_extradata_header {
(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x01B)
#define HFI_PROPERTY_PARAM_VDEC_VQZIP_SEI_EXTRADATA \
(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001C)
+#define HFI_PROPERTY_PARAM_VDEC_VPX_COLORSPACE_EXTRADATA \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001D)
+#define HFI_PROPERTY_PARAM_VDEC_MASTERING_DISPLAY_COLOUR_SEI_EXTRADATA \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001E)
+#define HFI_PROPERTY_PARAM_VDEC_CONTENT_LIGHT_LEVEL_SEI_EXTRADATA \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001F)
#define HFI_PROPERTY_CONFIG_VDEC_OX_START \
(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x4000)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 063208b8f188..2b346f0aed70 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -122,6 +122,8 @@ enum hal_extradata_id {
HAL_EXTRADATA_YUV_STATS,
HAL_EXTRADATA_ROI_QP,
HAL_EXTRADATA_OUTPUT_CROP,
+ HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI,
+ HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI,
};
enum hal_property {
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index c9c092dc4c5c..a5d627a89c31 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -637,7 +637,9 @@ struct msm_pcie_dev_t {
u32 num_ep;
bool pending_ep_reg;
u32 phy_len;
+ u32 port_phy_len;
struct msm_pcie_phy_info_t *phy_sequence;
+ struct msm_pcie_phy_info_t *port_phy_sequence;
u32 ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
u32 rc_shadow[PCIE_CONF_SPACE_DW];
bool shadow_en;
@@ -1522,6 +1524,8 @@ static void pcie_phy_init(struct msm_pcie_dev_t *dev)
static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
{
+ int i;
+ struct msm_pcie_phy_info_t *phy_seq;
u8 common_phy;
if (dev->phy_ver >= 0x20)
@@ -1534,6 +1538,21 @@ static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
else
common_phy = 0;
+ if (dev->port_phy_sequence) {
+ i = dev->port_phy_len;
+ phy_seq = dev->port_phy_sequence;
+ while (i--) {
+ msm_pcie_write_reg(dev->phy,
+ phy_seq->offset,
+ phy_seq->val);
+ if (phy_seq->delay)
+ usleep_range(phy_seq->delay,
+ phy_seq->delay + 1);
+ phy_seq++;
+ }
+ return;
+ }
+
msm_pcie_write_reg(dev->phy,
QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
common_phy), 0x45);
@@ -1833,6 +1852,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
dev->pending_ep_reg ? "true" : "false");
PCIE_DBG_FS(dev, "phy_len is %d",
dev->phy_len);
+ PCIE_DBG_FS(dev, "port_phy_len is %d",
+ dev->port_phy_len);
PCIE_DBG_FS(dev, "disable_pc is %d",
dev->disable_pc);
PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
@@ -4028,6 +4049,31 @@ static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
dev->rc_idx);
}
+ of_get_property(pdev->dev.of_node, "qcom,port-phy-sequence", &size);
+ if (size) {
+ dev->port_phy_sequence = (struct msm_pcie_phy_info_t *)
+ devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+
+ if (dev->port_phy_sequence) {
+ dev->port_phy_len =
+ size / sizeof(*dev->port_phy_sequence);
+
+ of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,port-phy-sequence",
+ (unsigned int *)dev->port_phy_sequence,
+ size / sizeof(dev->port_phy_sequence->offset));
+ } else {
+ PCIE_ERR(dev,
+ "RC%d: Could not allocate memory for port phy init sequence.\n",
+ dev->rc_idx);
+ ret = -ENOMEM;
+ goto out;
+ }
+ } else {
+ PCIE_DBG(dev, "RC%d: port phy sequence is not present in DT\n",
+ dev->rc_idx);
+ }
+
for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
clk_info = &dev->clk[i];
@@ -5934,7 +5980,9 @@ static int msm_pcie_probe(struct platform_device *pdev)
msm_pcie_dev[rc_idx].num_ep = 0;
msm_pcie_dev[rc_idx].pending_ep_reg = false;
msm_pcie_dev[rc_idx].phy_len = 0;
+ msm_pcie_dev[rc_idx].port_phy_len = 0;
msm_pcie_dev[rc_idx].phy_sequence = NULL;
+ msm_pcie_dev[rc_idx].port_phy_sequence = NULL;
msm_pcie_dev[rc_idx].event_reg = NULL;
msm_pcie_dev[rc_idx].linkdown_counter = 0;
msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index e16ca747210b..72249ca07886 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -73,6 +73,24 @@
} \
} while (0)
+#define IPA_API_DISPATCH_RETURN_BOOL(api, p...) \
+ do { \
+ if (!ipa_api_ctrl) { \
+ pr_err("IPA HW is not supported on this target\n"); \
+ ret = false; \
+ } \
+ else { \
+ if (ipa_api_ctrl->api) { \
+ ret = ipa_api_ctrl->api(p); \
+ } else { \
+ pr_err("%s not implemented for IPA ver %d\n", \
+ __func__, ipa_api_hw_type); \
+ WARN_ON(1); \
+ ret = false; \
+ } \
+ } \
+ } while (0)
+
static enum ipa_hw_type ipa_api_hw_type;
static struct ipa_api_controller *ipa_api_ctrl;
@@ -1778,63 +1796,23 @@ void ipa_dma_destroy(void)
}
EXPORT_SYMBOL(ipa_dma_destroy);
-/**
- * ipa_mhi_init() - Initialize IPA MHI driver
- * @params: initialization params
- *
- * This function is called by MHI client driver on boot to initialize IPA MHI
- * Driver. When this function returns device can move to READY state.
- * This function is doing the following:
- * - Initialize MHI IPA internal data structures
- * - Create IPA RM resources
- * - Initialize debugfs
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa_mhi_init(struct ipa_mhi_init_params *params)
+int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params)
{
int ret;
- IPA_API_DISPATCH_RETURN(ipa_mhi_init, params);
+ IPA_API_DISPATCH_RETURN(ipa_mhi_init_engine, params);
return ret;
}
-EXPORT_SYMBOL(ipa_mhi_init);
+EXPORT_SYMBOL(ipa_mhi_init_engine);
/**
- * ipa_mhi_start() - Start IPA MHI engine
- * @params: pcie addresses for MHI
- *
- * This function is called by MHI client driver on MHI engine start for
- * handling MHI accelerated channels. This function is called after
- * ipa_mhi_init() was called and can be called after MHI reset to restart MHI
- * engine. When this function returns device can move to M0 state.
- * This function is doing the following:
- * - Send command to uC for initialization of MHI engine
- * - Add dependencies to IPA RM
- * - Request MHI_PROD in IPA RM
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa_mhi_start(struct ipa_mhi_start_params *params)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_mhi_start, params);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_mhi_start);
-
-/**
- * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
+ * ipa_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
* MHI channel
* @in: connect parameters
* @clnt_hdl: [out] client handle for this pipe
*
- * This function is called by MHI client driver on MHI channel start.
+ * This function is called by IPA MHI client driver on MHI channel start.
* This function is called after MHI engine was started.
* This function is doing the following:
* - Send command to uC to start corresponding MHI channel
@@ -1843,23 +1821,24 @@ EXPORT_SYMBOL(ipa_mhi_start);
* Return codes: 0 : success
* negative : error
*/
-int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
+int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl)
{
int ret;
- IPA_API_DISPATCH_RETURN(ipa_mhi_connect_pipe, in, clnt_hdl);
+ IPA_API_DISPATCH_RETURN(ipa_connect_mhi_pipe, in, clnt_hdl);
return ret;
}
-EXPORT_SYMBOL(ipa_mhi_connect_pipe);
+EXPORT_SYMBOL(ipa_connect_mhi_pipe);
/**
- * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
+ * ipa_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
* MHI channel
* @in: connect parameters
* @clnt_hdl: [out] client handle for this pipe
*
- * This function is called by MHI client driver on MHI channel reset.
+ * This function is called by IPA MHI client driver on MHI channel reset.
* This function is called after MHI channel was started.
* This function is doing the following:
* - Send command to uC to reset corresponding MHI channel
@@ -1868,81 +1847,218 @@ EXPORT_SYMBOL(ipa_mhi_connect_pipe);
* Return codes: 0 : success
* negative : error
*/
-int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
+int ipa_disconnect_mhi_pipe(u32 clnt_hdl)
{
int ret;
- IPA_API_DISPATCH_RETURN(ipa_mhi_disconnect_pipe, clnt_hdl);
+ IPA_API_DISPATCH_RETURN(ipa_disconnect_mhi_pipe, clnt_hdl);
return ret;
}
-EXPORT_SYMBOL(ipa_mhi_disconnect_pipe);
+EXPORT_SYMBOL(ipa_disconnect_mhi_pipe);
-/**
- * ipa_mhi_suspend() - Suspend MHI accelerated channels
- * @force:
- * false: in case of data pending in IPA, MHI channels will not be
- * suspended and function will fail.
- * true: in case of data pending in IPA, make sure no further access from
- * IPA to PCIe is possible. In this case suspend cannot fail.
- *
- * This function is called by MHI client driver on MHI suspend.
- * This function is called after MHI channel was started.
- * When this function returns device can move to M1/M2/M3/D3cold state.
- * This function is doing the following:
- * - Send command to uC to suspend corresponding MHI channel
- * - Make sure no further access is possible from IPA to PCIe
- * - Release MHI_PROD in IPA RM
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa_mhi_suspend(bool force)
+bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client)
+{
+ bool ret;
+
+ IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_stop_gsi_channel, client);
+
+ return ret;
+}
+
+int ipa_uc_mhi_reset_channel(int channelHandle)
{
int ret;
- IPA_API_DISPATCH_RETURN(ipa_mhi_suspend, force);
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_reset_channel, channelHandle);
return ret;
}
-EXPORT_SYMBOL(ipa_mhi_suspend);
-/**
- * ipa_mhi_resume() - Resume MHI accelerated channels
- *
- * This function is called by MHI client driver on MHI resume.
- * This function is called after MHI channel was suspended.
- * When this function returns device can move to M0 state.
- * This function is doing the following:
- * - Send command to uC to resume corresponding MHI channel
- * - Request MHI_PROD in IPA RM
- * - Resume data to IPA
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa_mhi_resume(void)
+bool ipa_mhi_sps_channel_empty(enum ipa_client_type client)
+{
+ bool ret;
+
+ IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_sps_channel_empty, client);
+
+ return ret;
+}
+
+int ipa_qmi_enable_force_clear_datapath_send(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_qmi_enable_force_clear_datapath_send, req);
+
+ return ret;
+}
+
+int ipa_qmi_disable_force_clear_datapath_send(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_qmi_disable_force_clear_datapath_send, req);
+
+ return ret;
+}
+
+int ipa_generate_tag_process(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_generate_tag_process);
+
+ return ret;
+}
+
+int ipa_disable_sps_pipe(enum ipa_client_type client)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_disable_sps_pipe, client);
+
+ return ret;
+}
+
+int ipa_mhi_reset_channel_internal(enum ipa_client_type client)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mhi_reset_channel_internal, client);
+
+ return ret;
+}
+
+int ipa_mhi_start_channel_internal(enum ipa_client_type client)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mhi_start_channel_internal, client);
+
+ return ret;
+}
+
+void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+ IPA_API_DISPATCH(ipa_get_holb, ep_idx, holb);
+}
+
+void ipa_set_tag_process_before_gating(bool val)
+{
+ IPA_API_DISPATCH(ipa_set_tag_process_before_gating, val);
+}
+
+int ipa_mhi_query_ch_info(enum ipa_client_type client,
+ struct gsi_chan_info *ch_info)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mhi_query_ch_info, client, ch_info);
+
+ return ret;
+}
+
+int ipa_uc_mhi_suspend_channel(int channelHandle)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_suspend_channel, channelHandle);
+
+ return ret;
+}
+
+int ipa_uc_mhi_stop_event_update_channel(int channelHandle)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_stop_event_update_channel,
+ channelHandle);
+
+ return ret;
+}
+
+bool ipa_has_open_aggr_frame(enum ipa_client_type client)
+{
+ bool ret;
+
+ IPA_API_DISPATCH_RETURN_BOOL(ipa_has_open_aggr_frame, client);
+
+ return ret;
+}
+
+int ipa_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mhi_resume_channels_internal, client,
+ LPTransitionRejected, brstmode_enabled, ch_scratch,
+ index);
+
+ return ret;
+}
+
+int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
{
int ret;
- IPA_API_DISPATCH_RETURN(ipa_mhi_resume);
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_send_dl_ul_sync_info,
+ cmd);
+
+ return ret;
+}
+
+int ipa_mhi_destroy_channel(enum ipa_client_type client)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mhi_destroy_channel, client);
+
+ return ret;
+}
+
+int ipa_uc_mhi_init(void (*ready_cb)(void),
+ void (*wakeup_request_cb)(void))
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_init, ready_cb, wakeup_request_cb);
+
+ return ret;
+}
+
+void ipa_uc_mhi_cleanup(void)
+{
+ IPA_API_DISPATCH(ipa_uc_mhi_cleanup);
+}
+
+int ipa_uc_mhi_print_stats(char *dbg_buff, int size)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_print_stats, dbg_buff, size);
return ret;
}
-EXPORT_SYMBOL(ipa_mhi_resume);
/**
- * ipa_mhi_destroy() - Destroy MHI IPA
+ * ipa_uc_state_check() - Check the status of the uC interface
*
- * This function is called by MHI client driver on MHI reset to destroy all IPA
- * MHI resources.
+ * Return value: 0 if the uC is loaded, interface is initialized
+ * and there was no recent failure in one of the commands.
+ * A negative value is returned otherwise.
*/
-void ipa_mhi_destroy(void)
+int ipa_uc_state_check(void)
{
- IPA_API_DISPATCH(ipa_mhi_destroy);
+ int ret;
+ IPA_API_DISPATCH_RETURN(ipa_uc_state_check);
+
+ return ret;
}
-EXPORT_SYMBOL(ipa_mhi_destroy);
int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
{
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 054b3654e9dc..e3fa4144cb84 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/ipa_mhi.h>
#include "ipa_common_i.h"
#ifndef _IPA_API_H_
@@ -214,20 +215,68 @@ struct ipa_api_controller {
void (*ipa_dma_destroy)(void);
- int (*ipa_mhi_init)(struct ipa_mhi_init_params *params);
+ bool (*ipa_has_open_aggr_frame)(enum ipa_client_type client);
- int (*ipa_mhi_start)(struct ipa_mhi_start_params *params);
+ int (*ipa_generate_tag_process)(void);
- int (*ipa_mhi_connect_pipe)(struct ipa_mhi_connect_params *in,
+ int (*ipa_disable_sps_pipe)(enum ipa_client_type client);
+
+ void (*ipa_set_tag_process_before_gating)(bool val);
+
+ int (*ipa_mhi_init_engine)(struct ipa_mhi_init_engine *params);
+
+ int (*ipa_connect_mhi_pipe)(struct ipa_mhi_connect_params_internal *in,
u32 *clnt_hdl);
- int (*ipa_mhi_disconnect_pipe)(u32 clnt_hdl);
+ int (*ipa_disconnect_mhi_pipe)(u32 clnt_hdl);
+
+ bool (*ipa_mhi_stop_gsi_channel)(enum ipa_client_type client);
+
+ int (*ipa_qmi_disable_force_clear)(u32 request_id);
+
+ int (*ipa_qmi_enable_force_clear_datapath_send)(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+
+ int (*ipa_qmi_disable_force_clear_datapath_send)(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+
+ bool (*ipa_mhi_sps_channel_empty)(enum ipa_client_type client);
+
+ int (*ipa_mhi_reset_channel_internal)(enum ipa_client_type client);
+
+ int (*ipa_mhi_start_channel_internal)(enum ipa_client_type client);
+
+ void (*ipa_get_holb)(int ep_idx, struct ipa_ep_cfg_holb *holb);
+
+ int (*ipa_mhi_query_ch_info)(enum ipa_client_type client,
+ struct gsi_chan_info *ch_info);
+
+ int (*ipa_mhi_resume_channels_internal)(
+ enum ipa_client_type client,
+ bool LPTransitionRejected,
+ bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch,
+ u8 index);
+
+ int (*ipa_mhi_destroy_channel)(enum ipa_client_type client);
+
+ int (*ipa_uc_mhi_send_dl_ul_sync_info)
+ (union IpaHwMhiDlUlSyncCmdData_t *cmd);
+
+ int (*ipa_uc_mhi_init)
+ (void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+
+ void (*ipa_uc_mhi_cleanup)(void);
+
+ int (*ipa_uc_mhi_print_stats)(char *dbg_buff, int size);
+
+ int (*ipa_uc_mhi_reset_channel)(int channelHandle);
- int (*ipa_mhi_suspend)(bool force);
+ int (*ipa_uc_mhi_suspend_channel)(int channelHandle);
- int (*ipa_mhi_resume)(void);
+ int (*ipa_uc_mhi_stop_event_update_channel)(int channelHandle);
- void (*ipa_mhi_destroy)(void);
+ int (*ipa_uc_state_check)(void);
int (*ipa_write_qmap_id)(struct ipa_ioc_write_qmapid *param_in);
diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile
index 26b45a6c2b42..aac473f62751 100644
--- a/drivers/platform/msm/ipa/ipa_clients/Makefile
+++ b/drivers/platform/msm/ipa/ipa_clients/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o
-obj-$(CONFIG_IPA) += odu_bridge.o \ No newline at end of file
+obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o
+obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o \ No newline at end of file
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
new file mode 100644
index 000000000000..b5f84bdafea8
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -0,0 +1,2589 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include <linux/msm_gsi.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <linux/ipa_mhi.h>
+#include "../ipa_common_i.h"
+
+#define IPA_MHI_DRV_NAME "ipa_mhi_client"
+#define IPA_MHI_DBG(fmt, args...) \
+ pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args)
+#define IPA_MHI_ERR(fmt, args...) \
+ pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPA_MHI_FUNC_ENTRY() \
+ IPA_MHI_DBG("ENTRY\n")
+#define IPA_MHI_FUNC_EXIT() \
+ IPA_MHI_DBG("EXIT\n")
+
+#define IPA_MHI_RM_TIMEOUT_MSEC 10000
+#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10
+
+#define IPA_MHI_SUSPEND_SLEEP_MIN 900
+#define IPA_MHI_SUSPEND_SLEEP_MAX 1100
+
+#define IPA_MHI_MAX_UL_CHANNELS 1
+#define IPA_MHI_MAX_DL_CHANNELS 1
+
+#define IPA_MHI_GSI_ER_START 10
+#define IPA_MHI_GSI_ER_END 16
+
+#if (IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) > \
+ (IPA_MHI_GSI_ER_END - IPA_MHI_GSI_ER_START)
+#error not enough event rings for MHI
+#endif
+
+/* bit #40 in address should be asserted for MHI transfers over pcie */
+#define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \
+ ((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
+
+enum ipa_mhi_rm_state {
+ IPA_MHI_RM_STATE_RELEASED,
+ IPA_MHI_RM_STATE_REQUESTED,
+ IPA_MHI_RM_STATE_GRANTED,
+ IPA_MHI_RM_STATE_MAX
+};
+
+enum ipa_mhi_state {
+ IPA_MHI_STATE_INITIALIZED,
+ IPA_MHI_STATE_READY,
+ IPA_MHI_STATE_STARTED,
+ IPA_MHI_STATE_SUSPEND_IN_PROGRESS,
+ IPA_MHI_STATE_SUSPENDED,
+ IPA_MHI_STATE_RESUME_IN_PROGRESS,
+ IPA_MHI_STATE_MAX
+};
+
+static char *ipa_mhi_state_str[] = {
+ __stringify(IPA_MHI_STATE_INITIALIZED),
+ __stringify(IPA_MHI_STATE_READY),
+ __stringify(IPA_MHI_STATE_STARTED),
+ __stringify(IPA_MHI_STATE_SUSPEND_IN_PROGRESS),
+ __stringify(IPA_MHI_STATE_SUSPENDED),
+ __stringify(IPA_MHI_STATE_RESUME_IN_PROGRESS),
+};
+
+#define MHI_STATE_STR(state) \
+ (((state) >= 0 && (state) < IPA_MHI_STATE_MAX) ? \
+ ipa_mhi_state_str[(state)] : \
+ "INVALID")
+
+enum ipa_mhi_dma_dir {
+ IPA_MHI_DMA_TO_HOST,
+ IPA_MHI_DMA_FROM_HOST,
+};
+
+/**
+ * struct ipa_mhi_channel_ctx - MHI Channel context
+ * @valid: entry is valid
+ * @id: MHI channel ID
+ * @hdl: channel handle for uC
+ * @client: IPA Client
+ * @state: Channel state
+ */
+struct ipa_mhi_channel_ctx {
+ bool valid;
+ u8 id;
+ u8 index;
+ enum ipa_client_type client;
+ enum ipa_hw_mhi_channel_states state;
+ bool stop_in_proc;
+ struct gsi_chan_info ch_info;
+ u64 channel_context_addr;
+ struct ipa_mhi_ch_ctx ch_ctx_host;
+ u64 event_context_addr;
+ struct ipa_mhi_ev_ctx ev_ctx_host;
+ bool brstmode_enabled;
+ union __packed gsi_channel_scratch ch_scratch;
+ unsigned long cached_gsi_evt_ring_hdl;
+};
+
+struct ipa_mhi_client_ctx {
+ enum ipa_mhi_state state;
+ spinlock_t state_lock;
+ mhi_client_cb cb_notify;
+ void *cb_priv;
+ struct completion rm_prod_granted_comp;
+ enum ipa_mhi_rm_state rm_cons_state;
+ struct completion rm_cons_comp;
+ bool trigger_wakeup;
+ bool wakeup_notified;
+ struct workqueue_struct *wq;
+ struct ipa_mhi_channel_ctx ul_channels[IPA_MHI_MAX_UL_CHANNELS];
+ struct ipa_mhi_channel_ctx dl_channels[IPA_MHI_MAX_DL_CHANNELS];
+ u32 total_channels;
+ struct ipa_mhi_msi_info msi;
+ u32 mmio_addr;
+ u32 first_ch_idx;
+ u32 first_er_idx;
+ u32 host_ctrl_addr;
+ u32 host_data_addr;
+ u64 channel_context_array_addr;
+ u64 event_context_array_addr;
+ u32 qmi_req_id;
+ u32 use_ipadma;
+ bool assert_bit40;
+ bool test_mode;
+};
+
+static struct ipa_mhi_client_ctx *ipa_mhi_client_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+#define IPA_MHI_MAX_MSG_LEN 512
+static char dbg_buff[IPA_MHI_MAX_MSG_LEN];
+static struct dentry *dent;
+
+static char *ipa_mhi_channel_state_str[] = {
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_DISABLE),
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_ENABLE),
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_RUN),
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_SUSPEND),
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_STOP),
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_ERROR),
+};
+
+#define MHI_CH_STATE_STR(state) \
+ (((state) >= 0 && (state) <= IPA_HW_MHI_CHANNEL_STATE_ERROR) ? \
+ ipa_mhi_channel_state_str[(state)] : \
+ "INVALID")
+
+static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr,
+ u64 host_addr, int size)
+{
+ struct ipa_mem_buffer mem;
+ int res;
+ struct device *pdev;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (ipa_mhi_client_ctx->use_ipadma) {
+ pdev = ipa_get_dma_dev();
+ host_addr = IPA_MHI_CLIENT_HOST_ADDR_COND(host_addr);
+
+ mem.size = size;
+ mem.base = dma_alloc_coherent(pdev, mem.size,
+ &mem.phys_base, GFP_KERNEL);
+ if (!mem.base) {
+ IPA_MHI_ERR(
+ "dma_alloc_coherent failed, DMA buff size %d\n"
+ , mem.size);
+ return -ENOMEM;
+ }
+
+ if (dir == IPA_MHI_DMA_FROM_HOST) {
+ res = ipa_dma_sync_memcpy(mem.phys_base, host_addr,
+ size);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_dma_sync_memcpy from host fail%d\n"
+ , res);
+ goto fail_memcopy;
+ }
+ memcpy(dev_addr, mem.base, size);
+ } else {
+ memcpy(mem.base, dev_addr, size);
+ res = ipa_dma_sync_memcpy(host_addr, mem.phys_base,
+ size);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_dma_sync_memcpy to host fail %d\n"
+ , res);
+ goto fail_memcopy;
+ }
+ }
+ dma_free_coherent(pdev, mem.size, mem.base,
+ mem.phys_base);
+ } else {
+ void *host_ptr;
+
+ if (!ipa_mhi_client_ctx->test_mode)
+ host_ptr = ioremap(host_addr, size);
+ else
+ host_ptr = phys_to_virt(host_addr);
+ if (!host_ptr) {
+ IPA_MHI_ERR("ioremap failed for 0x%llx\n", host_addr);
+ return -EFAULT;
+ }
+ if (dir == IPA_MHI_DMA_FROM_HOST)
+ memcpy(dev_addr, host_ptr, size);
+ else
+ memcpy(host_ptr, dev_addr, size);
+ if (!ipa_mhi_client_ctx->test_mode)
+ iounmap(host_ptr);
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_memcopy:
+ dma_free_coherent(ipa_get_dma_dev(), mem.size, mem.base,
+ mem.phys_base);
+ return res;
+}
+
+static int ipa_mhi_print_channel_info(struct ipa_mhi_channel_ctx *channel,
+ char *buff, int len)
+{
+ int nbytes = 0;
+
+ if (channel->valid) {
+ nbytes += scnprintf(&buff[nbytes],
+ len - nbytes,
+ "channel idx=%d ch_id=%d client=%d state=%s\n",
+ channel->index, channel->id, channel->client,
+ MHI_CH_STATE_STR(channel->state));
+
+ nbytes += scnprintf(&buff[nbytes],
+ len - nbytes,
+ " ch_ctx=%llx\n",
+ channel->channel_context_addr);
+
+ nbytes += scnprintf(&buff[nbytes],
+ len - nbytes,
+ " gsi_evt_ring_hdl=%ld ev_ctx=%llx\n",
+ channel->cached_gsi_evt_ring_hdl,
+ channel->event_context_addr);
+ }
+ return nbytes;
+}
+
+static int ipa_mhi_print_host_channel_ctx_info(
+ struct ipa_mhi_channel_ctx *channel, char *buff, int len)
+{
+ int res, nbytes = 0;
+ struct ipa_mhi_ch_ctx ch_ctx_host;
+
+ memset(&ch_ctx_host, 0, sizeof(ch_ctx_host));
+
+ /* reading ch context from host */
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+ &ch_ctx_host, channel->channel_context_addr,
+ sizeof(ch_ctx_host));
+ if (res) {
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "Failed to read from host %d\n", res);
+ return nbytes;
+ }
+
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "ch_id: %d\n", channel->id);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "chstate: 0x%x\n", ch_ctx_host.chstate);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "brstmode: 0x%x\n", ch_ctx_host.brstmode);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "chtype: 0x%x\n", ch_ctx_host.chtype);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "erindex: 0x%x\n", ch_ctx_host.erindex);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "rbase: 0x%llx\n", ch_ctx_host.rbase);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "rlen: 0x%llx\n", ch_ctx_host.rlen);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "rp: 0x%llx\n", ch_ctx_host.rp);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "wp: 0x%llx\n", ch_ctx_host.wp);
+
+ return nbytes;
+}
+
+static ssize_t ipa_mhi_debugfs_stats(struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int i;
+ struct ipa_mhi_channel_ctx *channel;
+
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "IPA MHI state: %s\n",
+ MHI_STATE_STR(ipa_mhi_client_ctx->state));
+
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->ul_channels[i];
+ nbytes += ipa_mhi_print_channel_info(channel,
+ &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+ }
+
+ for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->dl_channels[i];
+ nbytes += ipa_mhi_print_channel_info(channel,
+ &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_mhi_debugfs_uc_stats(struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+
+ nbytes += ipa_uc_mhi_print_stats(dbg_buff, IPA_MHI_MAX_MSG_LEN);
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_mhi_debugfs_dump_host_ch_ctx_arr(struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int i, nbytes = 0;
+ struct ipa_mhi_channel_ctx *channel;
+
+ if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_INITIALIZED ||
+ ipa_mhi_client_ctx->state == IPA_MHI_STATE_READY) {
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "Cannot dump host channel context ");
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "before IPA MHI was STARTED\n");
+ return simple_read_from_buffer(ubuf, count, ppos,
+ dbg_buff, nbytes);
+ }
+ if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "IPA MHI is suspended, cannot dump channel ctx array");
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ " from host -PCIe can be in D3 state\n");
+ return simple_read_from_buffer(ubuf, count, ppos,
+ dbg_buff, nbytes);
+ }
+
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "channel contex array - dump from host\n");
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "***** UL channels *******\n");
+
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->ul_channels[i];
+ if (!channel->valid)
+ continue;
+ nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
+ &dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes);
+ }
+
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "\n***** DL channels *******\n");
+
+ for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->dl_channels[i];
+ if (!channel->valid)
+ continue;
+ nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
+ &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+const struct file_operations ipa_mhi_stats_ops = {
+ .read = ipa_mhi_debugfs_stats,
+};
+
+const struct file_operations ipa_mhi_uc_stats_ops = {
+ .read = ipa_mhi_debugfs_uc_stats,
+};
+
+const struct file_operations ipa_mhi_dump_host_ch_ctx_ops = {
+ .read = ipa_mhi_debugfs_dump_host_ch_ctx_arr,
+};
+
+
+static void ipa_mhi_debugfs_init(void)
+{
+ const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP;
+ struct dentry *file;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ dent = debugfs_create_dir("ipa_mhi", 0);
+ if (IS_ERR(dent)) {
+ IPA_MHI_ERR("fail to create folder ipa_mhi\n");
+ return;
+ }
+
+ file = debugfs_create_file("stats", read_only_mode, dent,
+ 0, &ipa_mhi_stats_ops);
+ if (!file || IS_ERR(file)) {
+ IPA_MHI_ERR("fail to create file stats\n");
+ goto fail;
+ }
+
+ file = debugfs_create_file("uc_stats", read_only_mode, dent,
+ 0, &ipa_mhi_uc_stats_ops);
+ if (!file || IS_ERR(file)) {
+ IPA_MHI_ERR("fail to create file uc_stats\n");
+ goto fail;
+ }
+
+ file = debugfs_create_u32("use_ipadma", read_write_mode, dent,
+ &ipa_mhi_client_ctx->use_ipadma);
+ if (!file || IS_ERR(file)) {
+ IPA_MHI_ERR("fail to create file use_ipadma\n");
+ goto fail;
+ }
+
+ file = debugfs_create_file("dump_host_channel_ctx_array",
+ read_only_mode, dent, 0, &ipa_mhi_dump_host_ch_ctx_ops);
+ if (!file || IS_ERR(file)) {
+ IPA_MHI_ERR("fail to create file dump_host_channel_ctx_arr\n");
+ goto fail;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return;
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+#else
+static void ipa_mhi_debugfs_init(void) {}
+static void ipa_mhi_debugfs_destroy(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+static union IpaHwMhiDlUlSyncCmdData_t ipa_cached_dl_ul_sync_info;
+
+static void ipa_mhi_wq_notify_wakeup(struct work_struct *work);
+static DECLARE_WORK(ipa_mhi_notify_wakeup_work, ipa_mhi_wq_notify_wakeup);
+
+static void ipa_mhi_wq_notify_ready(struct work_struct *work);
+static DECLARE_WORK(ipa_mhi_notify_ready_work, ipa_mhi_wq_notify_ready);
+
+/**
+ * ipa_mhi_notify_wakeup() - Schedule work to notify data available
+ *
+ * This function will schedule a work to notify data available event.
+ * In case this function is called more than once, only one notification will
+ * be sent to MHI client driver. No further notifications will be sent until
+ * IPA MHI state will become STARTED.
+ */
+static void ipa_mhi_notify_wakeup(void)
+{
+ IPA_MHI_FUNC_ENTRY();
+ if (ipa_mhi_client_ctx->wakeup_notified) {
+ IPA_MHI_DBG("wakeup already called\n");
+ return;
+ }
+ queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_wakeup_work);
+ ipa_mhi_client_ctx->wakeup_notified = true;
+ IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_rm_cons_request() - callback function for IPA RM request resource
+ *
+ * In case IPA MHI is not suspended, MHI CONS will be granted immediately.
+ * In case IPA MHI is suspended, MHI CONS will be granted after resume.
+ */
+static int ipa_mhi_rm_cons_request(void)
+{
+ unsigned long flags;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state));
+ spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+ ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_REQUESTED;
+ if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_STARTED) {
+ ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
+ res = 0;
+ } else if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
+ ipa_mhi_notify_wakeup();
+ res = -EINPROGRESS;
+ } else if (ipa_mhi_client_ctx->state ==
+ IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
+ /* wakeup event will be trigger after suspend finishes */
+ ipa_mhi_client_ctx->trigger_wakeup = true;
+ res = -EINPROGRESS;
+ } else {
+ res = -EINPROGRESS;
+ }
+
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+ IPA_MHI_DBG("EXIT with %d\n", res);
+ return res;
+}
+
+static int ipa_mhi_rm_cons_release(void)
+{
+ unsigned long flags;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+ ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
+ complete_all(&ipa_mhi_client_ctx->rm_cons_comp);
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+static void ipa_mhi_rm_prod_notify(void *user_data, enum ipa_rm_event event,
+ unsigned long data)
+{
+ IPA_MHI_FUNC_ENTRY();
+
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ IPA_MHI_DBG("IPA_RM_RESOURCE_GRANTED\n");
+ complete_all(&ipa_mhi_client_ctx->rm_prod_granted_comp);
+ break;
+
+ case IPA_RM_RESOURCE_RELEASED:
+ IPA_MHI_DBG("IPA_RM_RESOURCE_RELEASED\n");
+ break;
+
+ default:
+ IPA_MHI_ERR("unexpected event %d\n", event);
+ WARN_ON(1);
+ break;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available
+ *
+ * This function is called from IPA MHI workqueue to notify
+ * MHI client driver on data available event.
+ */
+static void ipa_mhi_wq_notify_wakeup(struct work_struct *work)
+{
+ IPA_MHI_FUNC_ENTRY();
+ ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
+ IPA_MHI_EVENT_DATA_AVAILABLE, 0);
+ IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_wq_notify_ready() - Notify MHI client on ready
+ *
+ * This function is called from IPA MHI workqueue to notify
+ * MHI client driver on ready event when IPA uC is loaded
+ */
+static void ipa_mhi_wq_notify_ready(struct work_struct *work)
+{
+ IPA_MHI_FUNC_ENTRY();
+ ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
+ IPA_MHI_EVENT_READY, 0);
+ IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_notify_ready() - Schedule work to notify ready
+ *
+ * This function will schedule a work to notify ready event.
+ * Deferring to the workqueue keeps the notification out of the caller's
+ * context (ipa_mhi_set_state() invokes this under state_lock).
+ */
+static void ipa_mhi_notify_ready(void)
+{
+ IPA_MHI_FUNC_ENTRY();
+ queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_ready_work);
+ IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_set_state() - Set new state to IPA MHI
+ * @state: new state
+ *
+ * Sets a new state to IPA MHI if possible according to IPA MHI state machine.
+ * In some state transitions a wakeup request will be triggered.
+ *
+ * Returns: 0 on success, -1 otherwise
+ */
+static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
+{
+ unsigned long flags;
+ int res = -EPERM;
+
+ spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+ IPA_MHI_DBG("Current state: %s\n",
+ MHI_STATE_STR(ipa_mhi_client_ctx->state));
+
+ /* Only transitions explicitly listed below are legal; anything else
+ * leaves res = -EPERM and the state unchanged.
+ */
+ switch (ipa_mhi_client_ctx->state) {
+ case IPA_MHI_STATE_INITIALIZED:
+ if (new_state == IPA_MHI_STATE_READY) {
+ ipa_mhi_notify_ready();
+ res = 0;
+ }
+ break;
+
+ case IPA_MHI_STATE_READY:
+ if (new_state == IPA_MHI_STATE_READY)
+ res = 0;
+ if (new_state == IPA_MHI_STATE_STARTED)
+ res = 0;
+ break;
+
+ case IPA_MHI_STATE_STARTED:
+ if (new_state == IPA_MHI_STATE_INITIALIZED)
+ res = 0;
+ else if (new_state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
+ res = 0;
+ break;
+
+ case IPA_MHI_STATE_SUSPEND_IN_PROGRESS:
+ if (new_state == IPA_MHI_STATE_SUSPENDED) {
+ /* a wakeup was requested while suspending; fire it now
+ * that the suspend has completed
+ */
+ if (ipa_mhi_client_ctx->trigger_wakeup) {
+ ipa_mhi_client_ctx->trigger_wakeup = false;
+ ipa_mhi_notify_wakeup();
+ }
+ res = 0;
+ } else if (new_state == IPA_MHI_STATE_STARTED) {
+ /* suspend aborted: grant a CONS request that arrived
+ * while the suspend was in flight
+ */
+ ipa_mhi_client_ctx->wakeup_notified = false;
+ ipa_mhi_client_ctx->trigger_wakeup = false;
+ if (ipa_mhi_client_ctx->rm_cons_state ==
+ IPA_MHI_RM_STATE_REQUESTED) {
+ ipa_rm_notify_completion(
+ IPA_RM_RESOURCE_GRANTED,
+ IPA_RM_RESOURCE_MHI_CONS);
+ ipa_mhi_client_ctx->rm_cons_state =
+ IPA_MHI_RM_STATE_GRANTED;
+ }
+ res = 0;
+ }
+ break;
+
+ case IPA_MHI_STATE_SUSPENDED:
+ if (new_state == IPA_MHI_STATE_RESUME_IN_PROGRESS)
+ res = 0;
+ break;
+
+ case IPA_MHI_STATE_RESUME_IN_PROGRESS:
+ if (new_state == IPA_MHI_STATE_SUSPENDED) {
+ /* resume failed/rejected; deliver deferred wakeup */
+ if (ipa_mhi_client_ctx->trigger_wakeup) {
+ ipa_mhi_client_ctx->trigger_wakeup = false;
+ ipa_mhi_notify_wakeup();
+ }
+ res = 0;
+ } else if (new_state == IPA_MHI_STATE_STARTED) {
+ ipa_mhi_client_ctx->trigger_wakeup = false;
+ ipa_mhi_client_ctx->wakeup_notified = false;
+ if (ipa_mhi_client_ctx->rm_cons_state ==
+ IPA_MHI_RM_STATE_REQUESTED) {
+ ipa_rm_notify_completion(
+ IPA_RM_RESOURCE_GRANTED,
+ IPA_RM_RESOURCE_MHI_CONS);
+ ipa_mhi_client_ctx->rm_cons_state =
+ IPA_MHI_RM_STATE_GRANTED;
+ }
+ res = 0;
+ }
+ break;
+
+ default:
+ IPA_MHI_ERR("Invalid state %d\n", ipa_mhi_client_ctx->state);
+ WARN_ON(1);
+ }
+
+ if (res)
+ IPA_MHI_ERR("Invalid state change to %s\n",
+ MHI_STATE_STR(new_state));
+ else {
+ IPA_MHI_DBG("New state change to %s\n",
+ MHI_STATE_STR(new_state));
+ ipa_mhi_client_ctx->state = new_state;
+ }
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+ return res;
+}
+
+/* uC "ready" callback: advance the IPA MHI state machine to READY
+ * (which in turn schedules the client ready notification).
+ */
+static void ipa_mhi_uc_ready_cb(void)
+{
+ IPA_MHI_FUNC_ENTRY();
+ ipa_mhi_set_state(IPA_MHI_STATE_READY);
+ IPA_MHI_FUNC_EXIT();
+}
+
+/* uC wakeup-request callback: notify the MHI client immediately when
+ * fully suspended, or defer the notification until an in-flight suspend
+ * completes (handled in ipa_mhi_set_state()).
+ */
+static void ipa_mhi_uc_wakeup_request_cb(void)
+{
+ unsigned long flags;
+
+ IPA_MHI_FUNC_ENTRY();
+ /* debug read of state is done outside the lock; the decision below is
+ * re-made under state_lock
+ */
+ IPA_MHI_DBG("MHI state: %s\n",
+ MHI_STATE_STR(ipa_mhi_client_ctx->state));
+ spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+ if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED)
+ ipa_mhi_notify_wakeup();
+ else if (ipa_mhi_client_ctx->state ==
+ IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
+ /* wakeup event will be triggered after suspend finishes */
+ ipa_mhi_client_ctx->trigger_wakeup = true;
+
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+ IPA_MHI_FUNC_EXIT();
+}
+
+/* Request the MHI PROD resource from IPA RM and, if the grant is
+ * asynchronous (-EINPROGRESS), block until ipa_mhi_rm_prod_notify()
+ * signals the grant or IPA_MHI_RM_TIMEOUT_MSEC expires.
+ *
+ * Returns 0 when granted, -ETIME on timeout, or the RM error code.
+ */
+static int ipa_mhi_request_prod(void)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ reinit_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
+ IPA_MHI_DBG("requesting mhi prod\n");
+ res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
+ if (res) {
+ if (res != -EINPROGRESS) {
+ IPA_MHI_ERR("failed to request mhi prod %d\n", res);
+ return res;
+ }
+ /* wait_for_completion_timeout() returns 0 on timeout,
+ * otherwise the remaining jiffies
+ */
+ res = wait_for_completion_timeout(
+ &ipa_mhi_client_ctx->rm_prod_granted_comp,
+ msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
+ if (res == 0) {
+ IPA_MHI_ERR("timeout request mhi prod\n");
+ return -ETIME;
+ }
+ }
+
+ IPA_MHI_DBG("mhi prod granted\n");
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+}
+
+/* Release the MHI PROD resource back to IPA RM.
+ * Returns the IPA RM release result.
+ */
+static int ipa_mhi_release_prod(void)
+{
+ int ret;
+
+ IPA_MHI_FUNC_ENTRY();
+ ret = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
+ IPA_MHI_FUNC_EXIT();
+
+ return ret;
+}
+
+/**
+ * ipa_mhi_start() - Start IPA MHI engine
+ * @params: pcie addresses for MHI
+ *
+ * This function is called by MHI client driver on MHI engine start for
+ * handling MHI accelerated channels. This function is called after
+ * ipa_mhi_init() was called and can be called after MHI reset to restart MHI
+ * engine. When this function returns device can move to M0 state.
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_start(struct ipa_mhi_start_params *params)
+{
+ int res;
+ struct ipa_mhi_init_engine init_params;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!params) {
+ IPA_MHI_ERR("null args\n");
+ return -EINVAL;
+ }
+
+ if (!ipa_mhi_client_ctx) {
+ IPA_MHI_ERR("not initialized\n");
+ return -EPERM;
+ }
+
+ /* state machine gates re-entry: only READY (or STARTED) may move to
+ * STARTED
+ */
+ res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_set_state %d\n", res);
+ return res;
+ }
+
+ /* cache the host-side PCIe addresses for later context reads/writes */
+ ipa_mhi_client_ctx->host_ctrl_addr = params->host_ctrl_addr;
+ ipa_mhi_client_ctx->host_data_addr = params->host_data_addr;
+ ipa_mhi_client_ctx->channel_context_array_addr =
+ params->channel_context_array_addr;
+ ipa_mhi_client_ctx->event_context_array_addr =
+ params->event_context_array_addr;
+ IPA_MHI_DBG("host_ctrl_addr 0x%x\n",
+ ipa_mhi_client_ctx->host_ctrl_addr);
+ IPA_MHI_DBG("host_data_addr 0x%x\n",
+ ipa_mhi_client_ctx->host_data_addr);
+ IPA_MHI_DBG("channel_context_array_addr 0x%llx\n",
+ ipa_mhi_client_ctx->channel_context_array_addr);
+ IPA_MHI_DBG("event_context_array_addr 0x%llx\n",
+ ipa_mhi_client_ctx->event_context_array_addr);
+
+ /* Add MHI <-> Q6 dependencies to IPA RM */
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res && res != -EINPROGRESS) {
+ IPA_MHI_ERR("failed to add dependency %d\n", res);
+ goto fail_add_mhi_q6_dep;
+ }
+
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_MHI_CONS);
+ if (res && res != -EINPROGRESS) {
+ IPA_MHI_ERR("failed to add dependency %d\n", res);
+ goto fail_add_q6_mhi_dep;
+ }
+
+ res = ipa_mhi_request_prod();
+ if (res) {
+ IPA_MHI_ERR("failed request prod %d\n", res);
+ goto fail_request_prod;
+ }
+
+ /* NOTE(review): init_params is only partially initialized; the core is
+ * presumably expected to read the gsi or uC member according to the
+ * transport type -- confirm in ipa_mhi_init_engine().
+ */
+ /* gsi params */
+ init_params.gsi.first_ch_idx =
+ ipa_mhi_client_ctx->first_ch_idx;
+ /* uC params */
+ init_params.uC.first_ch_idx =
+ ipa_mhi_client_ctx->first_ch_idx;
+ init_params.uC.first_er_idx =
+ ipa_mhi_client_ctx->first_er_idx;
+ init_params.uC.host_ctrl_addr = params->host_ctrl_addr;
+ init_params.uC.host_data_addr = params->host_data_addr;
+ init_params.uC.mmio_addr = ipa_mhi_client_ctx->mmio_addr;
+ init_params.uC.msi = &ipa_mhi_client_ctx->msi;
+ init_params.uC.ipa_cached_dl_ul_sync_info =
+ &ipa_cached_dl_ul_sync_info;
+
+ res = ipa_mhi_init_engine(&init_params);
+ if (res) {
+ IPA_MHI_ERR("IPA core failed to start MHI %d\n", res);
+ goto fail_init_engine;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+/* unwind in reverse acquisition order */
+fail_init_engine:
+ ipa_mhi_release_prod();
+fail_request_prod:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_MHI_CONS);
+fail_add_q6_mhi_dep:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+fail_add_mhi_q6_dep:
+ ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
+ return res;
+}
+
+/**
+ * ipa_mhi_get_channel_context() - Get corresponding channel context
+ * @client: IPA client type (direction selects the UL or DL array)
+ * @channel_id: Channel ID
+ *
+ * This function will return the corresponding channel context or allocate new
+ * one in case channel context for channel does not exist.
+ * Returns NULL when the direction's array is full.
+ */
+static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context(
+ enum ipa_client_type client, u8 channel_id)
+{
+ int ch_idx;
+ struct ipa_mhi_channel_ctx *channels;
+ int max_channels;
+
+ if (IPA_CLIENT_IS_PROD(client)) {
+ channels = ipa_mhi_client_ctx->ul_channels;
+ max_channels = IPA_MHI_MAX_UL_CHANNELS;
+ } else {
+ channels = ipa_mhi_client_ctx->dl_channels;
+ max_channels = IPA_MHI_MAX_DL_CHANNELS;
+ }
+
+ /* find the channel context according to channel id */
+ for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
+ if (channels[ch_idx].valid &&
+ channels[ch_idx].id == channel_id)
+ return &channels[ch_idx];
+ }
+
+ /* channel context does not exist, allocate a new one */
+ for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
+ if (!channels[ch_idx].valid)
+ break;
+ }
+
+ if (ch_idx == max_channels) {
+ IPA_MHI_ERR("no more channels available\n");
+ return NULL;
+ }
+
+ channels[ch_idx].valid = true;
+ channels[ch_idx].id = channel_id;
+ /* index is a monotonically increasing global slot number shared by
+ * both directions; it is never reclaimed
+ */
+ channels[ch_idx].index = ipa_mhi_client_ctx->total_channels++;
+ channels[ch_idx].client = client;
+ channels[ch_idx].state = IPA_HW_MHI_CHANNEL_STATE_INVALID;
+
+ return &channels[ch_idx];
+}
+
+/**
+ * ipa_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel
+ * context
+ * @clnt_hdl: client handle as provided in ipa_mhi_connect_pipe()
+ *
+ * This function will return the corresponding channel context or NULL in case
+ * that channel does not exist.
+ * Matches by mapping each valid channel's client type back to an EP index
+ * and comparing it against @clnt_hdl; UL channels are searched first.
+ */
+static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context_by_clnt_hdl(
+ u32 clnt_hdl)
+{
+ int ch_idx;
+
+ for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) {
+ if (ipa_mhi_client_ctx->ul_channels[ch_idx].valid &&
+ ipa_get_ep_mapping(
+ ipa_mhi_client_ctx->ul_channels[ch_idx].client)
+ == clnt_hdl)
+ return &ipa_mhi_client_ctx->ul_channels[ch_idx];
+ }
+
+ for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) {
+ if (ipa_mhi_client_ctx->dl_channels[ch_idx].valid &&
+ ipa_get_ep_mapping(
+ ipa_mhi_client_ctx->dl_channels[ch_idx].client)
+ == clnt_hdl)
+ return &ipa_mhi_client_ctx->dl_channels[ch_idx];
+ }
+
+ return NULL;
+}
+
+/* Dump the cached host channel context (ch_ctx_host) to the debug log. */
+static void ipa_mhi_dump_ch_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+ IPA_MHI_DBG("ch_id %d\n", channel->id);
+ IPA_MHI_DBG("chstate 0x%x\n", channel->ch_ctx_host.chstate);
+ IPA_MHI_DBG("brstmode 0x%x\n", channel->ch_ctx_host.brstmode);
+ IPA_MHI_DBG("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg);
+ IPA_MHI_DBG("chtype 0x%x\n", channel->ch_ctx_host.chtype);
+ IPA_MHI_DBG("erindex 0x%x\n", channel->ch_ctx_host.erindex);
+ IPA_MHI_DBG("rbase 0x%llx\n", channel->ch_ctx_host.rbase);
+ IPA_MHI_DBG("rlen 0x%llx\n", channel->ch_ctx_host.rlen);
+ IPA_MHI_DBG("rp 0x%llx\n", channel->ch_ctx_host.rp);
+ IPA_MHI_DBG("wp 0x%llx\n", channel->ch_ctx_host.wp);
+}
+
+/* Dump the cached host event ring context (ev_ctx_host) to the debug log. */
+static void ipa_mhi_dump_ev_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+ IPA_MHI_DBG("ch_id %d event id %d\n", channel->id,
+ channel->ch_ctx_host.erindex);
+
+ IPA_MHI_DBG("intmodc 0x%x\n", channel->ev_ctx_host.intmodc);
+ IPA_MHI_DBG("intmodt 0x%x\n", channel->ev_ctx_host.intmodt);
+ IPA_MHI_DBG("ertype 0x%x\n", channel->ev_ctx_host.ertype);
+ IPA_MHI_DBG("msivec 0x%x\n", channel->ev_ctx_host.msivec);
+ IPA_MHI_DBG("rbase 0x%llx\n", channel->ev_ctx_host.rbase);
+ IPA_MHI_DBG("rlen 0x%llx\n", channel->ev_ctx_host.rlen);
+ IPA_MHI_DBG("rp 0x%llx\n", channel->ev_ctx_host.rp);
+ IPA_MHI_DBG("wp 0x%llx\n", channel->ev_ctx_host.wp);
+}
+
+/* Read the channel context, and then its referenced event ring context,
+ * from host memory over PCIe into the channel's local caches.
+ * Also derives channel->event_context_addr from the channel's erindex.
+ * Returns 0 on success or the DMA read error.
+ */
+static int ipa_mhi_read_ch_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+ int res;
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+ &channel->ch_ctx_host, channel->channel_context_addr,
+ sizeof(channel->ch_ctx_host));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+ return res;
+
+ }
+ ipa_mhi_dump_ch_ctx(channel);
+
+ /* event ring context address is indexed by the channel's erindex */
+ channel->event_context_addr =
+ ipa_mhi_client_ctx->event_context_array_addr +
+ channel->ch_ctx_host.erindex * sizeof(struct ipa_mhi_ev_ctx);
+ IPA_MHI_DBG("ch %d event_context_addr 0x%llx\n", channel->id,
+ channel->event_context_addr);
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+ &channel->ev_ctx_host, channel->event_context_addr,
+ sizeof(channel->ev_ctx_host));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+ return res;
+
+ }
+ ipa_mhi_dump_ev_ctx(channel);
+
+ return 0;
+}
+
+/* GSI event-ring error callback: log the failing channel and decode the
+ * event-level error id. Diagnostic only; no recovery is attempted here.
+ */
+static void ipa_mhi_gsi_ev_err_cb(struct gsi_evt_err_notify *notify)
+{
+ struct ipa_mhi_channel_ctx *channel = notify->user_data;
+
+ IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
+ channel->id, channel->client, channel->state);
+ switch (notify->evt_id) {
+ case GSI_EVT_OUT_OF_BUFFERS_ERR:
+ IPA_MHI_ERR("Received GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+ break;
+ case GSI_EVT_OUT_OF_RESOURCES_ERR:
+ IPA_MHI_ERR("Received GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+ break;
+ case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+ IPA_MHI_ERR("Received GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+ break;
+ case GSI_EVT_EVT_RING_EMPTY_ERR:
+ IPA_MHI_ERR("Received GSI_EVT_EVT_RING_EMPTY_ERR\n");
+ break;
+ default:
+ IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
+ }
+ IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
+}
+
+/* GSI channel error callback: log the failing channel and decode the
+ * channel-level error id. Diagnostic only; no recovery is attempted here.
+ */
+static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify)
+{
+ struct ipa_mhi_channel_ctx *channel = notify->chan_user_data;
+
+ IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
+ channel->id, channel->client, channel->state);
+ switch (notify->evt_id) {
+ case GSI_CHAN_INVALID_TRE_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_INVALID_TRE_ERR\n");
+ break;
+ case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+ break;
+ case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+ break;
+ case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+ break;
+ case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+ break;
+ case GSI_CHAN_HWO_1_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_HWO_1_ERR\n");
+ break;
+ default:
+ IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
+ }
+ IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
+}
+
+
+/* Poll whether a GSI channel has finished draining.
+ *
+ * A channel that is not in STOP_IN_PROC is considered empty. Otherwise
+ * the GSI stop is retried; on success the STOP_IN_PROC flag is cleared
+ * and the channel is reported empty.
+ */
+static bool ipa_mhi_gsi_channel_empty(struct ipa_mhi_channel_ctx *channel)
+{
+ bool empty = true;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (channel->stop_in_proc) {
+ if (ipa_mhi_stop_gsi_channel(channel->client))
+ channel->stop_in_proc = false;
+ else
+ empty = false;
+ } else {
+ IPA_MHI_DBG("Channel is not in STOP_IN_PROC\n");
+ }
+
+ return empty;
+}
+
+/**
+ * ipa_mhi_wait_for_ul_empty_timeout() - wait for pending packets in uplink
+ * @msecs: timeout to wait
+ *
+ * This function will poll until there are no packets pending in uplink channels
+ * or timeout occurred.
+ *
+ * Return code: true - no pending packets in uplink channels
+ * false - timeout occurred
+ */
+static bool ipa_mhi_wait_for_ul_empty_timeout(unsigned int msecs)
+{
+ unsigned long jiffies_timeout = msecs_to_jiffies(msecs);
+ unsigned long jiffies_start = jiffies;
+ bool empty = false;
+ int i;
+
+ IPA_MHI_FUNC_ENTRY();
+ while (!empty) {
+ /* AND together the per-channel empties; one busy channel
+ * keeps the loop going
+ */
+ empty = true;
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ if (!ipa_mhi_client_ctx->ul_channels[i].valid)
+ continue;
+ if (ipa_get_transport_type() ==
+ IPA_TRANSPORT_TYPE_GSI)
+ empty &= ipa_mhi_gsi_channel_empty(
+ &ipa_mhi_client_ctx->ul_channels[i]);
+ else
+ empty &= ipa_mhi_sps_channel_empty(
+ ipa_mhi_client_ctx->ul_channels[i].client);
+ }
+
+ /* give up on timeout; 'empty' reflects the last poll */
+ if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
+ IPA_MHI_DBG("finished waiting for UL empty\n");
+ break;
+ }
+
+ /* NOTE(review): sleeps between polls only for GSI with a
+ * single UL channel; otherwise this busy-waits -- confirm
+ * intent.
+ */
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI &&
+ IPA_MHI_MAX_UL_CHANNELS == 1)
+ usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+ IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+ }
+
+ IPA_MHI_DBG("IPA UL is %s\n", (empty) ? "empty" : "not empty");
+
+ IPA_MHI_FUNC_EXIT();
+ return empty;
+}
+
+/* Ask the modem (over QMI) to force-clear the IPA datapath for every
+ * valid UL channel's source pipe.
+ * @request_id: correlates with the matching ipa_mhi_disable_force_clear()
+ * @throttle_source: also request throttling of the source
+ * Returns 0 on success or the QMI send error.
+ */
+static int ipa_mhi_enable_force_clear(u32 request_id, bool throttle_source)
+{
+ struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+ int i;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ memset(&req, 0, sizeof(req));
+ req.request_id = request_id;
+ req.source_pipe_bitmask = 0;
+ /* one bit per valid UL channel's EP index */
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ if (!ipa_mhi_client_ctx->ul_channels[i].valid)
+ continue;
+ req.source_pipe_bitmask |= 1 << ipa_get_ep_mapping(
+ ipa_mhi_client_ctx->ul_channels[i].client);
+ }
+ if (throttle_source) {
+ req.throttle_source_valid = 1;
+ req.throttle_source = 1;
+ }
+ IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n",
+ req.request_id, req.source_pipe_bitmask,
+ req.throttle_source);
+ res = ipa_qmi_enable_force_clear_datapath_send(&req);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_qmi_enable_force_clear_datapath_send failed %d\n"
+ , res);
+ return res;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+/* Revoke a previously enabled datapath force-clear via QMI.
+ * @request_id: must match the id passed to ipa_mhi_enable_force_clear()
+ * Returns 0 on success or the QMI send error.
+ */
+static int ipa_mhi_disable_force_clear(u32 request_id)
+{
+ struct ipa_disable_force_clear_datapath_req_msg_v01 req;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ memset(&req, 0, sizeof(req));
+ req.request_id = request_id;
+ IPA_MHI_DBG("req_id=0x%x\n", req.request_id);
+ res = ipa_qmi_disable_force_clear_datapath_send(&req);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_qmi_disable_force_clear_datapath_send failed %d\n"
+ , res);
+ return res;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+/* Toggle head-of-line-blocking drop on every active DL channel.
+ * @enable: true saves the current HOLB config into @old_holb and turns
+ * on immediate drop (tmr_val 0); false restores the saved config.
+ * @old_holb: caller-provided array of IPA_MHI_MAX_DL_CHANNELS entries,
+ * filled on enable and consumed on disable.
+ * Asserts (no return value) on mapping or configuration failure.
+ */
+static void ipa_mhi_set_holb_on_dl_channels(bool enable,
+ struct ipa_ep_cfg_holb old_holb[])
+{
+ int i;
+ struct ipa_ep_cfg_holb ep_holb;
+ int ep_idx;
+ int res;
+
+ for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+ if (!ipa_mhi_client_ctx->dl_channels[i].valid)
+ continue;
+ if (ipa_mhi_client_ctx->dl_channels[i].state ==
+ IPA_HW_MHI_CHANNEL_STATE_INVALID)
+ continue;
+ ep_idx = ipa_get_ep_mapping(
+ ipa_mhi_client_ctx->dl_channels[i].client);
+ if (-1 == ep_idx) {
+ IPA_MHI_ERR("Client %u is not mapped\n",
+ ipa_mhi_client_ctx->dl_channels[i].client);
+ ipa_assert();
+ return;
+ }
+ memset(&ep_holb, 0, sizeof(ep_holb));
+ if (enable) {
+ /* save current config so it can be restored later */
+ ipa_get_holb(ep_idx, &old_holb[i]);
+ ep_holb.en = 1;
+ ep_holb.tmr_val = 0;
+ } else {
+ ep_holb = old_holb[i];
+ }
+ res = ipa_cfg_ep_holb(ep_idx, &ep_holb);
+ if (res) {
+ IPA_MHI_ERR("ipa_cfg_ep_holb failed %d\n", res);
+ ipa_assert();
+ return;
+ }
+ }
+}
+
+/* Stop (or begin stopping) a GSI channel.
+ * GSI may report -GSI_STATUS_AGAIN / -GSI_STATUS_TIMED_OUT when the stop
+ * could not complete synchronously; in that case stop_in_proc is set and
+ * the caller is expected to poll via ipa_mhi_gsi_channel_empty().
+ * Returns 0 on success (possibly still stopping) or -EFAULT on failure.
+ */
+static int ipa_mhi_suspend_gsi_channel(struct ipa_mhi_channel_ctx *channel)
+{
+ int clnt_hdl;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ clnt_hdl = ipa_get_ep_mapping(channel->client);
+ if (clnt_hdl < 0)
+ return -EFAULT;
+
+ res = ipa_stop_gsi_channel(clnt_hdl);
+ if (res != 0 && res != -GSI_STATUS_AGAIN &&
+ res != -GSI_STATUS_TIMED_OUT) {
+ IPA_MHI_ERR("GSI stop channel failed %d\n", res);
+ return -EFAULT;
+ }
+
+ /* check if channel was stopped completely */
+ if (res)
+ channel->stop_in_proc = true;
+
+ IPA_MHI_DBG("GSI channel is %s\n", (channel->stop_in_proc) ?
+ "STOP_IN_PROC" : "STOP");
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+/* Reset an uplink (host->device) channel.
+ * Stops the channel (GSI) or resets it via uC (SPS/BAM), drains pending
+ * UL traffic, escalating to a QMI force-clear if the drain times out,
+ * and finally performs the core channel reset.
+ * Asserts on force-clear or SPS-disable failures. Returns 0 on success.
+ */
+static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel)
+{
+ int res;
+ bool empty;
+ struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS];
+
+ IPA_MHI_FUNC_ENTRY();
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ res = ipa_mhi_suspend_gsi_channel(channel);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n",
+ res);
+ return res;
+ }
+ } else {
+ res = ipa_uc_mhi_reset_channel(channel->index);
+ if (res) {
+ IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
+ res);
+ return res;
+ }
+ }
+
+ empty = ipa_mhi_wait_for_ul_empty_timeout(
+ IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+ if (!empty) {
+ /* drain timed out: force-clear the datapath via the modem */
+ IPA_MHI_DBG("%s not empty\n",
+ (ipa_get_transport_type() ==
+ IPA_TRANSPORT_TYPE_GSI) ? "GSI" : "BAM");
+ res = ipa_mhi_enable_force_clear(
+ ipa_mhi_client_ctx->qmi_req_id, false);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_enable_force_clear failed %d\n",
+ res);
+ ipa_assert();
+ return res;
+ }
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ empty = ipa_mhi_wait_for_ul_empty_timeout(
+ IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+
+ IPA_MHI_DBG("empty=%d\n", empty);
+ } else {
+ /* enable packet drop on all DL channels */
+ ipa_mhi_set_holb_on_dl_channels(true, old_ep_holb);
+ ipa_generate_tag_process();
+ /* disable packet drop on all DL channels */
+ ipa_mhi_set_holb_on_dl_channels(false, old_ep_holb);
+
+ res = ipa_disable_sps_pipe(channel->client);
+ if (res) {
+ IPA_MHI_ERR("sps_pipe_disable fail %d\n", res);
+ ipa_assert();
+ return res;
+ }
+ }
+
+ res =
+ ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_disable_force_clear failed %d\n",
+ res);
+ ipa_assert();
+ return res;
+ }
+ /* new id for the next force-clear round trip */
+ ipa_mhi_client_ctx->qmi_req_id++;
+ }
+
+ res = ipa_mhi_reset_channel_internal(channel->client);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_reset_ul_channel_internal failed %d\n"
+ , res);
+ return res;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+
+ return 0;
+}
+
+/* Reset a downlink (device->host) channel.
+ * GSI: stop the channel first, then reset it in the core.
+ * uC/SPS: reset in the core first, then command the uC reset; on uC
+ * failure the core channel is restarted to undo the first step.
+ * Returns 0 on success or the first error encountered.
+ */
+static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ res = ipa_mhi_suspend_gsi_channel(channel);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n"
+ , res);
+ return res;
+ }
+
+ res = ipa_mhi_reset_channel_internal(channel->client);
+ if (res) {
+ /* fixed: log named the wrong ("ul") helper */
+ IPA_MHI_ERR(
+ "ipa_mhi_reset_channel_internal failed %d\n"
+ , res);
+ return res;
+ }
+ } else {
+ res = ipa_mhi_reset_channel_internal(channel->client);
+ if (res) {
+ /* fixed: log named the wrong ("ul") helper */
+ IPA_MHI_ERR(
+ "ipa_mhi_reset_channel_internal failed %d\n"
+ , res);
+ return res;
+ }
+
+ res = ipa_uc_mhi_reset_channel(channel->index);
+ if (res) {
+ IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
+ res);
+ /* undo the core reset performed above */
+ ipa_mhi_start_channel_internal(channel->client);
+ return res;
+ }
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+/* Reset a channel in either direction and mark it DISABLE.
+ * For GSI the new chstate is also written back to the host-side channel
+ * context over PCIe so the host view stays in sync.
+ * Returns 0 on success or the underlying reset/DMA error.
+ */
+static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ if (IPA_CLIENT_IS_PROD(channel->client))
+ res = ipa_mhi_reset_ul_channel(channel);
+ else
+ res = ipa_mhi_reset_dl_channel(channel);
+ if (res) {
+ IPA_MHI_ERR("failed to reset channel error %d\n", res);
+ return res;
+ }
+
+ channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ /* write only the chstate field of the host channel context */
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->state, channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, chstate),
+ sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+ return res;
+ }
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+/**
+ * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by MHI client driver on MHI channel start.
+ * This function is called after MHI engine was started.
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
+{
+ int res;
+ unsigned long flags;
+ struct ipa_mhi_channel_ctx *channel = NULL;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!in || !clnt_hdl) {
+ IPA_MHI_ERR("NULL args\n");
+ return -EINVAL;
+ }
+
+ /* fixed: the context must be NULL-checked BEFORE its state_lock is
+ * dereferenced below (the original took the lock first)
+ */
+ if (!ipa_mhi_client_ctx) {
+ IPA_MHI_ERR("IPA MHI was not started\n");
+ return -EINVAL;
+ }
+
+ if (in->sys.client >= IPA_CLIENT_MAX) {
+ IPA_MHI_ERR("bad param client:%d\n", in->sys.client);
+ return -EINVAL;
+ }
+
+ if (!IPA_CLIENT_IS_MHI(in->sys.client)) {
+ IPA_MHI_ERR(
+ "Invalid MHI client, client: %d\n", in->sys.client);
+ return -EINVAL;
+ }
+
+ IPA_MHI_DBG("channel=%d\n", in->channel_id);
+
+ spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+ if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_STARTED) {
+ IPA_MHI_ERR("IPA MHI was not started\n");
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+
+ channel = ipa_mhi_get_channel_context(in->sys.client, in->channel_id);
+ if (!channel) {
+ IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n");
+ return -EINVAL;
+ }
+
+ if (channel->state != IPA_HW_MHI_CHANNEL_STATE_INVALID &&
+ channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+ IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
+ return -EFAULT;
+ }
+
+ channel->channel_context_addr =
+ ipa_mhi_client_ctx->channel_context_array_addr +
+ channel->id * sizeof(struct ipa_mhi_ch_ctx);
+
+ /* for event context address index needs to read from host */
+
+ IPA_MHI_DBG("client %d channelHandle %d channelIndex %d, state %d\n",
+ channel->client, channel->index, channel->id, channel->state);
+ IPA_MHI_DBG("channel_context_addr 0x%llx\n",
+ channel->channel_context_addr);
+
+ IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ struct ipa_mhi_connect_params_internal internal;
+
+ IPA_MHI_DBG("reading ch/ev context from host\n");
+ res = ipa_mhi_read_ch_ctx(channel);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
+ goto fail_start_channel;
+ }
+
+ internal.channel_id = in->channel_id;
+ internal.sys = &in->sys;
+ internal.start.gsi.state = channel->state;
+ internal.start.gsi.msi = &ipa_mhi_client_ctx->msi;
+ internal.start.gsi.ev_ctx_host = &channel->ev_ctx_host;
+ internal.start.gsi.event_context_addr =
+ channel->event_context_addr;
+ internal.start.gsi.ch_ctx_host = &channel->ch_ctx_host;
+ internal.start.gsi.channel_context_addr =
+ channel->channel_context_addr;
+ internal.start.gsi.ch_err_cb = ipa_mhi_gsi_ch_err_cb;
+ internal.start.gsi.channel = (void *)channel;
+ internal.start.gsi.ev_err_cb = ipa_mhi_gsi_ev_err_cb;
+ internal.start.gsi.assert_bit40 =
+ ipa_mhi_client_ctx->assert_bit40;
+ internal.start.gsi.mhi = &channel->ch_scratch.mhi;
+ internal.start.gsi.cached_gsi_evt_ring_hdl =
+ &channel->cached_gsi_evt_ring_hdl;
+ internal.start.gsi.evchid =
+ channel->index + IPA_MHI_GSI_ER_START;
+
+ res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
+ if (res) {
+ IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
+ goto fail_connect_pipe;
+ }
+ channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ channel->brstmode_enabled =
+ channel->ch_scratch.mhi.burst_mode_enabled;
+
+ /* publish the new RUN state to the host channel context */
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->state, channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, chstate),
+ sizeof(channel->state));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+ /* fixed: original returned here directly, leaking the
+ * active-clients vote taken above
+ */
+ goto fail_ch_state;
+ }
+ } else {
+ struct ipa_mhi_connect_params_internal internal;
+
+ internal.channel_id = in->channel_id;
+ internal.sys = &in->sys;
+ internal.start.uC.index = channel->index;
+ internal.start.uC.id = channel->id;
+ internal.start.uC.state = channel->state;
+ res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
+ if (res) {
+ IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
+ goto fail_connect_pipe;
+ }
+ channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ }
+
+ if (!in->sys.keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+
+ IPA_MHI_FUNC_EXIT();
+
+ return 0;
+fail_ch_state:
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+ return res;
+fail_connect_pipe:
+ ipa_mhi_reset_channel(channel);
+fail_start_channel:
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+ return -EPERM;
+}
+
+/**
+ * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @clnt_hdl: client handle for this pipe
+ *
+ * This function is called by MHI client driver on MHI channel reset.
+ * This function is called after MHI channel was started.
+ * This function is doing the following:
+ * - Send command to uC/GSI to reset corresponding MHI channel
+ * - Configure IPA EP control
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
+{
+ int res;
+ enum ipa_client_type client;
+ /* fixed: was needlessly 'static' (shared, not thread-safe) */
+ struct ipa_mhi_channel_ctx *channel;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!ipa_mhi_client_ctx) {
+ IPA_MHI_ERR("IPA MHI was not initialized\n");
+ return -EINVAL;
+ }
+
+ client = ipa_get_client_mapping(clnt_hdl);
+
+ if (!IPA_CLIENT_IS_MHI(client)) {
+ IPA_MHI_ERR("invalid IPA MHI client, client: %d\n", client);
+ return -EINVAL;
+ }
+
+ channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
+ if (!channel) {
+ IPA_MHI_ERR("invalid clnt index\n");
+ return -EINVAL;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa_get_client_mapping(clnt_hdl));
+
+ res = ipa_mhi_reset_channel(channel);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res);
+ goto fail;
+ }
+
+ res = ipa_disconnect_mhi_pipe(clnt_hdl);
+ if (res) {
+ IPA_MHI_ERR(
+ "IPA core driver failed to disconnect the pipe hdl %d, res %d"
+ , clnt_hdl, res);
+ /* fixed: original returned here directly, leaking the
+ * active-clients vote taken above
+ */
+ goto fail;
+ }
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
+
+ IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+fail:
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
+ return res;
+}
+
+/* Block until the MHI CONS resource is released by IPA RM, or return
+ * immediately if it is not currently GRANTED.
+ * Returns 0 on success, -ETIME if the release did not arrive within
+ * IPA_MHI_RM_TIMEOUT_MSEC.
+ */
+static int ipa_mhi_wait_for_cons_release(void)
+{
+ unsigned long flags;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ reinit_completion(&ipa_mhi_client_ctx->rm_cons_comp);
+ spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+ if (ipa_mhi_client_ctx->rm_cons_state != IPA_MHI_RM_STATE_GRANTED) {
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+
+ /* completed by ipa_mhi_rm_cons_release(); 0 means timeout */
+ res = wait_for_completion_timeout(
+ &ipa_mhi_client_ctx->rm_cons_comp,
+ msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
+ if (res == 0) {
+ IPA_MHI_ERR("timeout release mhi cons\n");
+ return -ETIME;
+ }
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+/* Suspend every RUNning channel in @channels and mark it SUSPEND.
+ * Returns 0 on success or the first suspend error (earlier channels
+ * remain suspended in that case).
+ */
+static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels)
+{
+ int i;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ /* NOTE(review): iterates IPA_MHI_MAX_UL_CHANNELS over a generic
+ * @channels array; safe only if callers never pass an array shorter
+ * than the UL one -- confirm array sizes at call sites.
+ */
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ if (!channels[i].valid)
+ continue;
+ if (channels[i].state !=
+ IPA_HW_MHI_CHANNEL_STATE_RUN)
+ continue;
+ IPA_MHI_DBG("suspending channel %d\n",
+ channels[i].id);
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+ res = ipa_mhi_suspend_gsi_channel(
+ &channels[i]);
+ else
+ res = ipa_uc_mhi_suspend_channel(
+ channels[i].index);
+
+ if (res) {
+ IPA_MHI_ERR("failed to suspend channel %d error %d\n",
+ i, res);
+ return res;
+ }
+ channels[i].state =
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+/* Ask the uC to stop event-ring updates for every SUSPENDed channel in
+ * @channels. No-op for GSI (returns 0 immediately).
+ * Returns 0 on success or the first uC error.
+ */
+static int ipa_mhi_stop_event_update_channels(
+ struct ipa_mhi_channel_ctx *channels)
+{
+ int i;
+ int res;
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+ return 0;
+
+ IPA_MHI_FUNC_ENTRY();
+ /* NOTE(review): iterates IPA_MHI_MAX_UL_CHANNELS over a generic
+ * @channels array -- confirm callers never pass a shorter array.
+ */
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ if (!channels[i].valid)
+ continue;
+ if (channels[i].state !=
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
+ continue;
+ IPA_MHI_DBG("stop update event channel %d\n",
+ channels[i].id);
+ res = ipa_uc_mhi_stop_event_update_channel(
+ channels[i].index);
+ if (res) {
+ IPA_MHI_ERR("failed stop event channel %d error %d\n",
+ i, res);
+ return res;
+ }
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+/* Check whether the host queued TREs that the device has not consumed
+ * yet on any valid UL channel (device read pointer behind host write
+ * pointer). Also returns true on any query/DMA error, which callers
+ * treat as "pending" to stay on the safe side.
+ */
+static bool ipa_mhi_check_pending_packets_from_host(void)
+{
+ int i;
+ int res;
+ struct ipa_mhi_channel_ctx *channel;
+
+ IPA_MHI_FUNC_ENTRY();
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->ul_channels[i];
+ if (!channel->valid)
+ continue;
+
+ res = ipa_mhi_query_ch_info(channel->client,
+ &channel->ch_info);
+ if (res) {
+ IPA_MHI_ERR("gsi_query_channel_info failed\n");
+ return true;
+ }
+ res = ipa_mhi_read_ch_ctx(channel);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
+ return true;
+ }
+
+ if (channel->ch_info.rp != channel->ch_ctx_host.wp) {
+ IPA_MHI_DBG("There are pending packets from host\n");
+ IPA_MHI_DBG("device rp 0x%llx host 0x%llx\n",
+ channel->ch_info.rp, channel->ch_ctx_host.wp);
+
+ return true;
+ }
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return false;
+}
+
+/* Resume every SUSPENDed channel in @channels and mark it RUN.
+ * @LPTransitionRejected: forwarded to the core resume (low-power
+ * transition was rejected by the host).
+ * Returns 0 on success or the first resume error.
+ */
+static int ipa_mhi_resume_channels(bool LPTransitionRejected,
+ struct ipa_mhi_channel_ctx *channels)
+{
+ int i;
+ int res;
+ struct ipa_mhi_channel_ctx *channel;
+
+ IPA_MHI_FUNC_ENTRY();
+ /* NOTE(review): iterates IPA_MHI_MAX_UL_CHANNELS over a generic
+ * @channels array -- confirm callers never pass a shorter array.
+ */
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ if (!channels[i].valid)
+ continue;
+ if (channels[i].state !=
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
+ continue;
+ channel = &channels[i];
+ IPA_MHI_DBG("resuming channel %d\n", channel->id);
+
+ res = ipa_mhi_resume_channels_internal(channel->client,
+ LPTransitionRejected, channel->brstmode_enabled,
+ channel->ch_scratch, channel->index);
+
+ if (res) {
+ IPA_MHI_ERR("failed to resume channel %d error %d\n",
+ i, res);
+ return res;
+ }
+
+ channel->stop_in_proc = false;
+ channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+/**
+ * ipa_mhi_suspend_ul() - Suspend MHI accelerated up link channels
+ * @force:
+ * false: in case of data pending in IPA, MHI channels will not be
+ * suspended and function will fail.
+ * true: in case of data pending in IPA, make sure no further access from
+ * IPA to PCIe is possible. In this case suspend cannot fail.
+ *
+ *
+ * This function is called by MHI client driver on MHI suspend.
+ * This function is called after MHI channel was started.
+ * When this function returns device can move to M1/M2/M3/D3cold state.
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+static int ipa_mhi_suspend_ul(bool force, bool *empty, bool *force_clear)
+{
+ int res;
+
+ *force_clear = false;
+
+ res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res);
+ goto fail_suspend_ul_channel;
+ }
+
+ *empty = ipa_mhi_wait_for_ul_empty_timeout(
+ IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+
+ if (!*empty) {
+ if (force) {
+ res = ipa_mhi_enable_force_clear(
+ ipa_mhi_client_ctx->qmi_req_id, false);
+ if (res) {
+ IPA_MHI_ERR("failed to enable force clear\n");
+ ipa_assert();
+ return res;
+ }
+ *force_clear = true;
+ IPA_MHI_DBG("force clear datapath enabled\n");
+
+ *empty = ipa_mhi_wait_for_ul_empty_timeout(
+ IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+ IPA_MHI_DBG("empty=%d\n", *empty);
+ if (!*empty && ipa_get_transport_type()
+ == IPA_TRANSPORT_TYPE_GSI) {
+ IPA_MHI_ERR("Failed to suspend UL channels\n");
+ if (ipa_mhi_client_ctx->test_mode) {
+ res = -EAGAIN;
+ goto fail_suspend_ul_channel;
+ }
+
+ ipa_assert();
+ }
+ } else {
+ IPA_MHI_DBG("IPA not empty\n");
+ res = -EAGAIN;
+ goto fail_suspend_ul_channel;
+ }
+ }
+
+ if (*force_clear) {
+ res =
+ ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
+ if (res) {
+ IPA_MHI_ERR("failed to disable force clear\n");
+ ipa_assert();
+ return res;
+ }
+ IPA_MHI_DBG("force clear datapath disabled\n");
+ ipa_mhi_client_ctx->qmi_req_id++;
+ }
+
+ if (!force && ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ if (ipa_mhi_check_pending_packets_from_host()) {
+ res = -EAGAIN;
+ goto fail_suspend_ul_channel;
+ }
+ }
+
+ res = ipa_mhi_stop_event_update_channels(
+ ipa_mhi_client_ctx->ul_channels);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_mhi_stop_event_update_ul_channels failed %d\n",
+ res);
+ goto fail_suspend_ul_channel;
+ }
+
+ return 0;
+
+fail_suspend_ul_channel:
+ return res;
+}
+
+static bool ipa_mhi_has_open_aggr_frame(void)
+{
+ struct ipa_mhi_channel_ctx *channel;
+ int i;
+
+ for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->dl_channels[i];
+
+ if (!channel->valid)
+ continue;
+
+ if (ipa_has_open_aggr_frame(channel->client))
+ return true;
+ }
+
+ return false;
+}
+
+static void ipa_mhi_update_host_ch_state(bool update_rp)
+{
+ int i;
+ int res;
+ struct ipa_mhi_channel_ctx *channel;
+
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->ul_channels[i];
+ if (!channel->valid)
+ continue;
+
+ if (update_rp) {
+ res = ipa_mhi_query_ch_info(channel->client,
+ &channel->ch_info);
+ if (res) {
+ IPA_MHI_ERR("gsi_query_channel_info failed\n");
+ ipa_assert();
+ return;
+ }
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->ch_info.rp,
+ channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, rp),
+ sizeof(channel->ch_info.rp));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+ ipa_assert();
+ return;
+ }
+ }
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->state, channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, chstate),
+ sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+ ipa_assert();
+ return;
+ }
+ }
+
+ for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->dl_channels[i];
+ if (!channel->valid)
+ continue;
+
+ if (update_rp) {
+ res = ipa_mhi_query_ch_info(channel->client,
+ &channel->ch_info);
+ if (res) {
+ IPA_MHI_ERR("gsi_query_channel_info failed\n");
+ ipa_assert();
+ return;
+ }
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->ch_info.rp,
+ channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, rp),
+ sizeof(channel->ch_info.rp));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+ ipa_assert();
+ return;
+ }
+ }
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->state, channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, chstate),
+ sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+ ipa_assert();
+ }
+ }
+}
+
+static int ipa_mhi_suspend_dl(bool force)
+{
+ int res;
+
+ res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_mhi_suspend_channels for dl failed %d\n", res);
+ goto fail_suspend_dl_channel;
+ }
+
+ res = ipa_mhi_stop_event_update_channels
+ (ipa_mhi_client_ctx->dl_channels);
+ if (res) {
+ IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
+ goto fail_stop_event_update_dl_channel;
+ }
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ if (ipa_mhi_has_open_aggr_frame()) {
+ IPA_MHI_DBG("There is an open aggr frame\n");
+ if (force) {
+ ipa_mhi_client_ctx->trigger_wakeup = true;
+ } else {
+ res = -EAGAIN;
+ goto fail_stop_event_update_dl_channel;
+ }
+ }
+ }
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+ ipa_mhi_update_host_ch_state(true);
+
+fail_stop_event_update_dl_channel:
+ ipa_mhi_resume_channels(true,
+ ipa_mhi_client_ctx->dl_channels);
+fail_suspend_dl_channel:
+ return res;
+}
+
+/**
+ * ipa_mhi_suspend() - Suspend MHI accelerated channels
+ * @force:
+ * false: in case of data pending in IPA, MHI channels will not be
+ * suspended and function will fail.
+ * true: in case of data pending in IPA, make sure no further access from
+ * IPA to PCIe is possible. In this case suspend cannot fail.
+ *
+ * This function is called by MHI client driver on MHI suspend.
+ * This function is called after MHI channel was started.
+ * When this function returns device can move to M1/M2/M3/D3cold state.
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_suspend(bool force)
+{
+ int res;
+ bool empty;
+ bool force_clear;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+ return res;
+ }
+ res = ipa_mhi_suspend_ul(force, &empty, &force_clear);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_suspend_ul failed %d\n", res);
+ goto fail_suspend_ul_channel;
+ }
+
+ /*
+ * hold IPA clocks and release them after all
+ * IPA RM resource are released to make sure tag process will not start
+ */
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ IPA_MHI_DBG("release prod\n");
+ res = ipa_mhi_release_prod();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
+ goto fail_release_prod;
+ }
+
+ IPA_MHI_DBG("wait for cons release\n");
+ res = ipa_mhi_wait_for_cons_release();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n", res);
+ goto fail_release_cons;
+ }
+
+ usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
+
+ res = ipa_mhi_suspend_dl(force);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_suspend_dl failed %d\n", res);
+ goto fail_suspend_dl_channel;
+ }
+
+ if (!empty)
+ ipa_set_tag_process_before_gating(false);
+
+ res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+ goto fail_release_cons;
+ }
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_suspend_dl_channel:
+fail_release_cons:
+ ipa_mhi_request_prod();
+fail_release_prod:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+fail_suspend_ul_channel:
+ ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels);
+ ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+ if (force_clear) {
+ if (
+ ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id)) {
+ IPA_MHI_ERR("failed to disable force clear\n");
+ ipa_assert();
+ }
+ IPA_MHI_DBG("force clear datapath disabled\n");
+ ipa_mhi_client_ctx->qmi_req_id++;
+ }
+ return res;
+}
+
+/**
+ * ipa_mhi_resume() - Resume MHI accelerated channels
+ *
+ * This function is called by MHI client driver on MHI resume.
+ * This function is called after MHI channel was suspended.
+ * When this function returns device can move to M0 state.
+ * This function is doing the following:
+ * - Send command to uC/GSI to resume corresponding MHI channel
+ * - Request MHI_PROD in IPA RM
+ * - Resume data to IPA
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_resume(void)
+{
+ int res;
+ bool dl_channel_resumed = false;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ res = ipa_mhi_set_state(IPA_MHI_STATE_RESUME_IN_PROGRESS);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+ return res;
+ }
+
+ if (ipa_mhi_client_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) {
+ /* resume all DL channels */
+ res = ipa_mhi_resume_channels(false,
+ ipa_mhi_client_ctx->dl_channels);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
+ res);
+ goto fail_resume_dl_channels;
+ }
+ dl_channel_resumed = true;
+
+ ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
+ IPA_RM_RESOURCE_MHI_CONS);
+ ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
+ }
+
+ res = ipa_mhi_request_prod();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res);
+ goto fail_request_prod;
+ }
+
+ /* resume all UL channels */
+ res = ipa_mhi_resume_channels(false,
+ ipa_mhi_client_ctx->ul_channels);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_resume_ul_channels failed %d\n", res);
+ goto fail_resume_ul_channels;
+ }
+
+ if (!dl_channel_resumed) {
+ res = ipa_mhi_resume_channels(false,
+ ipa_mhi_client_ctx->dl_channels);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
+ res);
+ goto fail_resume_dl_channels2;
+ }
+ }
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+ ipa_mhi_update_host_ch_state(false);
+
+ res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+ goto fail_set_state;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_set_state:
+ ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+fail_resume_dl_channels2:
+ ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels);
+fail_resume_ul_channels:
+ ipa_mhi_release_prod();
+fail_request_prod:
+ ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+fail_resume_dl_channels:
+ ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
+ return res;
+}
+
+
+static int ipa_mhi_destroy_channels(struct ipa_mhi_channel_ctx *channels,
+ int num_of_channels)
+{
+ struct ipa_mhi_channel_ctx *channel;
+ int i, res;
+ u32 clnt_hdl;
+
+ for (i = 0; i < num_of_channels; i++) {
+ channel = &channels[i];
+ if (!channel->valid)
+ continue;
+ if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID)
+ continue;
+ if (channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+ clnt_hdl = ipa_get_ep_mapping(channel->client);
+ IPA_MHI_DBG("disconnect pipe (ep: %d)\n", clnt_hdl);
+ res = ipa_mhi_disconnect_pipe(clnt_hdl);
+ if (res) {
+ IPA_MHI_ERR(
+ "failed to disconnect pipe %d, err %d\n"
+ , clnt_hdl, res);
+ goto fail;
+ }
+ }
+ res = ipa_mhi_destroy_channel(channel->client);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_mhi_destroy_channel failed %d"
+ , res);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ return res;
+}
+
+/**
+ * ipa_mhi_destroy_all_channels() - Destroy MHI IPA channels
+ *
+ * This function is called by IPA MHI client driver on MHI reset to destroy all
+ * IPA MHI channels.
+ */
+int ipa_mhi_destroy_all_channels(void)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+	/* reset all UL and DL acc channels and their associated event rings */
+ res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->ul_channels,
+ IPA_MHI_MAX_UL_CHANNELS);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_destroy_channels(ul_channels) failed %d\n",
+ res);
+ return -EPERM;
+ }
+ IPA_MHI_DBG("All UL channels are disconnected\n");
+
+ res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->dl_channels,
+ IPA_MHI_MAX_DL_CHANNELS);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_destroy_channels(dl_channels) failed %d\n",
+ res);
+ return -EPERM;
+ }
+ IPA_MHI_DBG("All DL channels are disconnected\n");
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+static void ipa_mhi_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(dent);
+}
+
+/**
+ * ipa_mhi_destroy() - Destroy MHI IPA
+ *
+ * This function is called by MHI client driver on MHI reset to destroy all IPA
+ * MHI resources.
+ * When this function returns ipa_mhi can re-initialize.
+ */
+void ipa_mhi_destroy(void)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ if (!ipa_mhi_client_ctx) {
+ IPA_MHI_DBG("IPA MHI was not initialized, already destroyed\n");
+ return;
+ }
+	/* reset all UL and DL acc channels and their associated event rings */
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ res = ipa_mhi_destroy_all_channels();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_destroy_all_channels failed %d\n",
+ res);
+ goto fail;
+ }
+ }
+ IPA_MHI_DBG("All channels are disconnected\n");
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_SPS) {
+ IPA_MHI_DBG("cleanup uC MHI\n");
+ ipa_uc_mhi_cleanup();
+ }
+
+
+ if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_INITIALIZED &&
+ ipa_mhi_client_ctx->state != IPA_MHI_STATE_READY) {
+ IPA_MHI_DBG("release prod\n");
+ res = ipa_mhi_release_prod();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
+ goto fail;
+ }
+ IPA_MHI_DBG("wait for cons release\n");
+ res = ipa_mhi_wait_for_cons_release();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n",
+ res);
+ goto fail;
+ }
+ usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN,
+ IPA_MHI_SUSPEND_SLEEP_MAX);
+
+ IPA_MHI_DBG("deleate dependency Q6_PROD->MHI_CONS\n");
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_MHI_CONS);
+ if (res) {
+ IPA_MHI_ERR(
+ "Error deleting dependency %d->%d, res=%d\n"
+ , IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_MHI_CONS,
+ res);
+ goto fail;
+ }
+ IPA_MHI_DBG("deleate dependency MHI_PROD->Q6_CONS\n");
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res) {
+ IPA_MHI_ERR(
+ "Error deleting dependency %d->%d, res=%d\n",
+ IPA_RM_RESOURCE_MHI_PROD,
+ IPA_RM_RESOURCE_Q6_CONS,
+ res);
+ goto fail;
+ }
+ }
+
+ res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+ if (res) {
+ IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+ IPA_RM_RESOURCE_MHI_PROD, res);
+ goto fail;
+ }
+
+ res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+ if (res) {
+ IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+ IPA_RM_RESOURCE_MHI_CONS, res);
+ goto fail;
+ }
+
+ ipa_mhi_debugfs_destroy();
+ destroy_workqueue(ipa_mhi_client_ctx->wq);
+ kfree(ipa_mhi_client_ctx);
+ ipa_mhi_client_ctx = NULL;
+ IPA_MHI_DBG("IPA MHI was reset, ready for re-init\n");
+
+ IPA_MHI_FUNC_EXIT();
+ return;
+fail:
+ ipa_assert();
+}
+
+/**
+ * ipa_mhi_init() - Initialize IPA MHI driver
+ * @params: initialization params
+ *
+ * This function is called by MHI client driver on boot to initialize IPA MHI
+ * Driver. When this function returns device can move to READY state.
+ * This function is doing the following:
+ * - Initialize MHI IPA internal data structures
+ * - Create IPA RM resources
+ * - Initialize debugfs
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_init(struct ipa_mhi_init_params *params)
+{
+ int res;
+ struct ipa_rm_create_params mhi_prod_params;
+ struct ipa_rm_create_params mhi_cons_params;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!params) {
+ IPA_MHI_ERR("null args\n");
+ return -EINVAL;
+ }
+
+ if (!params->notify) {
+ IPA_MHI_ERR("null notify function\n");
+ return -EINVAL;
+ }
+
+ if (ipa_mhi_client_ctx) {
+ IPA_MHI_ERR("already initialized\n");
+ return -EPERM;
+ }
+
+ IPA_MHI_DBG("notify = %pF priv = %p\n", params->notify, params->priv);
+ IPA_MHI_DBG("msi: addr_lo = 0x%x addr_hi = 0x%x\n",
+ params->msi.addr_low, params->msi.addr_hi);
+ IPA_MHI_DBG("msi: data = 0x%x mask = 0x%x\n",
+ params->msi.data, params->msi.mask);
+ IPA_MHI_DBG("mmio_addr = 0x%x\n", params->mmio_addr);
+ IPA_MHI_DBG("first_ch_idx = 0x%x\n", params->first_ch_idx);
+ IPA_MHI_DBG("first_er_idx = 0x%x\n", params->first_er_idx);
+ IPA_MHI_DBG("assert_bit40=%d\n", params->assert_bit40);
+ IPA_MHI_DBG("test_mode=%d\n", params->test_mode);
+
+ /* Initialize context */
+ ipa_mhi_client_ctx = kzalloc(sizeof(*ipa_mhi_client_ctx), GFP_KERNEL);
+ if (!ipa_mhi_client_ctx) {
+ IPA_MHI_ERR("no memory\n");
+ res = -EFAULT;
+ goto fail_alloc_ctx;
+ }
+
+ ipa_mhi_client_ctx->state = IPA_MHI_STATE_INITIALIZED;
+ ipa_mhi_client_ctx->cb_notify = params->notify;
+ ipa_mhi_client_ctx->cb_priv = params->priv;
+ ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
+ init_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
+ spin_lock_init(&ipa_mhi_client_ctx->state_lock);
+ init_completion(&ipa_mhi_client_ctx->rm_cons_comp);
+ ipa_mhi_client_ctx->msi = params->msi;
+ ipa_mhi_client_ctx->mmio_addr = params->mmio_addr;
+ ipa_mhi_client_ctx->first_ch_idx = params->first_ch_idx;
+ ipa_mhi_client_ctx->first_er_idx = params->first_er_idx;
+ ipa_mhi_client_ctx->qmi_req_id = 0;
+ ipa_mhi_client_ctx->use_ipadma = true;
+ ipa_mhi_client_ctx->assert_bit40 = !!params->assert_bit40;
+ ipa_mhi_client_ctx->test_mode = params->test_mode;
+
+ ipa_mhi_client_ctx->wq = create_singlethread_workqueue("ipa_mhi_wq");
+ if (!ipa_mhi_client_ctx->wq) {
+ IPA_MHI_ERR("failed to create workqueue\n");
+ res = -EFAULT;
+ goto fail_create_wq;
+ }
+
+ /* Create PROD in IPA RM */
+ memset(&mhi_prod_params, 0, sizeof(mhi_prod_params));
+ mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
+ mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
+ mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify;
+ res = ipa_rm_create_resource(&mhi_prod_params);
+ if (res) {
+ IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
+ goto fail_create_rm_prod;
+ }
+
+ /* Create CONS in IPA RM */
+ memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
+ mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
+ mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
+ mhi_cons_params.request_resource = ipa_mhi_rm_cons_request;
+ mhi_cons_params.release_resource = ipa_mhi_rm_cons_release;
+ res = ipa_rm_create_resource(&mhi_cons_params);
+ if (res) {
+ IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
+ goto fail_create_rm_cons;
+ }
+
+ /* Initialize uC interface */
+ ipa_uc_mhi_init(ipa_mhi_uc_ready_cb,
+ ipa_mhi_uc_wakeup_request_cb);
+ if (ipa_uc_state_check() == 0)
+ ipa_mhi_set_state(IPA_MHI_STATE_READY);
+
+ /* Initialize debugfs */
+ ipa_mhi_debugfs_init();
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_create_rm_cons:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+fail_create_rm_prod:
+ destroy_workqueue(ipa_mhi_client_ctx->wq);
+fail_create_wq:
+ kfree(ipa_mhi_client_ctx);
+ ipa_mhi_client_ctx = NULL;
+fail_alloc_ctx:
+ return res;
+}
+
+static void ipa_mhi_cache_dl_ul_sync_info(
+ struct ipa_config_req_msg_v01 *config_req)
+{
+ ipa_cached_dl_ul_sync_info.params.isDlUlSyncEnabled = true;
+ ipa_cached_dl_ul_sync_info.params.UlAccmVal =
+ (config_req->ul_accumulation_time_limit_valid) ?
+ config_req->ul_accumulation_time_limit : 0;
+ ipa_cached_dl_ul_sync_info.params.ulMsiEventThreshold =
+ (config_req->ul_msi_event_threshold_valid) ?
+ config_req->ul_msi_event_threshold : 0;
+ ipa_cached_dl_ul_sync_info.params.dlMsiEventThreshold =
+ (config_req->dl_msi_event_threshold_valid) ?
+ config_req->dl_msi_event_threshold : 0;
+}
+
+/**
+ * ipa_mhi_handle_ipa_config_req() - handle IPA CONFIG QMI message
+ *
+ * This function is called by IPA QMI service to indicate that IPA CONFIG
+ * message was sent from modem. IPA MHI will update this information to IPA uC
+ * or will cache it until IPA MHI will be initialized.
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
+{
+ IPA_MHI_FUNC_ENTRY();
+
+ if (ipa_get_transport_type() != IPA_TRANSPORT_TYPE_GSI) {
+ ipa_mhi_cache_dl_ul_sync_info(config_req);
+ if (ipa_mhi_client_ctx &&
+ ipa_mhi_client_ctx->state !=
+ IPA_MHI_STATE_INITIALIZED)
+ ipa_uc_mhi_send_dl_ul_sync_info(
+ &ipa_cached_dl_ul_sync_info);
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI client driver");
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index fcab2a05ffb5..b7e291c6da63 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -10,6 +10,9 @@
* GNU General Public License for more details.
*/
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_qmi_service_v01.h>
+
#ifndef _IPA_COMMON_I_H_
#define _IPA_COMMON_I_H_
#include <linux/ipc_logging.h>
@@ -103,6 +106,12 @@ do {\
ipa_assert();\
} while (0)
+#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
+
+#define IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC (1000)
+#define IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC (2000)
+
enum ipa_active_client_log_type {
EP,
SIMPLE,
@@ -118,6 +127,146 @@ struct ipa_active_client_logging_info {
enum ipa_active_client_log_type type;
};
+/**
+ * struct ipa_mem_buffer - IPA memory buffer
+ * @base: base
+ * @phys_base: physical base address
+ * @size: size of memory buffer
+ */
+struct ipa_mem_buffer {
+ void *base;
+ dma_addr_t phys_base;
+ u32 size;
+};
+
+/**
+ * enum ipa_hw_mhi_channel_states - MHI channel state machine
+ *
+ * Values are according to MHI specification
+ * @IPA_HW_MHI_CHANNEL_STATE_DISABLE: Channel is disabled and not processed by
+ * the host or device.
+ * @IPA_HW_MHI_CHANNEL_STATE_ENABLE: A channel is enabled after being
+ * initialized and configured by host, including its channel context and
+ * associated transfer ring. While in this state, the channel is not active
+ * and the device does not process transfer.
+ * @IPA_HW_MHI_CHANNEL_STATE_RUN: The device processes transfers and doorbell
+ * for channels.
+ * @IPA_HW_MHI_CHANNEL_STATE_SUSPEND: Used to halt operations on the channel.
+ * The device does not process transfers for the channel in this state.
+ * This state is typically used to synchronize the transition to low power
+ * modes.
+ * @IPA_HW_MHI_CHANNEL_STATE_STOP: Used to halt operations on the channel.
+ * The device does not process transfers for the channel in this state.
+ * @IPA_HW_MHI_CHANNEL_STATE_ERROR: The device detected an error in an element
+ * from the transfer ring associated with the channel.
+ * @IPA_HW_MHI_CHANNEL_STATE_INVALID: Invalid state. Shall not be in use in
+ * operational scenario.
+ */
+enum ipa_hw_mhi_channel_states {
+ IPA_HW_MHI_CHANNEL_STATE_DISABLE = 0,
+ IPA_HW_MHI_CHANNEL_STATE_ENABLE = 1,
+ IPA_HW_MHI_CHANNEL_STATE_RUN = 2,
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND = 3,
+ IPA_HW_MHI_CHANNEL_STATE_STOP = 4,
+ IPA_HW_MHI_CHANNEL_STATE_ERROR = 5,
+ IPA_HW_MHI_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
+ * command. Parameters are sent as 32b immediate parameters.
+ * @isDlUlSyncEnabled: Flag to indicate if DL UL Synchronization is enabled
+ * @UlAccmVal: UL Timer Accumulation value (Period after which device will poll
+ * for UL data)
+ * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
+ * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
+ */
+union IpaHwMhiDlUlSyncCmdData_t {
+ struct IpaHwMhiDlUlSyncCmdParams_t {
+ u32 isDlUlSyncEnabled:8;
+ u32 UlAccmVal:8;
+ u32 ulMsiEventThreshold:8;
+ u32 dlMsiEventThreshold:8;
+ } params;
+ u32 raw32b;
+};
+
+struct ipa_mhi_ch_ctx {
+ u8 chstate;/*0-7*/
+ u8 brstmode:2;/*8-9*/
+ u8 pollcfg:6;/*10-15*/
+ u16 rsvd;/*16-31*/
+ u32 chtype;
+ u32 erindex;
+ u64 rbase;
+ u64 rlen;
+ u64 rp;
+ u64 wp;
+} __packed;
+
+struct ipa_mhi_ev_ctx {
+ u32 intmodc:16;
+ u32 intmodt:16;
+ u32 ertype;
+ u32 msivec;
+ u64 rbase;
+ u64 rlen;
+ u64 rp;
+ u64 wp;
+} __packed;
+
+struct ipa_mhi_init_uc_engine {
+ struct ipa_mhi_msi_info *msi;
+ u32 mmio_addr;
+ u32 host_ctrl_addr;
+ u32 host_data_addr;
+ u32 first_ch_idx;
+ u32 first_er_idx;
+ union IpaHwMhiDlUlSyncCmdData_t *ipa_cached_dl_ul_sync_info;
+};
+
+struct ipa_mhi_init_gsi_engine {
+ u32 first_ch_idx;
+};
+
+struct ipa_mhi_init_engine {
+ struct ipa_mhi_init_uc_engine uC;
+ struct ipa_mhi_init_gsi_engine gsi;
+};
+
+struct start_gsi_channel {
+ enum ipa_hw_mhi_channel_states state;
+ struct ipa_mhi_msi_info *msi;
+ struct ipa_mhi_ev_ctx *ev_ctx_host;
+ u64 event_context_addr;
+ struct ipa_mhi_ch_ctx *ch_ctx_host;
+ u64 channel_context_addr;
+ void (*ch_err_cb)(struct gsi_chan_err_notify *notify);
+ void (*ev_err_cb)(struct gsi_evt_err_notify *notify);
+ void *channel;
+ bool assert_bit40;
+ struct gsi_mhi_channel_scratch *mhi;
+ unsigned long *cached_gsi_evt_ring_hdl;
+ uint8_t evchid;
+};
+
+struct start_uc_channel {
+ enum ipa_hw_mhi_channel_states state;
+ u8 index;
+ u8 id;
+};
+
+struct start_mhi_channel {
+ struct start_uc_channel uC;
+ struct start_gsi_channel gsi;
+};
+
+struct ipa_mhi_connect_params_internal {
+ struct ipa_sys_connect_params *sys;
+ u8 channel_id;
+ struct start_mhi_channel start;
+};
+
extern const char *ipa_clients_strings[];
#define IPA_IPC_LOGGING(buf, fmt, args...) \
@@ -140,5 +289,45 @@ void *ipa_get_ipc_logbuf(void);
void *ipa_get_ipc_logbuf_low(void);
void ipa_assert(void);
+/* MHI */
+int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params);
+int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl);
+int ipa_disconnect_mhi_pipe(u32 clnt_hdl);
+bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client);
+int ipa_qmi_enable_force_clear_datapath_send(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+int ipa_qmi_disable_force_clear_datapath_send(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+int ipa_generate_tag_process(void);
+int ipa_disable_sps_pipe(enum ipa_client_type client);
+int ipa_mhi_reset_channel_internal(enum ipa_client_type client);
+int ipa_mhi_start_channel_internal(enum ipa_client_type client);
+bool ipa_mhi_sps_channel_empty(enum ipa_client_type client);
+int ipa_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index);
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
+int ipa_mhi_query_ch_info(enum ipa_client_type client,
+ struct gsi_chan_info *ch_info);
+int ipa_mhi_destroy_channel(enum ipa_client_type client);
+
+/* MHI uC */
+int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+int ipa_uc_mhi_init
+ (void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+void ipa_uc_mhi_cleanup(void);
+int ipa_uc_mhi_reset_channel(int channelHandle);
+int ipa_uc_mhi_suspend_channel(int channelHandle);
+int ipa_uc_mhi_stop_event_update_channel(int channelHandle);
+int ipa_uc_mhi_print_stats(char *dbg_buff, int size);
+
+/* uC */
+int ipa_uc_state_check(void);
+
+/* general */
+void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb);
+void ipa_set_tag_process_before_gating(bool val);
+bool ipa_has_open_aggr_frame(enum ipa_client_type client);
#endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 7d5daf202112..178ca5a8f28f 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -126,8 +126,6 @@
#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr"
#define IPA_INVALID_L4_PROTOCOL 0xFF
-#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
-#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
(reg |= ((val) << (shift)) & (mask))
@@ -186,18 +184,6 @@ struct ipa_smmu_cb_ctx {
};
/**
- * struct ipa_mem_buffer - IPA memory buffer
- * @base: base
- * @phys_base: physical base address
- * @size: size of memory buffer
- */
-struct ipa_mem_buffer {
- void *base;
- dma_addr_t phys_base;
- u32 size;
-};
-
-/**
* struct ipa_flt_entry - IPA filtering table entry
* @link: entry's link in global filtering enrties list
* @rule: filter rule
@@ -1012,58 +998,6 @@ enum ipa_hw_flags {
};
/**
- * enum ipa_hw_mhi_channel_states - MHI channel state machine
- *
- * Values are according to MHI specification
- * @IPA_HW_MHI_CHANNEL_STATE_DISABLE: Channel is disabled and not processed by
- * the host or device.
- * @IPA_HW_MHI_CHANNEL_STATE_ENABLE: A channel is enabled after being
- * initialized and configured by host, including its channel context and
- * associated transfer ring. While this state, the channel is not active
- * and the device does not process transfer.
- * @IPA_HW_MHI_CHANNEL_STATE_RUN: The device processes transfers and doorbell
- * for channels.
- * @IPA_HW_MHI_CHANNEL_STATE_SUSPEND: Used to halt operations on the channel.
- * The device does not process transfers for the channel in this state.
- * This state is typically used to synchronize the transition to low power
- * modes.
- * @IPA_HW_MHI_CHANNEL_STATE_STOP: Used to halt operations on the channel.
- * The device does not process transfers for the channel in this state.
- * @IPA_HW_MHI_CHANNEL_STATE_ERROR: The device detected an error in an element
- * from the transfer ring associated with the channel.
- * @IPA_HW_MHI_CHANNEL_STATE_INVALID: Invalid state. Shall not be in use in
- * operational scenario.
- */
-enum ipa_hw_mhi_channel_states {
- IPA_HW_MHI_CHANNEL_STATE_DISABLE = 0,
- IPA_HW_MHI_CHANNEL_STATE_ENABLE = 1,
- IPA_HW_MHI_CHANNEL_STATE_RUN = 2,
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND = 3,
- IPA_HW_MHI_CHANNEL_STATE_STOP = 4,
- IPA_HW_MHI_CHANNEL_STATE_ERROR = 5,
- IPA_HW_MHI_CHANNEL_STATE_INVALID = 0xFF
-};
-
-/**
- * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
- * command. Parameters are sent as 32b immediate parameters.
- * @isDlUlSyncEnabled: Flag to indicate if DL UL Syncronization is enabled
- * @UlAccmVal: UL Timer Accumulation value (Period after which device will poll
- * for UL data)
- * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
- * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
- */
-union IpaHwMhiDlUlSyncCmdData_t {
- struct IpaHwMhiDlUlSyncCmdParams_t {
- u32 isDlUlSyncEnabled:8;
- u32 UlAccmVal:8;
- u32 ulMsiEventThreshold:8;
- u32 dlMsiEventThreshold:8;
- } params;
- u32 raw32b;
-};
-
-/**
* struct ipa_uc_ctx - IPA uC context
* @uc_inited: Indicates if uC interface has been initialized
* @uc_loaded: Indicates if uC has loaded
@@ -1720,21 +1654,30 @@ int ipa2_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
void ipa2_dma_destroy(void);
/*
- * MHI
+ * MHI APIs for IPA MHI client driver
*/
-int ipa2_mhi_init(struct ipa_mhi_init_params *params);
+int ipa2_init_mhi(struct ipa_mhi_init_params *params);
-int ipa2_mhi_start(struct ipa_mhi_start_params *params);
+int ipa2_mhi_init_engine(struct ipa_mhi_init_engine *params);
-int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl);
+int ipa2_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl);
-int ipa2_mhi_disconnect_pipe(u32 clnt_hdl);
+int ipa2_disconnect_mhi_pipe(u32 clnt_hdl);
-int ipa2_mhi_suspend(bool force);
+bool ipa2_mhi_sps_channel_empty(enum ipa_client_type client);
-int ipa2_mhi_resume(void);
+int ipa2_disable_sps_pipe(enum ipa_client_type client);
-void ipa2_mhi_destroy(void);
+int ipa2_mhi_reset_channel_internal(enum ipa_client_type client);
+
+int ipa2_mhi_start_channel_internal(enum ipa_client_type client);
+
+int ipa2_mhi_suspend_ul_channels(void);
+
+int ipa2_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index);
/*
* mux id
@@ -1947,12 +1890,10 @@ int ipa_q6_monitor_holb_mitigation(bool enable);
int ipa_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
enum ipa_client_type ipa_client);
-int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
-
int ipa_uc_interface_init(void);
int ipa_uc_reset_pipe(enum ipa_client_type ipa_client);
int ipa_uc_monitor_holb(enum ipa_client_type ipa_client, bool enable);
-int ipa_uc_state_check(void);
+int ipa2_uc_state_check(void);
int ipa_uc_loaded_check(void);
int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
bool polling_mode, unsigned long timeout_jiffies);
@@ -1966,18 +1907,19 @@ void ipa_dma_async_memcpy_notify_cb(void *priv,
int ipa_uc_update_hw_flags(u32 flags);
-int ipa_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
-int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd);
+int ipa2_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+void ipa2_uc_mhi_cleanup(void);
+int ipa2_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
u32 first_evt_idx);
int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
int contexArrayIndex, int channelDirection);
-int ipa_uc_mhi_reset_channel(int channelHandle);
-int ipa_uc_mhi_suspend_channel(int channelHandle);
+int ipa2_uc_mhi_reset_channel(int channelHandle);
+int ipa2_uc_mhi_suspend_channel(int channelHandle);
int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected);
-int ipa_uc_mhi_stop_event_update_channel(int channelHandle);
-int ipa_uc_mhi_print_stats(char *dbg_buff, int size);
+int ipa2_uc_mhi_stop_event_update_channel(int channelHandle);
+int ipa2_uc_mhi_print_stats(char *dbg_buff, int size);
int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
u32 ipa_get_num_pipes(void);
u32 ipa_get_sys_yellow_wm(void);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
index ab86bac63136..7c10c4cee150 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ipa.h>
+#include <linux/ipa_mhi.h>
#include "ipa_i.h"
#include "ipa_qmi_service.h"
@@ -30,878 +31,45 @@
#define IPA_MHI_FUNC_EXIT() \
IPA_MHI_DBG("EXIT\n")
-#define IPA_MHI_RM_TIMEOUT_MSEC 10000
-
-#define IPA_MHI_BAM_EMPTY_TIMEOUT_MSEC 5
-
-#define IPA_MHI_MAX_UL_CHANNELS 1
-#define IPA_MHI_MAX_DL_CHANNELS 1
-
-#define IPA_MHI_SUSPEND_SLEEP_MIN 900
-#define IPA_MHI_SUSPEND_SLEEP_MAX 1100
-
-enum ipa_mhi_state {
- IPA_MHI_STATE_INITIALIZED,
- IPA_MHI_STATE_READY,
- IPA_MHI_STATE_STARTED,
- IPA_MHI_STATE_SUSPEND_IN_PROGRESS,
- IPA_MHI_STATE_SUSPENDED,
- IPA_MHI_STATE_RESUME_IN_PROGRESS,
- IPA_MHI_STATE_MAX
-};
-
-static char *ipa_mhi_state_str[] = {
- __stringify(IPA_MHI_STATE_INITIALIZED),
- __stringify(IPA_MHI_STATE_READY),
- __stringify(IPA_MHI_STATE_STARTED),
- __stringify(IPA_MHI_STATE_SUSPEND_IN_PROGRESS),
- __stringify(IPA_MHI_STATE_SUSPENDED),
- __stringify(IPA_MHI_STATE_RESUME_IN_PROGRESS),
-};
-
-#define MHI_STATE_STR(state) \
- (((state) >= 0 && (state) < IPA_MHI_STATE_MAX) ? \
- ipa_mhi_state_str[(state)] : \
- "INVALID")
-
-/**
- * struct ipa_mhi_channel_ctx - MHI Channel context
- * @valid: entry is valid
- * @id: MHI channel ID
- * @hdl: channel handle for uC
- * @client: IPA Client
- * @state: Channel state
- */
-struct ipa_mhi_channel_ctx {
- bool valid;
- u8 id;
- u8 hdl;
- enum ipa_client_type client;
- enum ipa_hw_mhi_channel_states state;
-};
-
-enum ipa_mhi_rm_state {
- IPA_MHI_RM_STATE_RELEASED,
- IPA_MHI_RM_STATE_REQUESTED,
- IPA_MHI_RM_STATE_GRANTED,
- IPA_MHI_RM_STATE_MAX
-};
-
-/**
- * struct ipa_mhi_ctx - IPA MHI context
- * @state: IPA MHI state
- * @state_lock: lock for state read/write operations
- * @msi: Message Signaled Interrupts parameters
- * @mmio_addr: MHI MMIO physical address
- * @first_ch_idx: First channel ID for hardware accelerated channels.
- * @first_er_idx: First event ring ID for hardware accelerated channels.
- * @host_ctrl_addr: Base address of MHI control data structures
- * @host_data_addr: Base address of MHI data buffers
- * @cb_notify: client callback
- * @cb_priv: client private data to be provided in client callback
- * @ul_channels: IPA MHI uplink channel contexts
- * @dl_channels: IPA MHI downlink channel contexts
- * @total_channels: Total number of channels ever connected to IPA MHI
- * @rm_prod_granted_comp: Completion object for MHI producer resource in IPA RM
- * @rm_cons_state: MHI consumer resource state in IPA RM
- * @rm_cons_comp: Completion object for MHI consumer resource in IPA RM
- * @trigger_wakeup: trigger wakeup callback ?
- * @wakeup_notified: MHI Client wakeup function was called
- * @wq: workqueue for wakeup event
- * @qmi_req_id: QMI request unique id
- */
-struct ipa_mhi_ctx {
- enum ipa_mhi_state state;
- spinlock_t state_lock;
- struct ipa_mhi_msi_info msi;
- u32 mmio_addr;
- u32 first_ch_idx;
- u32 first_er_idx;
- u32 host_ctrl_addr;
- u32 host_data_addr;
- mhi_client_cb cb_notify;
- void *cb_priv;
- struct ipa_mhi_channel_ctx ul_channels[IPA_MHI_MAX_UL_CHANNELS];
- struct ipa_mhi_channel_ctx dl_channels[IPA_MHI_MAX_DL_CHANNELS];
- u32 total_channels;
- struct completion rm_prod_granted_comp;
- enum ipa_mhi_rm_state rm_cons_state;
- struct completion rm_cons_comp;
- bool trigger_wakeup;
- bool wakeup_notified;
- struct workqueue_struct *wq;
- u32 qmi_req_id;
-};
-
-static struct ipa_mhi_ctx *ipa_mhi_ctx;
-
-static void ipa_mhi_wq_notify_wakeup(struct work_struct *work);
-static DECLARE_WORK(ipa_mhi_notify_wakeup_work, ipa_mhi_wq_notify_wakeup);
-
-static void ipa_mhi_wq_notify_ready(struct work_struct *work);
-static DECLARE_WORK(ipa_mhi_notify_ready_work, ipa_mhi_wq_notify_ready);
-
-static union IpaHwMhiDlUlSyncCmdData_t cached_dl_ul_sync_info;
-
-#ifdef CONFIG_DEBUG_FS
-#define IPA_MHI_MAX_MSG_LEN 512
-static char dbg_buff[IPA_MHI_MAX_MSG_LEN];
-static struct dentry *dent;
-
-static char *ipa_mhi_channel_state_str[] = {
- __stringify(IPA_HW_MHI_CHANNEL_STATE_DISABLE),
- __stringify(IPA_HW_MHI_CHANNEL_STATE_ENABLE),
- __stringify(IPA_HW_MHI_CHANNEL_STATE_RUN),
- __stringify(IPA_HW_MHI_CHANNEL_STATE_SUSPEND),
- __stringify(IPA_HW_MHI_CHANNEL_STATE_STOP),
- __stringify(IPA_HW_MHI_CHANNEL_STATE_ERROR),
-};
-
-#define MHI_CH_STATE_STR(state) \
- (((state) >= 0 && (state) <= IPA_HW_MHI_CHANNEL_STATE_ERROR) ? \
- ipa_mhi_channel_state_str[(state)] : \
- "INVALID")
-
-static ssize_t ipa_mhi_debugfs_stats(struct file *file,
- char __user *ubuf,
- size_t count,
- loff_t *ppos)
-{
- int nbytes = 0;
- int i;
- struct ipa_mhi_channel_ctx *channel;
-
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "IPA MHI state: %s\n", MHI_STATE_STR(ipa_mhi_ctx->state));
-
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- channel = &ipa_mhi_ctx->ul_channels[i];
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "channel %d: ", i);
- if (channel->valid) {
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "ch_id=%d client=%d state=%s",
- channel->id, channel->client,
- MHI_CH_STATE_STR(channel->state));
- } else {
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "never connected");
- }
-
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes, "\n");
- }
-
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- channel = &ipa_mhi_ctx->dl_channels[i];
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "channel %d: ", i);
- if (channel->valid) {
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "ch_id=%d client=%d state=%s",
- channel->id, channel->client,
- MHI_CH_STATE_STR(channel->state));
- } else {
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "never connected");
- }
-
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes, "\n");
- }
-
- return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
-}
-
-static ssize_t ipa_mhi_debugfs_uc_stats(struct file *file,
- char __user *ubuf,
- size_t count,
- loff_t *ppos)
-{
- int nbytes = 0;
-
- nbytes += ipa_uc_mhi_print_stats(dbg_buff, IPA_MHI_MAX_MSG_LEN);
-
- return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
-}
-
-const struct file_operations ipa_mhi_stats_ops = {
- .read = ipa_mhi_debugfs_stats,
-};
-
-const struct file_operations ipa_mhi_uc_stats_ops = {
- .read = ipa_mhi_debugfs_uc_stats,
-};
-
-static void ipa_mhi_debugfs_init(void)
-{
- const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
- struct dentry *file;
-
- IPA_MHI_FUNC_ENTRY();
-
- dent = debugfs_create_dir("ipa_mhi", 0);
- if (IS_ERR(dent)) {
- IPA_MHI_ERR("fail to create folder ipa_mhi\n");
- return;
- }
-
- file = debugfs_create_file("stats", read_only_mode, dent,
- 0, &ipa_mhi_stats_ops);
- if (!file || IS_ERR(file)) {
- IPA_MHI_ERR("fail to create file stats\n");
- goto fail;
- }
-
- file = debugfs_create_file("uc_stats", read_only_mode, dent,
- 0, &ipa_mhi_uc_stats_ops);
- if (!file || IS_ERR(file)) {
- IPA_MHI_ERR("fail to create file stats\n");
- goto fail;
- }
-
- IPA_MHI_FUNC_EXIT();
- return;
-fail:
- debugfs_remove_recursive(dent);
-}
-
-static void ipa_mhi_debugfs_destroy(void)
-{
- debugfs_remove_recursive(dent);
-}
-
-#else
-static void ipa_mhi_debugfs_init(void) {}
-static void ipa_mhi_debugfs_destroy(void) {}
-#endif /* CONFIG_DEBUG_FS */
-
-
-static void ipa_mhi_cache_dl_ul_sync_info(
- struct ipa_config_req_msg_v01 *config_req)
-{
- cached_dl_ul_sync_info.params.isDlUlSyncEnabled = true;
- cached_dl_ul_sync_info.params.UlAccmVal =
- (config_req->ul_accumulation_time_limit_valid) ?
- config_req->ul_accumulation_time_limit : 0;
- cached_dl_ul_sync_info.params.ulMsiEventThreshold =
- (config_req->ul_msi_event_threshold_valid) ?
- config_req->ul_msi_event_threshold : 0;
- cached_dl_ul_sync_info.params.dlMsiEventThreshold =
- (config_req->dl_msi_event_threshold_valid) ?
- config_req->dl_msi_event_threshold : 0;
-}
-
-/**
- * ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available
- *
- * This function is called from IPA MHI workqueue to notify
- * MHI client driver on data available event.
- */
-static void ipa_mhi_wq_notify_wakeup(struct work_struct *work)
-{
- IPA_MHI_FUNC_ENTRY();
- ipa_mhi_ctx->cb_notify(ipa_mhi_ctx->cb_priv,
- IPA_MHI_EVENT_DATA_AVAILABLE, 0);
- IPA_MHI_FUNC_EXIT();
-}
-
-/**
- * ipa_mhi_notify_wakeup() - Schedule work to notify data available
- *
- * This function will schedule a work to notify data available event.
- * In case this function is called more than once, only one notification will
- * be sent to MHI client driver. No further notifications will be sent until
- * IPA MHI state will become STARTED.
- */
-static void ipa_mhi_notify_wakeup(void)
-{
- IPA_MHI_FUNC_ENTRY();
- if (ipa_mhi_ctx->wakeup_notified) {
- IPADBG("wakeup already called\n");
- return;
- }
- queue_work(ipa_mhi_ctx->wq, &ipa_mhi_notify_wakeup_work);
- ipa_mhi_ctx->wakeup_notified = true;
- IPA_MHI_FUNC_EXIT();
-}
-
-/**
- * ipa_mhi_wq_notify_ready() - Notify MHI client on ready
- *
- * This function is called from IPA MHI workqueue to notify
- * MHI client driver on ready event when IPA uC is loaded
- */
-static void ipa_mhi_wq_notify_ready(struct work_struct *work)
+bool ipa2_mhi_sps_channel_empty(enum ipa_client_type client)
{
- IPA_MHI_FUNC_ENTRY();
- ipa_mhi_ctx->cb_notify(ipa_mhi_ctx->cb_priv,
- IPA_MHI_EVENT_READY, 0);
- IPA_MHI_FUNC_EXIT();
-}
-
-/**
- * ipa_mhi_notify_ready() - Schedule work to notify ready
- *
- * This function will schedule a work to notify ready event.
- */
-static void ipa_mhi_notify_ready(void)
-{
- IPA_MHI_FUNC_ENTRY();
- queue_work(ipa_mhi_ctx->wq, &ipa_mhi_notify_ready_work);
- IPA_MHI_FUNC_EXIT();
-}
-
-/**
- * ipa_mhi_set_state() - Set new state to IPA MHI
- * @state: new state
- *
- * Sets a new state to IPA MHI if possible according to IPA MHI state machine.
- * In some state transitions a wakeup request will be triggered.
- *
- * Returns: 0 on success, -1 otherwise
- */
-static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
-{
- unsigned long flags;
- int res = -EPERM;
-
- spin_lock_irqsave(&ipa_mhi_ctx->state_lock, flags);
- IPA_MHI_DBG("Current state: %s\n", MHI_STATE_STR(ipa_mhi_ctx->state));
-
- switch (ipa_mhi_ctx->state) {
- case IPA_MHI_STATE_INITIALIZED:
- if (new_state == IPA_MHI_STATE_READY) {
- ipa_mhi_notify_ready();
- res = 0;
- }
- break;
-
- case IPA_MHI_STATE_READY:
- if (new_state == IPA_MHI_STATE_READY)
- res = 0;
- if (new_state == IPA_MHI_STATE_STARTED)
- res = 0;
- break;
-
- case IPA_MHI_STATE_STARTED:
- if (new_state == IPA_MHI_STATE_INITIALIZED)
- res = 0;
- else if (new_state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
- res = 0;
- break;
-
- case IPA_MHI_STATE_SUSPEND_IN_PROGRESS:
- if (new_state == IPA_MHI_STATE_SUSPENDED) {
- if (ipa_mhi_ctx->trigger_wakeup) {
- ipa_mhi_ctx->trigger_wakeup = false;
- ipa_mhi_notify_wakeup();
- }
- res = 0;
- } else if (new_state == IPA_MHI_STATE_STARTED) {
- ipa_mhi_ctx->wakeup_notified = false;
- if (ipa_mhi_ctx->rm_cons_state ==
- IPA_MHI_RM_STATE_REQUESTED) {
- ipa_rm_notify_completion(
- IPA_RM_RESOURCE_GRANTED,
- IPA_RM_RESOURCE_MHI_CONS);
- ipa_mhi_ctx->rm_cons_state =
- IPA_MHI_RM_STATE_GRANTED;
- }
- res = 0;
- }
- break;
-
- case IPA_MHI_STATE_SUSPENDED:
- if (new_state == IPA_MHI_STATE_RESUME_IN_PROGRESS)
- res = 0;
- break;
-
- case IPA_MHI_STATE_RESUME_IN_PROGRESS:
- if (new_state == IPA_MHI_STATE_SUSPENDED) {
- if (ipa_mhi_ctx->trigger_wakeup) {
- ipa_mhi_ctx->trigger_wakeup = false;
- ipa_mhi_notify_wakeup();
- }
- res = 0;
- } else if (new_state == IPA_MHI_STATE_STARTED) {
- ipa_mhi_ctx->wakeup_notified = false;
- if (ipa_mhi_ctx->rm_cons_state ==
- IPA_MHI_RM_STATE_REQUESTED) {
- ipa_rm_notify_completion(
- IPA_RM_RESOURCE_GRANTED,
- IPA_RM_RESOURCE_MHI_CONS);
- ipa_mhi_ctx->rm_cons_state =
- IPA_MHI_RM_STATE_GRANTED;
- }
- res = 0;
- }
- break;
-
- default:
- IPA_MHI_ERR("invalied state %d\n", ipa_mhi_ctx->state);
- WARN_ON(1);
- }
-
- if (res)
- IPA_MHI_ERR("Invalid state change to %s\n",
- MHI_STATE_STR(new_state));
- else {
- IPA_MHI_DBG("New state change to %s\n",
- MHI_STATE_STR(new_state));
- ipa_mhi_ctx->state = new_state;
- }
- spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);
- return res;
-}
-
-static void ipa_mhi_rm_prod_notify(void *user_data, enum ipa_rm_event event,
- unsigned long data)
-{
- IPA_MHI_FUNC_ENTRY();
-
- switch (event) {
- case IPA_RM_RESOURCE_GRANTED:
- IPA_MHI_DBG("IPA_RM_RESOURCE_GRANTED\n");
- complete_all(&ipa_mhi_ctx->rm_prod_granted_comp);
- break;
-
- case IPA_RM_RESOURCE_RELEASED:
- IPA_MHI_DBG("IPA_RM_RESOURCE_RELEASED\n");
- break;
+ u32 pipe_idx;
+ bool pending;
- default:
- IPA_MHI_ERR("unexpected event %d\n", event);
+ pipe_idx = ipa2_get_ep_mapping(client);
+ if (sps_pipe_pending_desc(ipa_ctx->bam_handle,
+ pipe_idx, &pending)) {
+ IPA_MHI_ERR("sps_pipe_pending_desc failed\n");
WARN_ON(1);
- break;
+ return false;
}
- IPA_MHI_FUNC_EXIT();
-}
-
-static void ipa_mhi_uc_ready_cb(void)
-{
- IPA_MHI_FUNC_ENTRY();
- ipa_mhi_set_state(IPA_MHI_STATE_READY);
- IPA_MHI_FUNC_EXIT();
-}
-
-static void ipa_mhi_uc_wakeup_request_cb(void)
-{
- unsigned long flags;
-
- IPA_MHI_FUNC_ENTRY();
- IPA_MHI_DBG("MHI state: %s\n", MHI_STATE_STR(ipa_mhi_ctx->state));
- spin_lock_irqsave(&ipa_mhi_ctx->state_lock, flags);
- if (ipa_mhi_ctx->state == IPA_MHI_STATE_SUSPENDED) {
- ipa_mhi_notify_wakeup();
- } else if (ipa_mhi_ctx->state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
- /* wakeup event will be triggered after suspend finishes */
- ipa_mhi_ctx->trigger_wakeup = true;
- }
- spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);
- IPA_MHI_FUNC_EXIT();
+ return !pending;
}
-/**
- * ipa_mhi_rm_cons_request() - callback function for IPA RM request resource
- *
- * In case IPA MHI is not suspended, MHI CONS will be granted immediately.
- * In case IPA MHI is suspended, MHI CONS will be granted after resume.
- */
-static int ipa_mhi_rm_cons_request(void)
+int ipa2_disable_sps_pipe(enum ipa_client_type client)
{
- unsigned long flags;
+ int ipa_ep_index;
int res;
- IPA_MHI_FUNC_ENTRY();
+ ipa_ep_index = ipa2_get_ep_mapping(client);
- IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_ctx->state));
- spin_lock_irqsave(&ipa_mhi_ctx->state_lock, flags);
- ipa_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_REQUESTED;
- if (ipa_mhi_ctx->state == IPA_MHI_STATE_STARTED) {
- ipa_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
- res = 0;
- } else if (ipa_mhi_ctx->state == IPA_MHI_STATE_SUSPENDED) {
- ipa_mhi_notify_wakeup();
- res = -EINPROGRESS;
- } else if (ipa_mhi_ctx->state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
- /* wakeup event will be trigger after suspend finishes */
- ipa_mhi_ctx->trigger_wakeup = true;
- res = -EINPROGRESS;
- } else {
- res = -EINPROGRESS;
- }
-
- spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);
- IPA_MHI_DBG("EXIT with %d\n", res);
- return res;
-}
-
-static int ipa_mhi_rm_cons_release(void)
-{
- unsigned long flags;
-
- IPA_MHI_FUNC_ENTRY();
-
- spin_lock_irqsave(&ipa_mhi_ctx->state_lock, flags);
- ipa_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
- complete_all(&ipa_mhi_ctx->rm_cons_comp);
- spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa_mhi_wait_for_cons_release(void)
-{
- unsigned long flags;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- reinit_completion(&ipa_mhi_ctx->rm_cons_comp);
- spin_lock_irqsave(&ipa_mhi_ctx->state_lock, flags);
- if (ipa_mhi_ctx->rm_cons_state != IPA_MHI_RM_STATE_GRANTED) {
- spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);
- return 0;
- }
- spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);
-
- res = wait_for_completion_timeout(
- &ipa_mhi_ctx->rm_cons_comp,
- msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
- if (res == 0) {
- IPA_MHI_ERR("timeout release mhi cons\n");
- return -ETIME;
- }
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa_mhi_request_prod(void)
-{
- int res;
-
- IPA_MHI_FUNC_ENTRY();
-
- reinit_completion(&ipa_mhi_ctx->rm_prod_granted_comp);
- IPA_MHI_DBG("requesting mhi prod\n");
- res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
+ res = sps_pipe_disable(ipa_ctx->bam_handle, ipa_ep_index);
if (res) {
- if (res != -EINPROGRESS) {
- IPA_MHI_ERR("failed to request mhi prod %d\n", res);
- return res;
- }
- res = wait_for_completion_timeout(
- &ipa_mhi_ctx->rm_prod_granted_comp,
- msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
- if (res == 0) {
- IPA_MHI_ERR("timeout request mhi prod\n");
- return -ETIME;
- }
- }
-
- IPA_MHI_DBG("mhi prod granted\n");
- IPA_MHI_FUNC_EXIT();
- return 0;
-
-}
-
-static int ipa_mhi_release_prod(void)
-{
- int res;
-
- IPA_MHI_FUNC_ENTRY();
-
- res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
-
- IPA_MHI_FUNC_EXIT();
- return res;
-
-}
-
-/**
- * ipa_mhi_get_channel_context() - Get corresponding channel context
- * @client: IPA client
- * @channel_id: Channel ID
- *
- * This function will return the corresponding channel context or allocate new
- * one in case channel context for channel does not exist.
- */
-static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context(
- enum ipa_client_type client, u8 channel_id)
-{
- int ch_idx;
- struct ipa_mhi_channel_ctx *channels;
- int max_channels;
-
- if (IPA_CLIENT_IS_PROD(client)) {
- channels = ipa_mhi_ctx->ul_channels;
- max_channels = IPA_MHI_MAX_UL_CHANNELS;
- } else {
- channels = ipa_mhi_ctx->dl_channels;
- max_channels = IPA_MHI_MAX_DL_CHANNELS;
- }
-
- /* find the channel context according to channel id */
- for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
- if (channels[ch_idx].valid &&
- channels[ch_idx].id == channel_id)
- return &channels[ch_idx];
- }
-
- /* channel context does not exists, allocate a new one */
- for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
- if (!channels[ch_idx].valid)
- break;
- }
-
- if (ch_idx == max_channels) {
- IPA_MHI_ERR("no more channels available\n");
- return NULL;
- }
-
- channels[ch_idx].valid = true;
- channels[ch_idx].id = channel_id;
- channels[ch_idx].hdl = ipa_mhi_ctx->total_channels++;
- channels[ch_idx].client = client;
- channels[ch_idx].state = IPA_HW_MHI_CHANNEL_STATE_INVALID;
-
- return &channels[ch_idx];
-}
-
-/**
- * ipa_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel context
- * @clnt_hdl: client handle as provided in ipa2_mhi_connect_pipe()
- *
- * This function will return the corresponding channel context or NULL in case
- * that channel does not exist.
- */
-static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context_by_clnt_hdl(
- u32 clnt_hdl)
-{
- int ch_idx;
-
- for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) {
- if (ipa_mhi_ctx->ul_channels[ch_idx].valid &&
- ipa2_get_ep_mapping(
- ipa_mhi_ctx->ul_channels[ch_idx].client) == clnt_hdl)
- return &ipa_mhi_ctx->ul_channels[ch_idx];
- }
-
- for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) {
- if (ipa_mhi_ctx->dl_channels[ch_idx].valid &&
- ipa2_get_ep_mapping(
- ipa_mhi_ctx->dl_channels[ch_idx].client) == clnt_hdl)
- return &ipa_mhi_ctx->dl_channels[ch_idx];
- }
-
- return NULL;
-}
-
-static int ipa_mhi_enable_force_clear(u32 request_id, bool throttle_source)
-{
- struct ipa_enable_force_clear_datapath_req_msg_v01 req;
- int i;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- memset(&req, 0, sizeof(req));
- req.request_id = request_id;
- req.source_pipe_bitmask = 0;
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- if (!ipa_mhi_ctx->ul_channels[i].valid)
- continue;
- req.source_pipe_bitmask |= 1 << ipa2_get_ep_mapping(
- ipa_mhi_ctx->ul_channels[i].client);
- }
- if (throttle_source) {
- req.throttle_source_valid = 1;
- req.throttle_source = 1;
- }
- IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n",
- req.request_id, req.source_pipe_bitmask,
- req.throttle_source);
- res = qmi_enable_force_clear_datapath_send(&req);
- if (res) {
- IPA_MHI_ERR("qmi_enable_force_clear_datapath_send failed %d\n",
- res);
- return res;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa_mhi_disable_force_clear(u32 request_id)
-{
- struct ipa_disable_force_clear_datapath_req_msg_v01 req;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- memset(&req, 0, sizeof(req));
- req.request_id = request_id;
- IPA_MHI_DBG("req_id=0x%x\n", req.request_id);
- res = qmi_disable_force_clear_datapath_send(&req);
- if (res) {
- IPA_MHI_ERR("qmi_disable_force_clear_datapath_send failed %d\n",
- res);
+ IPA_MHI_ERR("sps_pipe_disable fail %d\n", res);
return res;
}
- IPA_MHI_FUNC_EXIT();
return 0;
}
-/**
- * ipa_mhi_wait_for_bam_empty_timeout() - wait for pending packets in uplink
- * @msecs: timeout to wait
- *
- * This function will poll until there are no packets pending in uplink channels
- * or timeout occurred.
- *
- * Return code: true - no pending packets in uplink channels
- * false - timeout occurred
- */
-static bool ipa_mhi_wait_for_bam_empty_timeout(unsigned int msecs)
-{
- unsigned long jiffies_timeout = msecs_to_jiffies(msecs);
- unsigned long jiffies_start = jiffies;
- bool empty = false;
- bool pending;
- int i;
- u32 pipe_idx;
-
- IPA_MHI_FUNC_ENTRY();
- while (!empty) {
- empty = true;
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- if (!ipa_mhi_ctx->ul_channels[i].valid)
- continue;
- pipe_idx = ipa2_get_ep_mapping(
- ipa_mhi_ctx->ul_channels[i].client);
- if (sps_pipe_pending_desc(ipa_ctx->bam_handle,
- pipe_idx, &pending)) {
- IPA_MHI_ERR("sps_pipe_pending_desc failed\n");
- WARN_ON(1);
- return false;
- }
- empty &= !pending;
- }
-
- if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
- IPA_MHI_DBG("timeout waiting for BAM empty\n");
- break;
- }
- }
- IPA_MHI_DBG("Bam is %s\n", (empty) ? "empty" : "not empty");
- IPA_MHI_FUNC_EXIT();
- return empty;
-}
-
-static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel)
+int ipa2_mhi_reset_channel_internal(enum ipa_client_type client)
{
int res;
- int i;
- int ep_idx;
- struct ipa_ep_cfg_holb ep_holb;
- struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS];
- bool empty;
IPA_MHI_FUNC_ENTRY();
- res = ipa_uc_mhi_reset_channel(channel->hdl);
- if (res) {
- IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n", res);
- return res;
- }
- empty = ipa_mhi_wait_for_bam_empty_timeout(
- IPA_MHI_BAM_EMPTY_TIMEOUT_MSEC);
- if (!empty) {
- IPA_MHI_DBG("BAM not empty\n");
- res = ipa_mhi_enable_force_clear(ipa_mhi_ctx->qmi_req_id,
- true);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_enable_force_clear failed %d\n",
- res);
- BUG();
- return res;
- }
-
- /* enable packet drop on all DL channels */
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- if (!ipa_mhi_ctx->dl_channels[i].valid)
- continue;
- if (ipa_mhi_ctx->dl_channels[i].state ==
- IPA_HW_MHI_CHANNEL_STATE_INVALID)
- continue;
- ep_idx = ipa2_get_ep_mapping(
- ipa_mhi_ctx->dl_channels[i].client);
- if (-1 == ep_idx) {
- IPA_MHI_ERR("Client %u is not mapped\n",
- ipa_mhi_ctx->dl_channels[i].client);
- BUG();
- return -EFAULT;
- }
- memset(&ep_holb, 0, sizeof(ep_holb));
- ep_holb.en = 1;
- ep_holb.tmr_val = 0;
- old_ep_holb[i] = ipa_ctx->ep[ep_idx].holb;
- res = ipa2_cfg_ep_holb(ep_idx, &ep_holb);
- if (res) {
- IPA_MHI_ERR("ipa2_cfg_ep_holb failed %d\n",
- res);
- BUG();
- return res;
- }
- }
-
- res = ipa_tag_process(NULL, 0, HZ);
- if (res)
- IPAERR("TAG process failed\n");
-
- /* disable packet drop on all DL channels */
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- if (!ipa_mhi_ctx->dl_channels[i].valid)
- continue;
- if (ipa_mhi_ctx->dl_channels[i].state ==
- IPA_HW_MHI_CHANNEL_STATE_INVALID)
- continue;
- ep_idx = ipa2_get_ep_mapping(
- ipa_mhi_ctx->dl_channels[i].client);
- res = ipa2_cfg_ep_holb(ep_idx, &old_ep_holb[i]);
- if (res) {
- IPA_MHI_ERR("ipa2_cfg_ep_holb failed %d\n",
- res);
- BUG();
- return res;
- }
- }
-
- res = sps_pipe_disable(ipa_ctx->bam_handle,
- ipa2_get_ep_mapping(channel->client));
- if (res) {
- IPA_MHI_ERR("sps_pipe_disable failed %d\n", res);
- BUG();
- return res;
- }
-
- res = ipa_mhi_disable_force_clear(ipa_mhi_ctx->qmi_req_id);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_disable_force_clear failed %d\n",
- res);
- BUG();
- return res;
- }
- ipa_mhi_ctx->qmi_req_id++;
- }
- res = ipa_disable_data_path(ipa2_get_ep_mapping(channel->client));
+ res = ipa_disable_data_path(ipa2_get_ep_mapping(client));
if (res) {
IPA_MHI_ERR("ipa_disable_data_path failed %d\n", res);
return res;
@@ -911,187 +79,23 @@ static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel)
return 0;
}
-static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel)
+int ipa2_mhi_start_channel_internal(enum ipa_client_type client)
{
int res;
IPA_MHI_FUNC_ENTRY();
- res = ipa_disable_data_path(ipa2_get_ep_mapping(channel->client));
- if (res) {
- IPA_MHI_ERR("ipa_disable_data_path failed %d\n", res);
- return res;
- }
- res = ipa_uc_mhi_reset_channel(channel->hdl);
+ res = ipa_enable_data_path(ipa2_get_ep_mapping(client));
if (res) {
- IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n", res);
- goto fail_reset_channel;
- }
- IPA_MHI_FUNC_EXIT();
-
- return 0;
-
-fail_reset_channel:
- ipa_enable_data_path(ipa2_get_ep_mapping(channel->client));
- return res;
-}
-
-static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel)
-{
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- if (IPA_CLIENT_IS_PROD(channel->client))
- res = ipa_mhi_reset_ul_channel(channel);
- else
- res = ipa_mhi_reset_dl_channel(channel);
- if (res) {
- IPA_MHI_ERR("failed to reset channel error %d\n", res);
+ IPA_MHI_ERR("ipa_enable_data_path failed %d\n", res);
return res;
}
-
- channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-/**
- * ipa2_mhi_init() - Initialize IPA MHI driver
- * @params: initialization params
- *
- * This function is called by MHI client driver on boot to initialize IPA MHI
- * Driver. When this function returns device can move to READY state.
- * This function is doing the following:
- * - Initialize MHI IPA internal data structures
- * - Create IPA RM resources
- * - Initialize debugfs
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa2_mhi_init(struct ipa_mhi_init_params *params)
-{
- int res;
- struct ipa_rm_create_params mhi_prod_params;
- struct ipa_rm_create_params mhi_cons_params;
-
- IPA_MHI_FUNC_ENTRY();
-
- if (!params) {
- IPA_MHI_ERR("null args\n");
- return -EINVAL;
- }
-
- if (!params->notify) {
- IPA_MHI_ERR("null notify function\n");
- return -EINVAL;
- }
-
- if (ipa_mhi_ctx) {
- IPA_MHI_ERR("already initialized\n");
- return -EPERM;
- }
-
- IPA_MHI_DBG("msi: addr_lo = 0x%x addr_hi = 0x%x\n",
- params->msi.addr_low, params->msi.addr_hi);
- IPA_MHI_DBG("msi: data = 0x%x mask = 0x%x\n",
- params->msi.data, params->msi.mask);
- IPA_MHI_DBG("mmio_addr = 0x%x\n", params->mmio_addr);
- IPA_MHI_DBG("first_ch_idx = 0x%x\n", params->first_ch_idx);
- IPA_MHI_DBG("first_er_idx = 0x%x\n", params->first_er_idx);
- IPA_MHI_DBG("notify = %pF priv = %p\n", params->notify, params->priv);
-
- /* Initialize context */
- ipa_mhi_ctx = kzalloc(sizeof(*ipa_mhi_ctx), GFP_KERNEL);
- if (!ipa_mhi_ctx) {
- IPA_MHI_ERR("no memory\n");
- res = -EFAULT;
- goto fail_alloc_ctx;
- }
-
- ipa_mhi_ctx->state = IPA_MHI_STATE_INITIALIZED;
- ipa_mhi_ctx->msi = params->msi;
- ipa_mhi_ctx->mmio_addr = params->mmio_addr;
- ipa_mhi_ctx->first_ch_idx = params->first_ch_idx;
- ipa_mhi_ctx->first_er_idx = params->first_er_idx;
- ipa_mhi_ctx->cb_notify = params->notify;
- ipa_mhi_ctx->cb_priv = params->priv;
- ipa_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
- ipa_mhi_ctx->qmi_req_id = 0;
- init_completion(&ipa_mhi_ctx->rm_prod_granted_comp);
- spin_lock_init(&ipa_mhi_ctx->state_lock);
- init_completion(&ipa_mhi_ctx->rm_cons_comp);
-
- ipa_mhi_ctx->wq = create_singlethread_workqueue("ipa_mhi_wq");
- if (!ipa_mhi_ctx->wq) {
- IPA_MHI_ERR("failed to create workqueue\n");
- res = -EFAULT;
- goto fail_create_wq;
- }
-
- /* Initialize debugfs */
- ipa_mhi_debugfs_init();
-
- /* Create PROD in IPA RM */
- memset(&mhi_prod_params, 0, sizeof(mhi_prod_params));
- mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
- mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
- mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify;
- res = ipa_rm_create_resource(&mhi_prod_params);
- if (res) {
- IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
- goto fail_create_rm_prod;
- }
-
- /* Create CONS in IPA RM */
- memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
- mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
- mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
- mhi_cons_params.request_resource = ipa_mhi_rm_cons_request;
- mhi_cons_params.release_resource = ipa_mhi_rm_cons_release;
- res = ipa_rm_create_resource(&mhi_cons_params);
- if (res) {
- IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
- goto fail_create_rm_cons;
- }
-
- /* Initialize uC interface */
- ipa_uc_mhi_init(ipa_mhi_uc_ready_cb, ipa_mhi_uc_wakeup_request_cb);
- if (ipa_uc_state_check() == 0)
- ipa_mhi_set_state(IPA_MHI_STATE_READY);
-
IPA_MHI_FUNC_EXIT();
return 0;
-
-fail_create_rm_cons:
- ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
-fail_create_rm_prod:
- destroy_workqueue(ipa_mhi_ctx->wq);
-fail_create_wq:
- kfree(ipa_mhi_ctx);
- ipa_mhi_ctx = NULL;
-fail_alloc_ctx:
- return res;
}
-/**
- * ipa2_mhi_start() - Start IPA MHI engine
- * @params: pcie addresses for MHI
- *
- * This function is called by MHI client driver on MHI engine start for
- * handling MHI accelerated channels. This function is called after
- * ipa2_mhi_init() was called and can be called after MHI reset to restart MHI
- * engine. When this function returns device can move to M0 state.
- * This function is doing the following:
- * - Send command to uC for initialization of MHI engine
- * - Add dependencies to IPA RM
- * - Request MHI_PROD in IPA RM
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa2_mhi_start(struct ipa_mhi_start_params *params)
+int ipa2_mhi_init_engine(struct ipa_mhi_init_engine *params)
{
int res;
@@ -1102,57 +106,23 @@ int ipa2_mhi_start(struct ipa_mhi_start_params *params)
return -EINVAL;
}
- if (unlikely(!ipa_mhi_ctx)) {
- IPA_MHI_ERR("IPA MHI was not initialized\n");
- return -EINVAL;
- }
-
- if (ipa_uc_state_check()) {
+ if (ipa2_uc_state_check()) {
IPA_MHI_ERR("IPA uc is not loaded\n");
return -EAGAIN;
}
- res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_set_state %d\n", res);
- return res;
- }
-
- ipa_mhi_ctx->host_ctrl_addr = params->host_ctrl_addr;
- ipa_mhi_ctx->host_data_addr = params->host_data_addr;
-
- /* Add MHI <-> Q6 dependencies to IPA RM */
- res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
- IPA_RM_RESOURCE_Q6_CONS);
- if (res && res != -EINPROGRESS) {
- IPA_MHI_ERR("failed to add dependency %d\n", res);
- goto fail_add_mhi_q6_dep;
- }
-
- res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
- IPA_RM_RESOURCE_MHI_CONS);
- if (res && res != -EINPROGRESS) {
- IPA_MHI_ERR("failed to add dependency %d\n", res);
- goto fail_add_q6_mhi_dep;
- }
-
- res = ipa_mhi_request_prod();
- if (res) {
- IPA_MHI_ERR("failed request prod %d\n", res);
- goto fail_request_prod;
- }
-
/* Initialize IPA MHI engine */
- res = ipa_uc_mhi_init_engine(&ipa_mhi_ctx->msi, ipa_mhi_ctx->mmio_addr,
- ipa_mhi_ctx->host_ctrl_addr, ipa_mhi_ctx->host_data_addr,
- ipa_mhi_ctx->first_ch_idx, ipa_mhi_ctx->first_er_idx);
+ res = ipa_uc_mhi_init_engine(params->uC.msi, params->uC.mmio_addr,
+ params->uC.host_ctrl_addr, params->uC.host_data_addr,
+ params->uC.first_ch_idx, params->uC.first_er_idx);
if (res) {
IPA_MHI_ERR("failed to start MHI engine %d\n", res);
goto fail_init_engine;
}
/* Update UL/DL sync if valid */
- res = ipa_uc_mhi_send_dl_ul_sync_info(cached_dl_ul_sync_info);
+ res = ipa2_uc_mhi_send_dl_ul_sync_info(
+ params->uC.ipa_cached_dl_ul_sync_info);
if (res) {
IPA_MHI_ERR("failed to update ul/dl sync %d\n", res);
goto fail_init_engine;
@@ -1162,25 +132,16 @@ int ipa2_mhi_start(struct ipa_mhi_start_params *params)
return 0;
fail_init_engine:
- ipa_mhi_release_prod();
-fail_request_prod:
- ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
- IPA_RM_RESOURCE_MHI_CONS);
-fail_add_q6_mhi_dep:
- ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
- IPA_RM_RESOURCE_Q6_CONS);
-fail_add_mhi_q6_dep:
- ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
return res;
}
/**
- * ipa2_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
+ * ipa2_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
* MHI channel
* @in: connect parameters
* @clnt_hdl: [out] client handle for this pipe
*
- * This function is called by MHI client driver on MHI channel start.
+ * This function is called by IPA MHI client driver on MHI channel start.
* This function is called after MHI engine was started.
* This function is doing the following:
* - Send command to uC to start corresponding MHI channel
@@ -1189,13 +150,12 @@ fail_add_mhi_q6_dep:
* Return codes: 0 : success
* negative : error
*/
-int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
+int ipa2_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl)
{
struct ipa_ep_context *ep;
int ipa_ep_idx;
int res;
- struct ipa_mhi_channel_ctx *channel = NULL;
- unsigned long flags;
IPA_MHI_FUNC_ENTRY();
@@ -1204,25 +164,12 @@ int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
return -EINVAL;
}
- if (in->sys.client >= IPA_CLIENT_MAX) {
- IPA_MHI_ERR("bad parm client:%d\n", in->sys.client);
- return -EINVAL;
- }
-
- if (unlikely(!ipa_mhi_ctx)) {
- IPA_MHI_ERR("IPA MHI was not initialized\n");
+ if (in->sys->client >= IPA_CLIENT_MAX) {
+ IPA_MHI_ERR("bad parm client:%d\n", in->sys->client);
return -EINVAL;
}
- spin_lock_irqsave(&ipa_mhi_ctx->state_lock, flags);
- if (!ipa_mhi_ctx || ipa_mhi_ctx->state != IPA_MHI_STATE_STARTED) {
- IPA_MHI_ERR("IPA MHI was not started\n");
- spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);
-
- ipa_ep_idx = ipa2_get_ep_mapping(in->sys.client);
+ ipa_ep_idx = ipa2_get_ep_mapping(in->sys->client);
if (ipa_ep_idx == -1) {
IPA_MHI_ERR("Invalid client.\n");
return -EINVAL;
@@ -1230,17 +177,8 @@ int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
ep = &ipa_ctx->ep[ipa_ep_idx];
- channel = ipa_mhi_get_channel_context(in->sys.client,
- in->channel_id);
- if (!channel) {
- IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n");
- return -EINVAL;
- }
-
IPA_MHI_DBG("client %d channelHandle %d channelIndex %d\n",
- channel->client, channel->hdl, channel->id);
-
- IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+ in->sys->client, in->start.uC.index, in->start.uC.id);
if (ep->valid == 1) {
IPA_MHI_ERR("EP already allocated.\n");
@@ -1249,40 +187,34 @@ int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
memset(ep, 0, offsetof(struct ipa_ep_context, sys));
ep->valid = 1;
- ep->skip_ep_cfg = in->sys.skip_ep_cfg;
- ep->client = in->sys.client;
- ep->client_notify = in->sys.notify;
- ep->priv = in->sys.priv;
- ep->keep_ipa_awake = in->sys.keep_ipa_awake;
+ ep->skip_ep_cfg = in->sys->skip_ep_cfg;
+ ep->client = in->sys->client;
+ ep->client_notify = in->sys->notify;
+ ep->priv = in->sys->priv;
+ ep->keep_ipa_awake = in->sys->keep_ipa_awake;
/* start channel in uC */
- if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
+ if (in->start.uC.state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
IPA_MHI_DBG("Initializing channel\n");
- res = ipa_uc_mhi_init_channel(ipa_ep_idx, channel->hdl,
- channel->id, (IPA_CLIENT_IS_PROD(ep->client) ? 1 : 2));
+ res = ipa_uc_mhi_init_channel(ipa_ep_idx, in->start.uC.index,
+ in->start.uC.id,
+ (IPA_CLIENT_IS_PROD(ep->client) ? 1 : 2));
if (res) {
IPA_MHI_ERR("init_channel failed %d\n", res);
goto fail_init_channel;
}
- } else if (channel->state == IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
- if (channel->client != ep->client) {
- IPA_MHI_ERR("previous channel client was %d\n",
- ep->client);
- goto fail_init_channel;
- }
+ } else if (in->start.uC.state == IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
IPA_MHI_DBG("Starting channel\n");
- res = ipa_uc_mhi_resume_channel(channel->hdl, false);
+ res = ipa_uc_mhi_resume_channel(in->start.uC.index, false);
if (res) {
IPA_MHI_ERR("init_channel failed %d\n", res);
goto fail_init_channel;
}
} else {
- IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
+ IPA_MHI_ERR("Invalid channel state %d\n", in->start.uC.state);
goto fail_init_channel;
}
- channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
-
res = ipa_enable_data_path(ipa_ep_idx);
if (res) {
IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
@@ -1291,7 +223,7 @@ int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
}
if (!ep->skip_ep_cfg) {
- if (ipa2_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
+ if (ipa2_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
IPAERR("fail to configure EP.\n");
goto fail_ep_cfg;
}
@@ -1306,14 +238,11 @@ int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
*clnt_hdl = ipa_ep_idx;
- if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys->client))
ipa_install_dflt_flt_rules(ipa_ep_idx);
- if (!ep->keep_ipa_awake)
- IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
-
ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
- IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys.client,
+ IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys->client,
ipa_ep_idx);
IPA_MHI_FUNC_EXIT();
@@ -1323,22 +252,20 @@ int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
fail_ep_cfg:
ipa_disable_data_path(ipa_ep_idx);
fail_enable_dp:
- ipa_uc_mhi_reset_channel(channel->hdl);
- channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+ ipa_uc_mhi_reset_channel(in->start.uC.index);
fail_init_channel:
memset(ep, 0, offsetof(struct ipa_ep_context, sys));
fail_ep_exists:
- IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
return -EPERM;
}
/**
- * ipa2_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
+ * ipa2_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
* MHI channel
* @in: connect parameters
* @clnt_hdl: [out] client handle for this pipe
*
- * This function is called by MHI client driver on MHI channel reset.
+ * This function is called by IPA MHI client driver on MHI channel reset.
* This function is called after MHI channel was started.
* This function is doing the following:
* - Send command to uC to reset corresponding MHI channel
@@ -1347,11 +274,9 @@ fail_ep_exists:
* Return codes: 0 : success
* negative : error
*/
-int ipa2_mhi_disconnect_pipe(u32 clnt_hdl)
+int ipa2_disconnect_mhi_pipe(u32 clnt_hdl)
{
struct ipa_ep_context *ep;
- static struct ipa_mhi_channel_ctx *channel;
- int res;
IPA_MHI_FUNC_ENTRY();
@@ -1365,492 +290,29 @@ int ipa2_mhi_disconnect_pipe(u32 clnt_hdl)
return -EINVAL;
}
- if (unlikely(!ipa_mhi_ctx)) {
- IPA_MHI_ERR("IPA MHI was not initialized\n");
- return -EINVAL;
- }
-
- channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
- if (!channel) {
- IPAERR("invalid clnt hdl\n");
- return -EINVAL;
- }
-
- ep = &ipa_ctx->ep[clnt_hdl];
-
- if (!ep->keep_ipa_awake)
- IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
-
- res = ipa_mhi_reset_channel(channel);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res);
- goto fail_reset_channel;
- }
-
ep->valid = 0;
ipa_delete_dflt_flt_rules(clnt_hdl);
- IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
-
IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
IPA_MHI_FUNC_EXIT();
return 0;
-
-fail_reset_channel:
- if (!ep->keep_ipa_awake)
- IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
- return res;
-}
-
-static int ipa_mhi_suspend_ul_channels(void)
-{
- int i;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- if (!ipa_mhi_ctx->ul_channels[i].valid)
- continue;
- if (ipa_mhi_ctx->ul_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_RUN)
- continue;
- IPA_MHI_DBG("suspending channel %d\n",
- ipa_mhi_ctx->ul_channels[i].hdl);
- res = ipa_uc_mhi_suspend_channel(
- ipa_mhi_ctx->ul_channels[i].hdl);
- if (res) {
- IPA_MHI_ERR("failed to suspend channel %d error %d\n",
- i, res);
- return res;
- }
- ipa_mhi_ctx->ul_channels[i].state =
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa_mhi_resume_ul_channels(bool LPTransitionRejected)
-{
- int i;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- if (!ipa_mhi_ctx->ul_channels[i].valid)
- continue;
- if (ipa_mhi_ctx->ul_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
- continue;
- IPA_MHI_DBG("suspending channel %d\n",
- ipa_mhi_ctx->ul_channels[i].hdl);
- res = ipa_uc_mhi_resume_channel(ipa_mhi_ctx->ul_channels[i].hdl,
- LPTransitionRejected);
- if (res) {
- IPA_MHI_ERR("failed to suspend channel %d error %d\n",
- i, res);
- return res;
- }
- ipa_mhi_ctx->ul_channels[i].state =
- IPA_HW_MHI_CHANNEL_STATE_RUN;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa_mhi_stop_event_update_ul_channels(void)
-{
- int i;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- if (!ipa_mhi_ctx->ul_channels[i].valid)
- continue;
- if (ipa_mhi_ctx->ul_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
- continue;
- IPA_MHI_DBG("stop update event channel %d\n",
- ipa_mhi_ctx->ul_channels[i].hdl);
- res = ipa_uc_mhi_stop_event_update_channel(
- ipa_mhi_ctx->ul_channels[i].hdl);
- if (res) {
- IPA_MHI_ERR("failed stop event channel %d error %d\n",
- i, res);
- return res;
- }
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa_mhi_suspend_dl_channels(void)
-{
- int i;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- if (!ipa_mhi_ctx->dl_channels[i].valid)
- continue;
- if (ipa_mhi_ctx->dl_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_RUN)
- continue;
- IPA_MHI_DBG("suspending channel %d\n",
- ipa_mhi_ctx->dl_channels[i].hdl);
- res = ipa_uc_mhi_suspend_channel(
- ipa_mhi_ctx->dl_channels[i].hdl);
- if (res) {
- IPA_MHI_ERR("failed to suspend channel %d error %d\n",
- i, res);
- return res;
- }
- ipa_mhi_ctx->dl_channels[i].state =
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
}
-static int ipa_mhi_resume_dl_channels(bool LPTransitionRejected)
+int ipa2_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index)
{
int i;
int res;
IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- if (!ipa_mhi_ctx->dl_channels[i].valid)
- continue;
- if (ipa_mhi_ctx->dl_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
- continue;
- IPA_MHI_DBG("suspending channel %d\n",
- ipa_mhi_ctx->dl_channels[i].hdl);
- res = ipa_uc_mhi_resume_channel(ipa_mhi_ctx->dl_channels[i].hdl,
- LPTransitionRejected);
- if (res) {
- IPA_MHI_ERR("failed to suspend channel %d error %d\n",
- i, res);
- return res;
- }
- ipa_mhi_ctx->dl_channels[i].state =
- IPA_HW_MHI_CHANNEL_STATE_RUN;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa_mhi_stop_event_update_dl_channels(void)
-{
- int i;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- if (!ipa_mhi_ctx->dl_channels[i].valid)
- continue;
- if (ipa_mhi_ctx->dl_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
- continue;
- IPA_MHI_DBG("stop update event channel %d\n",
- ipa_mhi_ctx->dl_channels[i].hdl);
- res = ipa_uc_mhi_stop_event_update_channel(
- ipa_mhi_ctx->dl_channels[i].hdl);
- if (res) {
- IPA_MHI_ERR("failed stop event channel %d error %d\n",
- i, res);
- return res;
- }
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-/**
- * ipa2_mhi_suspend() - Suspend MHI accelerated channels
- * @force:
- * false: in case of data pending in IPA, MHI channels will not be
- * suspended and function will fail.
- * true: in case of data pending in IPA, make sure no further access from
- * IPA to PCIe is possible. In this case suspend cannot fail.
- *
- * This function is called by MHI client driver on MHI suspend.
- * This function is called after MHI channel was started.
- * When this function returns device can move to M1/M2/M3/D3cold state.
- * This function is doing the following:
- * - Send command to uC to suspend corresponding MHI channel
- * - Make sure no further access is possible from IPA to PCIe
- * - Release MHI_PROD in IPA RM
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa2_mhi_suspend(bool force)
-{
- int res;
- bool bam_empty;
- bool force_clear = false;
-
- IPA_MHI_FUNC_ENTRY();
-
- if (unlikely(!ipa_mhi_ctx)) {
- IPA_MHI_ERR("IPA MHI was not initialized\n");
- return -EINVAL;
- }
-
- res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS);
+ res = ipa_uc_mhi_resume_channel(index, LPTransitionRejected);
if (res) {
- IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+ IPA_MHI_ERR("failed to suspend channel %d error %d\n",
+ i, res);
return res;
}
- res = ipa_mhi_suspend_ul_channels();
- if (res) {
- IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res);
- goto fail_suspend_ul_channel;
- }
-
- bam_empty = ipa_mhi_wait_for_bam_empty_timeout(
- IPA_MHI_BAM_EMPTY_TIMEOUT_MSEC);
- if (!bam_empty) {
- if (force) {
- res = ipa_mhi_enable_force_clear(
- ipa_mhi_ctx->qmi_req_id, false);
- if (res) {
- IPA_MHI_ERR("failed to enable force clear\n");
- BUG();
- return res;
- }
- force_clear = true;
- IPA_MHI_DBG("force clear datapath enabled\n");
-
- bam_empty = ipa_mhi_wait_for_bam_empty_timeout(
- IPA_MHI_BAM_EMPTY_TIMEOUT_MSEC);
- IPADBG("bam_empty=%d\n", bam_empty);
-
- } else {
- IPA_MHI_DBG("BAM not empty\n");
- res = -EAGAIN;
- goto fail_suspend_ul_channel;
- }
- }
-
- res = ipa_mhi_stop_event_update_ul_channels();
- if (res) {
- IPA_MHI_ERR("ipa_mhi_stop_event_update_ul_channels failed %d\n",
- res);
- goto fail_suspend_ul_channel;
- }
-
- /*
- * in case BAM not empty, hold IPA clocks and release them after all
- * IPA RM resource are released to make sure tag process will not start
- */
- if (!bam_empty)
- IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-
- IPA_MHI_DBG("release prod\n");
- res = ipa_mhi_release_prod();
- if (res) {
- IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
- goto fail_release_prod;
- }
-
- IPA_MHI_DBG("wait for cons release\n");
- res = ipa_mhi_wait_for_cons_release();
- if (res) {
- IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n", res);
- goto fail_release_cons;
- }
-
- usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
-
- res = ipa_mhi_suspend_dl_channels();
- if (res) {
- IPA_MHI_ERR("ipa_mhi_suspend_dl_channels failed %d\n", res);
- goto fail_suspend_dl_channel;
- }
-
- res = ipa_mhi_stop_event_update_dl_channels();
- if (res) {
- IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
- goto fail_stop_event_update_dl_channel;
- }
-
- if (force_clear) {
- res = ipa_mhi_disable_force_clear(ipa_mhi_ctx->qmi_req_id);
- if (res) {
- IPA_MHI_ERR("failed to disable force clear\n");
- BUG();
- return res;
- }
- IPA_MHI_DBG("force clear datapath disabled\n");
- ipa_mhi_ctx->qmi_req_id++;
- }
-
- if (!bam_empty) {
- ipa_ctx->tag_process_before_gating = false;
- IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
- }
-
- res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
- goto fail_release_cons;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-
-fail_stop_event_update_dl_channel:
- ipa_mhi_resume_dl_channels(true);
-fail_suspend_dl_channel:
-fail_release_cons:
- ipa_mhi_request_prod();
-fail_release_prod:
-fail_suspend_ul_channel:
- ipa_mhi_resume_ul_channels(true);
- ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
- return res;
-}
-
-/**
- * ipa2_mhi_resume() - Resume MHI accelerated channels
- *
- * This function is called by MHI client driver on MHI resume.
- * This function is called after MHI channel was suspended.
- * When this function returns device can move to M0 state.
- * This function is doing the following:
- * - Send command to uC to resume corresponding MHI channel
- * - Request MHI_PROD in IPA RM
- * - Resume data to IPA
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa2_mhi_resume(void)
-{
- int res;
- bool dl_channel_resumed = false;
-
- IPA_MHI_FUNC_ENTRY();
-
- if (unlikely(!ipa_mhi_ctx)) {
- IPA_MHI_ERR("IPA MHI was not initialized\n");
- return -EINVAL;
- }
-
- res = ipa_mhi_set_state(IPA_MHI_STATE_RESUME_IN_PROGRESS);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
- return res;
- }
-
- if (ipa_mhi_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) {
- /* resume all DL channels */
- res = ipa_mhi_resume_dl_channels(false);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
- res);
- goto fail_resume_dl_channels;
- }
- dl_channel_resumed = true;
-
- ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
- IPA_RM_RESOURCE_MHI_CONS);
- ipa_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
- }
-
- res = ipa_mhi_request_prod();
- if (res) {
- IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res);
- goto fail_request_prod;
- }
-
- /* resume all UL channels */
- res = ipa_mhi_resume_ul_channels(false);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_resume_ul_channels failed %d\n", res);
- goto fail_resume_ul_channels;
- }
-
- if (!dl_channel_resumed) {
- res = ipa_mhi_resume_dl_channels(true);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
- res);
- goto fail_resume_dl_channels2;
- }
- }
-
- res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
- goto fail_set_state;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-
-fail_set_state:
- ipa_mhi_suspend_dl_channels();
-fail_resume_dl_channels2:
- ipa_mhi_suspend_ul_channels();
-fail_resume_ul_channels:
- ipa_mhi_release_prod();
-fail_request_prod:
- ipa_mhi_suspend_dl_channels();
-fail_resume_dl_channels:
- ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
- return res;
-}
-
-
-/**
- * ipa2_mhi_destroy() - Destroy MHI IPA
- *
- * This function is called by MHI client driver on MHI reset to destroy all IPA
- * MHI resources.
- */
-void ipa2_mhi_destroy(void)
-{
- IPA_MHI_FUNC_ENTRY();
-
- if (unlikely(!ipa_mhi_ctx)) {
- IPA_MHI_ERR("IPA MHI was not initialized\n");
- return;
- }
-
- IPAERR("Not implemented Yet!\n");
- ipa_mhi_debugfs_destroy();
-
- IPA_MHI_FUNC_EXIT();
-}
-
-/**
- * ipa_mhi_handle_ipa_config_req() - hanle IPA CONFIG QMI message
- *
- * This function is called by by IPA QMI service to indicate that IPA CONFIG
- * message was sent from modem. IPA MHI will update this information to IPA uC
- * or will cache it until IPA MHI will be initialized.
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
-{
- IPA_MHI_FUNC_ENTRY();
- ipa_mhi_cache_dl_ul_sync_info(config_req);
-
- if (ipa_mhi_ctx && ipa_mhi_ctx->state != IPA_MHI_STATE_INITIALIZED)
- ipa_uc_mhi_send_dl_ul_sync_info(cached_dl_ul_sync_info);
-
IPA_MHI_FUNC_EXIT();
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index 00f18333587b..d5d2abe137f4 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -25,6 +25,7 @@
#include "ipa_qmi_service.h"
#include "ipa_ram_mmap.h"
+#include "../ipa_common_i.h"
#define IPA_Q6_SVC_VERS 1
#define IPA_A5_SVC_VERS 1
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
index d529622f1e19..13db6b08ba9d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
@@ -284,13 +284,13 @@ bad_uc_top_ofst:
}
/**
- * ipa_uc_state_check() - Check the status of the uC interface
+ * ipa2_uc_state_check() - Check the status of the uC interface
*
* Return value: 0 if the uC is loaded, interface is initialized
* and there was no recent failure in one of the commands.
* A negative value is returned otherwise.
*/
-int ipa_uc_state_check(void)
+int ipa2_uc_state_check(void)
{
if (!ipa_ctx->uc_ctx.uc_inited) {
IPAERR("uC interface not initialized\n");
@@ -309,7 +309,7 @@ int ipa_uc_state_check(void)
return 0;
}
-EXPORT_SYMBOL(ipa_uc_state_check);
+EXPORT_SYMBOL(ipa2_uc_state_check);
/**
* ipa_uc_loaded_check() - Check the uC has been loaded
@@ -384,7 +384,7 @@ static int ipa_uc_panic_notifier(struct notifier_block *this,
IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr);
- result = ipa_uc_state_check();
+ result = ipa2_uc_state_check();
if (result)
goto fail;
IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
@@ -593,7 +593,7 @@ int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
mutex_lock(&ipa_ctx->uc_ctx.uc_lock);
- if (ipa_uc_state_check()) {
+ if (ipa2_uc_state_check()) {
IPADBG("uC send command aborted\n");
mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
return -EBADF;
@@ -727,7 +727,7 @@ int ipa_uc_reset_pipe(enum ipa_client_type ipa_client)
* continue with the sequence without resetting the
* pipe.
*/
- if (ipa_uc_state_check()) {
+ if (ipa2_uc_state_check()) {
IPADBG("uC interface will not be used to reset %s pipe %d\n",
IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD",
ep_idx);
@@ -785,7 +785,7 @@ int ipa_uc_monitor_holb(enum ipa_client_type ipa_client, bool enable)
* continue with the sequence without resetting the
* pipe.
*/
- if (ipa_uc_state_check()) {
+ if (ipa2_uc_state_check()) {
IPADBG("uC interface will not be used to reset %s pipe %d\n",
IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD",
ep_idx);
@@ -849,7 +849,7 @@ int ipa_uc_notify_clk_state(bool enabled)
* If the uC interface has not been initialized yet,
* don't notify the uC on the enable/disable
*/
- if (ipa_uc_state_check()) {
+ if (ipa2_uc_state_check()) {
IPADBG("uC interface will not notify the UC on clock state\n");
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c
index ec3814b4e747..08d7363bfab3 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c
@@ -571,7 +571,7 @@ static void ipa_uc_mhi_event_log_info_hdlr(
}
}
-int ipa_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
+int ipa2_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
{
struct ipa_uc_hdlrs hdlrs;
@@ -600,6 +600,23 @@ int ipa_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
return 0;
}
+void ipa2_uc_mhi_cleanup(void)
+{
+ struct ipa_uc_hdlrs null_hdlrs = { 0 };
+
+ IPADBG("Enter\n");
+
+ if (!ipa_uc_mhi_ctx) {
+ IPAERR("ipa3_uc_mhi_ctx is not initialized\n");
+ return;
+ }
+ ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs);
+ kfree(ipa_uc_mhi_ctx);
+ ipa_uc_mhi_ctx = NULL;
+
+ IPADBG("Done\n");
+}
+
int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
u32 first_evt_idx)
@@ -730,7 +747,7 @@ disable_clks:
}
-int ipa_uc_mhi_reset_channel(int channelHandle)
+int ipa2_uc_mhi_reset_channel(int channelHandle)
{
union IpaHwMhiChangeChannelStateCmdData_t cmd;
union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
@@ -767,7 +784,7 @@ disable_clks:
return res;
}
-int ipa_uc_mhi_suspend_channel(int channelHandle)
+int ipa2_uc_mhi_suspend_channel(int channelHandle)
{
union IpaHwMhiChangeChannelStateCmdData_t cmd;
union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
@@ -842,7 +859,7 @@ disable_clks:
return res;
}
-int ipa_uc_mhi_stop_event_update_channel(int channelHandle)
+int ipa2_uc_mhi_stop_event_update_channel(int channelHandle)
{
union IpaHwMhiStopEventUpdateData_t cmd;
int res;
@@ -874,7 +891,7 @@ disable_clks:
return res;
}
-int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
+int ipa2_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
{
int res;
@@ -884,13 +901,14 @@ int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
}
IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
- cmd.params.isDlUlSyncEnabled, cmd.params.UlAccmVal);
+ cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal);
IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
- cmd.params.ulMsiEventThreshold, cmd.params.dlMsiEventThreshold);
+ cmd->params.ulMsiEventThreshold,
+ cmd->params.dlMsiEventThreshold);
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
- res = ipa_uc_send_cmd(cmd.raw32b,
+ res = ipa_uc_send_cmd(cmd->raw32b,
IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
if (res) {
IPAERR("ipa_uc_send_cmd failed %d\n", res);
@@ -903,7 +921,7 @@ disable_clks:
return res;
}
-int ipa_uc_mhi_print_stats(char *dbg_buff, int size)
+int ipa2_uc_mhi_print_stats(char *dbg_buff, int size)
{
int nBytes = 0;
int i;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 40e856bb942c..8ba4f04b31af 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -738,7 +738,7 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
}
}
- result = ipa_uc_state_check();
+ result = ipa2_uc_state_check();
if (result)
return result;
@@ -1005,7 +1005,7 @@ int ipa2_disconnect_wdi_pipe(u32 clnt_hdl)
return -EINVAL;
}
- result = ipa_uc_state_check();
+ result = ipa2_uc_state_check();
if (result)
return result;
@@ -1071,7 +1071,7 @@ int ipa2_enable_wdi_pipe(u32 clnt_hdl)
return -EINVAL;
}
- result = ipa_uc_state_check();
+ result = ipa2_uc_state_check();
if (result)
return result;
@@ -1139,7 +1139,7 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
return -EINVAL;
}
- result = ipa_uc_state_check();
+ result = ipa2_uc_state_check();
if (result)
return result;
@@ -1239,7 +1239,7 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl)
return -EINVAL;
}
- result = ipa_uc_state_check();
+ result = ipa2_uc_state_check();
if (result)
return result;
@@ -1306,7 +1306,7 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
return -EINVAL;
}
- result = ipa_uc_state_check();
+ result = ipa2_uc_state_check();
if (result)
return result;
@@ -1388,7 +1388,7 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
return -EINVAL;
}
- result = ipa_uc_state_check();
+ result = ipa2_uc_state_check();
if (result)
return result;
@@ -1447,7 +1447,7 @@ int ipa2_uc_reg_rdyCB(
return -EINVAL;
}
- result = ipa_uc_state_check();
+ result = ipa2_uc_state_check();
if (result) {
inout->is_uC_ready = false;
ipa_ctx->uc_wdi_ctx.uc_ready_cb = inout->notify;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 6fd9b4e61e02..9fc67548f6ac 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -4948,6 +4948,26 @@ static void *ipa2_get_ipc_logbuf_low(void)
return NULL;
}
+static void ipa2_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+ *holb = ipa_ctx->ep[ep_idx].holb;
+}
+
+static int ipa2_generate_tag_process(void)
+{
+ int res;
+
+ res = ipa_tag_process(NULL, 0, HZ);
+ if (res)
+ IPAERR("TAG process failed\n");
+
+ return res;
+}
+
+static void ipa2_set_tag_process_before_gating(bool val)
+{
+ ipa_ctx->tag_process_before_gating = val;
+}
int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
struct ipa_api_controller *api_ctrl)
@@ -4971,6 +4991,9 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_cfg_ep_deaggr = ipa2_cfg_ep_deaggr;
api_ctrl->ipa_cfg_ep_route = ipa2_cfg_ep_route;
api_ctrl->ipa_cfg_ep_holb = ipa2_cfg_ep_holb;
+ api_ctrl->ipa_get_holb = ipa2_get_holb;
+ api_ctrl->ipa_set_tag_process_before_gating =
+ ipa2_set_tag_process_before_gating;
api_ctrl->ipa_cfg_ep_cfg = ipa2_cfg_ep_cfg;
api_ctrl->ipa_cfg_ep_metadata_mask = ipa2_cfg_ep_metadata_mask;
api_ctrl->ipa_cfg_ep_holb_by_client = ipa2_cfg_ep_holb_by_client;
@@ -5044,13 +5067,32 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_dma_async_memcpy = ipa2_dma_async_memcpy;
api_ctrl->ipa_dma_uc_memcpy = ipa2_dma_uc_memcpy;
api_ctrl->ipa_dma_destroy = ipa2_dma_destroy;
- api_ctrl->ipa_mhi_init = ipa2_mhi_init;
- api_ctrl->ipa_mhi_start = ipa2_mhi_start;
- api_ctrl->ipa_mhi_connect_pipe = ipa2_mhi_connect_pipe;
- api_ctrl->ipa_mhi_disconnect_pipe = ipa2_mhi_disconnect_pipe;
- api_ctrl->ipa_mhi_suspend = ipa2_mhi_suspend;
- api_ctrl->ipa_mhi_resume = ipa2_mhi_resume;
- api_ctrl->ipa_mhi_destroy = ipa2_mhi_destroy;
+ api_ctrl->ipa_mhi_init_engine = ipa2_mhi_init_engine;
+ api_ctrl->ipa_connect_mhi_pipe = ipa2_connect_mhi_pipe;
+ api_ctrl->ipa_disconnect_mhi_pipe = ipa2_disconnect_mhi_pipe;
+ api_ctrl->ipa_uc_mhi_reset_channel = ipa2_uc_mhi_reset_channel;
+ api_ctrl->ipa_mhi_sps_channel_empty = ipa2_mhi_sps_channel_empty;
+ api_ctrl->ipa_generate_tag_process = ipa2_generate_tag_process;
+ api_ctrl->ipa_disable_sps_pipe = ipa2_disable_sps_pipe;
+ api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
+ qmi_enable_force_clear_datapath_send;
+ api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
+ qmi_disable_force_clear_datapath_send;
+ api_ctrl->ipa_mhi_reset_channel_internal =
+ ipa2_mhi_reset_channel_internal;
+ api_ctrl->ipa_mhi_start_channel_internal =
+ ipa2_mhi_start_channel_internal;
+ api_ctrl->ipa_mhi_resume_channels_internal =
+ ipa2_mhi_resume_channels_internal;
+ api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
+ ipa2_uc_mhi_send_dl_ul_sync_info;
+ api_ctrl->ipa_uc_mhi_init = ipa2_uc_mhi_init;
+ api_ctrl->ipa_uc_mhi_suspend_channel = ipa2_uc_mhi_suspend_channel;
+ api_ctrl->ipa_uc_mhi_stop_event_update_channel =
+ ipa2_uc_mhi_stop_event_update_channel;
+ api_ctrl->ipa_uc_mhi_cleanup = ipa2_uc_mhi_cleanup;
+ api_ctrl->ipa_uc_mhi_print_stats = ipa2_uc_mhi_print_stats;
+ api_ctrl->ipa_uc_state_check = ipa2_uc_state_check;
api_ctrl->ipa_write_qmap_id = ipa2_write_qmap_id;
api_ctrl->ipa_add_interrupt_handler = ipa2_add_interrupt_handler;
api_ctrl->ipa_remove_interrupt_handler = ipa2_remove_interrupt_handler;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 032f43dbe278..858693c1fc44 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1468,7 +1468,7 @@ static int ipa3_init_smem_region(int memory_region_size,
struct ipahal_imm_cmd_dma_shared_mem cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
struct ipa3_desc desc;
- struct ipa3_mem_buffer mem;
+ struct ipa_mem_buffer mem;
int rc;
if (memory_region_size == 0)
@@ -1636,7 +1636,7 @@ static void ipa3_q6_avoid_holb(void)
}
static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
- enum ipa_rule_type rlt, const struct ipa3_mem_buffer *mem)
+ enum ipa_rule_type rlt, const struct ipa_mem_buffer *mem)
{
struct ipa3_desc *desc;
struct ipahal_imm_cmd_dma_shared_mem cmd;
@@ -1739,7 +1739,7 @@ free_desc:
}
static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
- enum ipa_rule_type rlt, const struct ipa3_mem_buffer *mem)
+ enum ipa_rule_type rlt, const struct ipa_mem_buffer *mem)
{
struct ipa3_desc *desc;
struct ipahal_imm_cmd_dma_shared_mem cmd;
@@ -1837,7 +1837,7 @@ static int ipa3_q6_clean_q6_tables(void)
struct ipahal_imm_cmd_pyld *cmd_pyld;
struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
int retval;
- struct ipa3_mem_buffer mem = { 0 };
+ struct ipa_mem_buffer mem = { 0 };
struct ipahal_reg_fltrt_hash_flush flush;
struct ipahal_reg_valmask valmask;
u64 *entry;
@@ -2146,7 +2146,7 @@ int _ipa_init_sram_v3_0(void)
int _ipa_init_hdr_v3_0(void)
{
struct ipa3_desc desc = { 0 };
- struct ipa3_mem_buffer mem;
+ struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_hdr_init_local cmd = {0};
struct ipahal_imm_cmd_pyld *cmd_pyld;
struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };
@@ -2250,7 +2250,7 @@ int _ipa_init_hdr_v3_0(void)
int _ipa_init_rt4_v3(void)
{
struct ipa3_desc desc = { 0 };
- struct ipa3_mem_buffer mem;
+ struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
u64 *entry;
@@ -2324,7 +2324,7 @@ free_mem:
int _ipa_init_rt6_v3(void)
{
struct ipa3_desc desc = { 0 };
- struct ipa3_mem_buffer mem;
+ struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
u64 *entry;
@@ -2398,7 +2398,7 @@ free_mem:
int _ipa_init_flt4_v3(void)
{
struct ipa3_desc desc = { 0 };
- struct ipa3_mem_buffer mem;
+ struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
u64 *entry;
@@ -2491,7 +2491,7 @@ free_mem:
int _ipa_init_flt6_v3(void)
{
struct ipa3_desc desc = { 0 };
- struct ipa3_mem_buffer mem;
+ struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
u64 *entry;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index c1a7fdbd12d4..2a48b2bd488f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -769,7 +769,7 @@ static void ipa_xfer_cb(struct gsi_chan_xfer_notify *notify)
static int ipa3_reconfigure_channel_to_gpi(struct ipa3_ep_context *ep,
struct gsi_chan_props *orig_chan_props,
- struct ipa3_mem_buffer *chan_dma)
+ struct ipa_mem_buffer *chan_dma)
{
struct gsi_chan_props chan_props;
enum gsi_status gsi_res;
@@ -837,7 +837,7 @@ static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
enum gsi_status gsi_res;
struct gsi_chan_props orig_chan_props;
union gsi_channel_scratch orig_chan_scratch;
- struct ipa3_mem_buffer chan_dma;
+ struct ipa_mem_buffer chan_dma;
void *buff;
dma_addr_t dma_addr;
struct gsi_xfer_elem xfer_elem;
@@ -863,7 +863,7 @@ static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
IPAERR("Error getting channel properties: %d\n", gsi_res);
return -EFAULT;
}
- memset(&chan_dma, 0, sizeof(struct ipa3_mem_buffer));
+ memset(&chan_dma, 0, sizeof(struct ipa_mem_buffer));
result = ipa3_reconfigure_channel_to_gpi(ep, &orig_chan_props,
&chan_dma);
if (result)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
index 9d1ff18d7ed0..483b2ca118fa 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
@@ -838,7 +838,7 @@ void ipa3_dma_destroy(void)
*
* @priv -not in use.
* @evt - event name - IPA_RECIVE.
- * @data -the ipa3_mem_buffer.
+ * @data -the ipa_mem_buffer.
*/
void ipa3_dma_async_memcpy_notify_cb(void *priv
, enum ipa_dp_evt_type evt, unsigned long data)
@@ -847,11 +847,11 @@ void ipa3_dma_async_memcpy_notify_cb(void *priv
struct ipa3_dma_xfer_wrapper *xfer_descr_expected;
struct ipa3_sys_context *sys;
unsigned long flags;
- struct ipa3_mem_buffer *mem_info;
+ struct ipa_mem_buffer *mem_info;
IPADMA_FUNC_ENTRY();
- mem_info = (struct ipa3_mem_buffer *)data;
+ mem_info = (struct ipa_mem_buffer *)data;
ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
sys = ipa3_ctx->ep[ep_idx].sys;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index f2d60cb14212..3b5ce662e3a4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -73,7 +73,7 @@ static void ipa3_alloc_wlan_rx_common_cache(u32 size);
static void ipa3_cleanup_wlan_rx_common_cache(void);
static void ipa3_wq_repl_rx(struct work_struct *work);
static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
- struct ipa3_mem_buffer *mem_info);
+ struct ipa_mem_buffer *mem_info);
static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
struct ipa3_ep_context *ep);
static int ipa_populate_tag_field(struct ipa3_desc *desc,
@@ -2854,7 +2854,7 @@ static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
}
static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
- struct ipa3_mem_buffer *mem_info)
+ struct ipa_mem_buffer *mem_info)
{
IPADBG_LOW("ENTER.\n");
if (unlikely(list_empty(&sys->head_desc_list))) {
@@ -3785,7 +3785,7 @@ static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
struct ipa3_sys_context *sys_ptr;
struct ipa3_rx_pkt_wrapper *rx_pkt;
struct gsi_chan_xfer_notify xfer_notify;
- struct ipa3_mem_buffer mem_info = {0};
+ struct ipa_mem_buffer mem_info = {0};
enum ipa_client_type client;
if (sys->ep->bytes_xfered_valid) {
@@ -3841,7 +3841,7 @@ static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
struct sps_iovec iov;
int ret;
int cnt = 0;
- struct ipa3_mem_buffer mem_info = {0};
+ struct ipa_mem_buffer mem_info = {0};
while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
!atomic_read(&sys->curr_polling_state))) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 4044c28c96e0..f49b16525211 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -317,7 +317,7 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
* Return: 0 on success, negative on failure
*/
static int ipa_alloc_init_flt_tbl_hdr(enum ipa_ip_type ip,
- struct ipa3_mem_buffer *hash_hdr, struct ipa3_mem_buffer *nhash_hdr)
+ struct ipa_mem_buffer *hash_hdr, struct ipa_mem_buffer *nhash_hdr)
{
int num_hdrs;
u64 *hash_entr;
@@ -489,7 +489,7 @@ static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
int res;
struct ipa3_flt_entry *entry;
u8 *tbl_mem_buf;
- struct ipa3_mem_buffer tbl_mem;
+ struct ipa_mem_buffer tbl_mem;
struct ipa3_flt_tbl *tbl;
int i;
int hdr_idx = 0;
@@ -612,8 +612,8 @@ err:
* Return: 0 on success, negative on failure
*/
static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
- struct ipa3_mem_buffer *hash_hdr, struct ipa3_mem_buffer *nhash_hdr,
- struct ipa3_mem_buffer *hash_bdy, struct ipa3_mem_buffer *nhash_bdy)
+ struct ipa_mem_buffer *hash_hdr, struct ipa_mem_buffer *nhash_hdr,
+ struct ipa_mem_buffer *hash_bdy, struct ipa_mem_buffer *nhash_bdy)
{
u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
u32 hash_bdy_sz;
@@ -724,7 +724,7 @@ no_flt_tbls:
* Return: true if enough space available or false in other cases
*/
static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
- enum ipa_rule_type rlt, struct ipa3_mem_buffer *bdy)
+ enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
{
u16 avail;
@@ -827,8 +827,8 @@ static bool ipa_flt_skip_pipe_config(int pipe)
*/
int __ipa_commit_flt_v3(enum ipa_ip_type ip)
{
- struct ipa3_mem_buffer hash_bdy, nhash_bdy;
- struct ipa3_mem_buffer hash_hdr, nhash_hdr;
+ struct ipa_mem_buffer hash_bdy, nhash_bdy;
+ struct ipa_mem_buffer hash_hdr, nhash_hdr;
int rc = 0;
struct ipa3_desc *desc;
struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index f0f79d066f0b..6a14d75a795f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -28,7 +28,7 @@ static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
*
* Returns: 0 on success, negative on failure
*/
-static int ipa3_generate_hdr_hw_tbl(struct ipa3_mem_buffer *mem)
+static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
{
struct ipa3_hdr_entry *entry;
@@ -61,7 +61,7 @@ static int ipa3_generate_hdr_hw_tbl(struct ipa3_mem_buffer *mem)
return 0;
}
-static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa3_mem_buffer *mem,
+static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
u32 hdr_base_addr)
{
struct ipa3_hdr_proc_ctx_entry *entry;
@@ -96,7 +96,7 @@ static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa3_mem_buffer *mem,
* Returns: 0 on success, negative on failure
*/
static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
- struct ipa3_mem_buffer *mem, struct ipa3_mem_buffer *aligned_mem)
+ struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
u32 hdr_base_addr;
@@ -133,9 +133,9 @@ static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
int __ipa_commit_hdr_v3_0(void)
{
struct ipa3_desc desc[2];
- struct ipa3_mem_buffer hdr_mem;
- struct ipa3_mem_buffer ctx_mem;
- struct ipa3_mem_buffer aligned_ctx_mem;
+ struct ipa_mem_buffer hdr_mem;
+ struct ipa_mem_buffer ctx_mem;
+ struct ipa_mem_buffer aligned_ctx_mem;
struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index e7e5d2852712..2331adb8d7e1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -154,9 +154,6 @@
#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr"
#define IPA_INVALID_L4_PROTOCOL 0xFF
-#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
-#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
-
#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
(((start_ofst) + 127) & ~127)
#define IPA_RT_FLT_HW_RULE_BUF_SIZE (256)
@@ -204,8 +201,6 @@
#define IPA_GSI_CHANNEL_STOP_MAX_RETRY 10
#define IPA_GSI_CHANNEL_STOP_PKT_SIZE 1
-#define IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC (1000)
-#define IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC (2000)
#define IPA_GSI_CHANNEL_EMPTY_MAX_RETRY 15
#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC (1000)
@@ -247,18 +242,6 @@ struct ipa_smmu_cb_ctx {
};
/**
- * struct ipa3_mem_buffer - IPA memory buffer
- * @base: base
- * @phys_base: physical base address
- * @size: size of memory buffer
- */
-struct ipa3_mem_buffer {
- void *base;
- dma_addr_t phys_base;
- u32 size;
-};
-
-/**
* struct ipa3_flt_entry - IPA filtering table entry
* @link: entry's link in global filtering enrties list
* @rule: filter rule
@@ -311,8 +294,8 @@ struct ipa3_rt_tbl {
u32 cookie;
bool in_sys[IPA_RULE_TYPE_MAX];
u32 sz[IPA_RULE_TYPE_MAX];
- struct ipa3_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
- struct ipa3_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
+ struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
+ struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
int id;
struct idr rule_ids;
};
@@ -449,8 +432,8 @@ struct ipa3_flt_tbl {
u32 rule_cnt;
bool in_sys[IPA_RULE_TYPE_MAX];
u32 sz[IPA_RULE_TYPE_MAX];
- struct ipa3_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
- struct ipa3_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
+ struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
+ struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
bool sticky_rear;
struct idr rule_ids;
};
@@ -745,14 +728,14 @@ enum ipa3_desc_type {
*/
struct ipa3_tx_pkt_wrapper {
enum ipa3_desc_type type;
- struct ipa3_mem_buffer mem;
+ struct ipa_mem_buffer mem;
struct work_struct work;
struct list_head link;
void (*callback)(void *user1, int user2);
void *user1;
int user2;
struct ipa3_sys_context *sys;
- struct ipa3_mem_buffer mult;
+ struct ipa_mem_buffer mult;
u32 cnt;
void *bounce;
bool no_unmap_dma;
@@ -1189,58 +1172,6 @@ enum ipa3_hw_flags {
};
/**
- * enum ipa3_hw_mhi_channel_states - MHI channel state machine
- *
- * Values are according to MHI specification
- * @IPA_HW_MHI_CHANNEL_STATE_DISABLE: Channel is disabled and not processed by
- * the host or device.
- * @IPA_HW_MHI_CHANNEL_STATE_ENABLE: A channel is enabled after being
- * initialized and configured by host, including its channel context and
- * associated transfer ring. While this state, the channel is not active
- * and the device does not process transfer.
- * @IPA_HW_MHI_CHANNEL_STATE_RUN: The device processes transfers and doorbell
- * for channels.
- * @IPA_HW_MHI_CHANNEL_STATE_SUSPEND: Used to halt operations on the channel.
- * The device does not process transfers for the channel in this state.
- * This state is typically used to synchronize the transition to low power
- * modes.
- * @IPA_HW_MHI_CHANNEL_STATE_STOP: Used to halt operations on the channel.
- * The device does not process transfers for the channel in this state.
- * @IPA_HW_MHI_CHANNEL_STATE_ERROR: The device detected an error in an element
- * from the transfer ring associated with the channel.
- * @IPA_HW_MHI_CHANNEL_STATE_INVALID: Invalid state. Shall not be in use in
- * operational scenario.
- */
-enum ipa3_hw_mhi_channel_states {
- IPA_HW_MHI_CHANNEL_STATE_DISABLE = 0,
- IPA_HW_MHI_CHANNEL_STATE_ENABLE = 1,
- IPA_HW_MHI_CHANNEL_STATE_RUN = 2,
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND = 3,
- IPA_HW_MHI_CHANNEL_STATE_STOP = 4,
- IPA_HW_MHI_CHANNEL_STATE_ERROR = 5,
- IPA_HW_MHI_CHANNEL_STATE_INVALID = 0xFF
-};
-
-/**
- * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
- * command. Parameters are sent as 32b immediate parameters.
- * @isDlUlSyncEnabled: Flag to indicate if DL UL Syncronization is enabled
- * @UlAccmVal: UL Timer Accumulation value (Period after which device will poll
- * for UL data)
- * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
- * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
- */
-union IpaHwMhiDlUlSyncCmdData_t {
- struct IpaHwMhiDlUlSyncCmdParams_t {
- u32 isDlUlSyncEnabled:8;
- u32 UlAccmVal:8;
- u32 ulMsiEventThreshold:8;
- u32 dlMsiEventThreshold:8;
- } params;
- u32 raw32b;
-};
-
-/**
* struct ipa3_uc_ctx - IPA uC context
* @uc_inited: Indicates if uC interface has been initialized
* @uc_loaded: Indicates if uC has loaded
@@ -1467,8 +1398,8 @@ struct ipa3_context {
uint aggregation_time_limit;
bool hdr_tbl_lcl;
bool hdr_proc_ctx_tbl_lcl;
- struct ipa3_mem_buffer hdr_mem;
- struct ipa3_mem_buffer hdr_proc_ctx_mem;
+ struct ipa_mem_buffer hdr_mem;
+ struct ipa_mem_buffer hdr_proc_ctx_mem;
bool ip4_rt_tbl_hash_lcl;
bool ip4_rt_tbl_nhash_lcl;
bool ip6_rt_tbl_hash_lcl;
@@ -1477,7 +1408,7 @@ struct ipa3_context {
bool ip4_flt_tbl_nhash_lcl;
bool ip6_flt_tbl_hash_lcl;
bool ip6_flt_tbl_nhash_lcl;
- struct ipa3_mem_buffer empty_rt_tbl_mem;
+ struct ipa_mem_buffer empty_rt_tbl_mem;
struct gen_pool *pipe_mem_pool;
struct dma_pool *dma_pool;
struct ipa3_active_clients ipa3_active_clients;
@@ -1961,19 +1892,28 @@ void ipa3_dma_destroy(void);
/*
* MHI
*/
-int ipa3_mhi_init(struct ipa_mhi_init_params *params);
-int ipa3_mhi_start(struct ipa_mhi_start_params *params);
+int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params);
+
+int ipa3_connect_mhi_pipe(
+ struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl);
+
+int ipa3_disconnect_mhi_pipe(u32 clnt_hdl);
+
+bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client);
-int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl);
+int ipa3_mhi_reset_channel_internal(enum ipa_client_type client);
-int ipa3_mhi_disconnect_pipe(u32 clnt_hdl);
+int ipa3_mhi_start_channel_internal(enum ipa_client_type client);
-int ipa3_mhi_suspend(bool force);
+bool ipa3_has_open_aggr_frame(enum ipa_client_type client);
-int ipa3_mhi_resume(void);
+int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index);
-void ipa3_mhi_destroy(void);
+int ipa3_mhi_destroy_channel(enum ipa_client_type client);
/*
* mux id
@@ -2165,6 +2105,8 @@ int ipa3_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
enum ipa_client_type ipa_client);
int ipa3_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
+int ipa3_mhi_query_ch_info(enum ipa_client_type client,
+ struct gsi_chan_info *ch_info);
int ipa3_uc_interface_init(void);
int ipa3_uc_reset_pipe(enum ipa_client_type ipa_client);
@@ -2185,7 +2127,7 @@ int ipa3_uc_update_hw_flags(u32 flags);
int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
void ipa3_uc_mhi_cleanup(void);
-int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd);
+int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
u32 first_evt_idx);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 517093adbe81..14e2f1f4c510 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -17,6 +17,8 @@
#include <linux/mutex.h>
#include <linux/ipa.h>
#include <linux/msm_gsi.h>
+#include <linux/ipa_mhi.h>
+#include "../ipa_common_i.h"
#include "ipa_i.h"
#include "ipa_qmi_service.h"
@@ -58,56 +60,12 @@
#define IPA_MHI_FUNC_EXIT() \
IPA_MHI_DBG_LOW("EXIT\n")
-#define IPA_MHI_GSI_ER_START 10
-#define IPA_MHI_GSI_ER_END 16
-
-#define IPA_MHI_RM_TIMEOUT_MSEC 10000
-
-#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10
-
#define IPA_MHI_MAX_UL_CHANNELS 1
#define IPA_MHI_MAX_DL_CHANNELS 1
-#if (IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) > \
- (IPA_MHI_GSI_ER_END - IPA_MHI_GSI_ER_START)
-#error not enought event rings for MHI
-#endif
-
-#define IPA_MHI_SUSPEND_SLEEP_MIN 900
-#define IPA_MHI_SUSPEND_SLEEP_MAX 1100
-
/* bit #40 in address should be asserted for MHI transfers over pcie */
#define IPA_MHI_HOST_ADDR_COND(addr) \
- ((ipa3_mhi_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
-
-enum ipa3_mhi_state {
- IPA_MHI_STATE_INITIALIZED,
- IPA_MHI_STATE_READY,
- IPA_MHI_STATE_STARTED,
- IPA_MHI_STATE_SUSPEND_IN_PROGRESS,
- IPA_MHI_STATE_SUSPENDED,
- IPA_MHI_STATE_RESUME_IN_PROGRESS,
- IPA_MHI_STATE_MAX
-};
-
-static char *ipa3_mhi_state_str[] = {
- __stringify(IPA_MHI_STATE_INITIALIZED),
- __stringify(IPA_MHI_STATE_READY),
- __stringify(IPA_MHI_STATE_STARTED),
- __stringify(IPA_MHI_STATE_SUSPEND_IN_PROGRESS),
- __stringify(IPA_MHI_STATE_SUSPENDED),
- __stringify(IPA_MHI_STATE_RESUME_IN_PROGRESS),
-};
-
-#define MHI_STATE_STR(state) \
- (((state) >= 0 && (state) < IPA_MHI_STATE_MAX) ? \
- ipa3_mhi_state_str[(state)] : \
- "INVALID")
-
-enum ipa_mhi_dma_dir {
- IPA_MHI_DMA_TO_HOST,
- IPA_MHI_DMA_FROM_HOST,
-};
+ ((params->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
/**
* enum ipa3_mhi_burst_mode - MHI channel burst mode state
@@ -132,979 +90,22 @@ enum ipa3_mhi_polling_mode {
IPA_MHI_POLLING_MODE_POLL_MODE,
};
-struct ipa3_mhi_ch_ctx {
- u8 chstate;/*0-7*/
- u8 brstmode:2;/*8-9*/
- u8 pollcfg:6;/*10-15*/
- u16 rsvd;/*16-31*/
- u32 chtype;
- u32 erindex;
- u64 rbase;
- u64 rlen;
- u64 rp;
- u64 wp;
-} __packed;
-
-struct ipa3_mhi_ev_ctx {
- u32 intmodc:16;
- u32 intmodt:16;
- u32 ertype;
- u32 msivec;
- u64 rbase;
- u64 rlen;
- u64 rp;
- u64 wp;
-} __packed;
-
-/**
- * struct ipa3_mhi_channel_ctx - MHI Channel context
- * @valid: entry is valid
- * @id: MHI channel ID
- * @index: channel handle for uC
- * @ep: IPA endpoint context
- * @state: Channel state
- * @stop_in_proc: flag to indicate if channel was stopped completely
- * @ch_info: information about channel occupancy
- * @channel_context_addr : the channel context address in host address space
- * @ch_ctx_host: MHI Channel context
- * @event_context_addr: the event context address in host address space
- * @ev_ctx_host: MHI event context
- * @cached_gsi_evt_ring_hdl: GSI channel event ring handle
- * @brstmode_enabled: is burst mode enabled for this channel?
- * @ch_scratch: the channel scratch configuration
- */
-struct ipa3_mhi_channel_ctx {
- bool valid;
- u8 id;
- u8 index;
- struct ipa3_ep_context *ep;
- enum ipa3_hw_mhi_channel_states state;
- bool stop_in_proc;
- struct gsi_chan_info ch_info;
- u64 channel_context_addr;
- struct ipa3_mhi_ch_ctx ch_ctx_host;
- u64 event_context_addr;
- struct ipa3_mhi_ev_ctx ev_ctx_host;
- unsigned long cached_gsi_evt_ring_hdl;
- bool brstmode_enabled;
- union __packed gsi_channel_scratch ch_scratch;
-};
-
-enum ipa3_mhi_rm_state {
- IPA_MHI_RM_STATE_RELEASED,
- IPA_MHI_RM_STATE_REQUESTED,
- IPA_MHI_RM_STATE_GRANTED,
- IPA_MHI_RM_STATE_MAX
-};
-
-/**
- * struct ipa3_mhi_ctx - IPA MHI context
- * @state: IPA MHI state
- * @state_lock: lock for state read/write operations
- * @msi: Message Signaled Interrupts parameters
- * @mmio_addr: MHI MMIO physical address
- * @first_ch_idx: First channel ID for hardware accelerated channels.
- * @first_er_idx: First event ring ID for hardware accelerated channels.
- * @host_ctrl_addr: Base address of MHI control data structures
- * @host_data_addr: Base address of MHI data buffers
- * @channel_context_addr: channel context array address in host address space
- * @event_context_addr: event context array address in host address space
- * @cb_notify: client callback
- * @cb_priv: client private data to be provided in client callback
- * @ul_channels: IPA MHI uplink channel contexts
- * @dl_channels: IPA MHI downlink channel contexts
- * @total_channels: Total number of channels ever connected to IPA MHI
- * @rm_prod_granted_comp: Completion object for MHI producer resource in IPA RM
- * @rm_cons_state: MHI consumer resource state in IPA RM
- * @rm_cons_comp: Completion object for MHI consumer resource in IPA RM
- * @trigger_wakeup: trigger wakeup callback ?
- * @wakeup_notified: MHI Client wakeup function was called
- * @wq: workqueue for wakeup event
- * @qmi_req_id: QMI request unique id
- * @use_ipadma: use IPADMA to access host space
- * @assert_bit40: should assert bit 40 in order to access hots space.
- * if PCIe iATU is configured then not need to assert bit40
- * @ test_mode: flag to indicate if IPA MHI is in unit test mode
- */
-struct ipa3_mhi_ctx {
- enum ipa3_mhi_state state;
- spinlock_t state_lock;
- struct ipa_mhi_msi_info msi;
- u32 mmio_addr;
- u32 first_ch_idx;
- u32 first_er_idx;
- u32 host_ctrl_addr;
- u32 host_data_addr;
- u64 channel_context_array_addr;
- u64 event_context_array_addr;
- mhi_client_cb cb_notify;
- void *cb_priv;
- struct ipa3_mhi_channel_ctx ul_channels[IPA_MHI_MAX_UL_CHANNELS];
- struct ipa3_mhi_channel_ctx dl_channels[IPA_MHI_MAX_DL_CHANNELS];
- u32 total_channels;
- struct completion rm_prod_granted_comp;
- enum ipa3_mhi_rm_state rm_cons_state;
- struct completion rm_cons_comp;
- bool trigger_wakeup;
- bool wakeup_notified;
- struct workqueue_struct *wq;
- u32 qmi_req_id;
- u32 use_ipadma;
- bool assert_bit40;
- bool test_mode;
-};
-
-static struct ipa3_mhi_ctx *ipa3_mhi_ctx;
-
-static void ipa3_mhi_wq_notify_wakeup(struct work_struct *work);
-static DECLARE_WORK(ipa_mhi_notify_wakeup_work, ipa3_mhi_wq_notify_wakeup);
-
-static void ipa3_mhi_wq_notify_ready(struct work_struct *work);
-static DECLARE_WORK(ipa_mhi_notify_ready_work, ipa3_mhi_wq_notify_ready);
-
-static union IpaHwMhiDlUlSyncCmdData_t ipa3_cached_dl_ul_sync_info;
-
-#ifdef CONFIG_DEBUG_FS
-#define IPA_MHI_MAX_MSG_LEN 512
-static char dbg_buff[IPA_MHI_MAX_MSG_LEN];
-static struct dentry *dent;
-
-static char *ipa3_mhi_channel_state_str[] = {
- __stringify(IPA_HW_MHI_CHANNEL_STATE_DISABLE),
- __stringify(IPA_HW_MHI_CHANNEL_STATE_ENABLE),
- __stringify(IPA_HW_MHI_CHANNEL_STATE_RUN),
- __stringify(IPA_HW_MHI_CHANNEL_STATE_SUSPEND),
- __stringify(IPA_HW_MHI_CHANNEL_STATE_STOP),
- __stringify(IPA_HW_MHI_CHANNEL_STATE_ERROR),
-};
-
-#define MHI_CH_STATE_STR(state) \
- (((state) >= 0 && (state) <= IPA_HW_MHI_CHANNEL_STATE_ERROR) ? \
- ipa3_mhi_channel_state_str[(state)] : \
- "INVALID")
-
-static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr,
- u64 host_addr, int size)
-{
- struct ipa3_mem_buffer mem;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
-
- if (ipa3_mhi_ctx->use_ipadma) {
- host_addr = IPA_MHI_HOST_ADDR_COND(host_addr);
-
- mem.size = size;
- mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
- &mem.phys_base, GFP_KERNEL);
- if (!mem.base) {
- IPAERR("dma_alloc_coherent failed, DMA buff size %d\n",
- mem.size);
- return -ENOMEM;
- }
-
- if (dir == IPA_MHI_DMA_FROM_HOST) {
- res = ipa_dma_sync_memcpy(mem.phys_base, host_addr,
- size);
- if (res) {
- IPAERR("ipa_dma_sync_memcpy from host fail%d\n",
- res);
- goto fail_memcopy;
- }
- memcpy(dev_addr, mem.base, size);
- } else {
- memcpy(mem.base, dev_addr, size);
- res = ipa_dma_sync_memcpy(host_addr, mem.phys_base,
- size);
- if (res) {
- IPAERR("ipa_dma_sync_memcpy to host fail %d\n",
- res);
- goto fail_memcopy;
- }
- }
- dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
- mem.phys_base);
- } else {
- void *host_ptr;
-
- if (!ipa3_mhi_ctx->test_mode)
- host_ptr = ioremap(host_addr, size);
- else
- host_ptr = phys_to_virt(host_addr);
- if (!host_ptr) {
- IPAERR("ioremap failed for 0x%llx\n", host_addr);
- return -EFAULT;
- }
- if (dir == IPA_MHI_DMA_FROM_HOST)
- memcpy(dev_addr, host_ptr, size);
- else
- memcpy(host_ptr, dev_addr, size);
- if (!ipa3_mhi_ctx->test_mode)
- iounmap(host_ptr);
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-
-fail_memcopy:
- dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
- mem.phys_base);
- return res;
-}
-
-static int ipa3_mhi_print_channel_info(struct ipa3_mhi_channel_ctx *channel,
- char *buff, int len)
-{
- int nbytes = 0;
-
- if (channel->valid) {
- nbytes += scnprintf(&buff[nbytes],
- len - nbytes,
- "channel idx=%d ch_id=%d client=%d state=%s\n",
- channel->index, channel->id, channel->ep->client,
- MHI_CH_STATE_STR(channel->state));
-
- nbytes += scnprintf(&buff[nbytes],
- len - nbytes,
- " stop_in_proc=%d gsi_chan_hdl=%ld\n",
- channel->stop_in_proc, channel->ep->gsi_chan_hdl);
-
- nbytes += scnprintf(&buff[nbytes],
- len - nbytes,
- " ch_ctx=%llx\n",
- channel->channel_context_addr);
-
- nbytes += scnprintf(&buff[nbytes],
- len - nbytes,
- " gsi_evt_ring_hdl=%ld ev_ctx=%llx\n",
- channel->ep->gsi_evt_ring_hdl,
- channel->event_context_addr);
- }
- return nbytes;
-}
-
-static int ipa3_mhi_print_host_channel_ctx_info(
- struct ipa3_mhi_channel_ctx *channel, char *buff, int len)
-{
- int res, nbytes = 0;
- struct ipa3_mhi_ch_ctx ch_ctx_host;
-
- memset(&ch_ctx_host, 0, sizeof(ch_ctx_host));
-
- /* reading ch context from host */
- res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
- &ch_ctx_host, channel->channel_context_addr,
- sizeof(ch_ctx_host));
- if (res) {
- nbytes += scnprintf(&buff[nbytes], len - nbytes,
- "Failed to read from host %d\n", res);
- return nbytes;
- }
-
- nbytes += scnprintf(&buff[nbytes], len - nbytes,
- "ch_id: %d\n", channel->id);
- nbytes += scnprintf(&buff[nbytes], len - nbytes,
- "chstate: 0x%x\n", ch_ctx_host.chstate);
- nbytes += scnprintf(&buff[nbytes], len - nbytes,
- "brstmode: 0x%x\n", ch_ctx_host.brstmode);
- nbytes += scnprintf(&buff[nbytes], len - nbytes,
- "chtype: 0x%x\n", ch_ctx_host.chtype);
- nbytes += scnprintf(&buff[nbytes], len - nbytes,
- "erindex: 0x%x\n", ch_ctx_host.erindex);
- nbytes += scnprintf(&buff[nbytes], len - nbytes,
- "rbase: 0x%llx\n", ch_ctx_host.rbase);
- nbytes += scnprintf(&buff[nbytes], len - nbytes,
- "rlen: 0x%llx\n", ch_ctx_host.rlen);
- nbytes += scnprintf(&buff[nbytes], len - nbytes,
- "rp: 0x%llx\n", ch_ctx_host.rp);
- nbytes += scnprintf(&buff[nbytes], len - nbytes,
- "wp: 0x%llx\n", ch_ctx_host.wp);
-
- return nbytes;
-}
-
-static ssize_t ipa3_mhi_debugfs_stats(struct file *file,
- char __user *ubuf,
- size_t count,
- loff_t *ppos)
-{
- int nbytes = 0;
- int i;
- struct ipa3_mhi_channel_ctx *channel;
-
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "IPA MHI state: %s\n", MHI_STATE_STR(ipa3_mhi_ctx->state));
-
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- channel = &ipa3_mhi_ctx->ul_channels[i];
- nbytes += ipa3_mhi_print_channel_info(channel,
- &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
- }
-
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- channel = &ipa3_mhi_ctx->dl_channels[i];
- nbytes += ipa3_mhi_print_channel_info(channel,
- &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
- }
-
- return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
-}
-
-static ssize_t ipa3_mhi_debugfs_uc_stats(struct file *file,
- char __user *ubuf,
- size_t count,
- loff_t *ppos)
-{
- int nbytes = 0;
-
- nbytes += ipa3_uc_mhi_print_stats(dbg_buff, IPA_MHI_MAX_MSG_LEN);
- return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
-}
-
-static ssize_t ipa3_mhi_debugfs_dump_host_ch_ctx_arr(struct file *file,
- char __user *ubuf,
- size_t count,
- loff_t *ppos)
-{
- int i, nbytes = 0;
- struct ipa3_mhi_channel_ctx *channel;
-
- if (ipa3_mhi_ctx->state == IPA_MHI_STATE_INITIALIZED ||
- ipa3_mhi_ctx->state == IPA_MHI_STATE_READY) {
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "Cannot dump host channel context ");
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "before IPA MHI was STARTED\n");
- return simple_read_from_buffer(ubuf, count, ppos,
- dbg_buff, nbytes);
- }
- if (ipa3_mhi_ctx->state == IPA_MHI_STATE_SUSPENDED) {
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "IPA MHI is suspended, cannot dump channel ctx array");
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- " from host -PCIe can be in D3 state\n");
- return simple_read_from_buffer(ubuf, count, ppos,
- dbg_buff, nbytes);
- }
-
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "channel contex array - dump from host\n");
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "***** UL channels *******\n");
-
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- channel = &ipa3_mhi_ctx->ul_channels[i];
- if (!channel->valid)
- continue;
- nbytes += ipa3_mhi_print_host_channel_ctx_info(channel,
- &dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes);
- }
-
- nbytes += scnprintf(&dbg_buff[nbytes],
- IPA_MHI_MAX_MSG_LEN - nbytes,
- "\n***** DL channels *******\n");
-
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- channel = &ipa3_mhi_ctx->dl_channels[i];
- if (!channel->valid)
- continue;
- nbytes += ipa3_mhi_print_host_channel_ctx_info(channel,
- &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
- }
-
- return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
-}
-
-const struct file_operations ipa3_mhi_stats_ops = {
- .read = ipa3_mhi_debugfs_stats,
-};
-
-const struct file_operations ipa3_mhi_uc_stats_ops = {
- .read = ipa3_mhi_debugfs_uc_stats,
-};
-
-const struct file_operations ipa3_mhi_dump_host_ch_ctx_ops = {
- .read = ipa3_mhi_debugfs_dump_host_ch_ctx_arr,
-};
-
-
-static void ipa3_mhi_debugfs_init(void)
-{
- const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
- const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
- S_IWUSR | S_IWGRP;
- struct dentry *file;
-
- IPA_MHI_FUNC_ENTRY();
-
- dent = debugfs_create_dir("ipa_mhi", 0);
- if (IS_ERR(dent)) {
- IPA_MHI_ERR("fail to create folder ipa_mhi\n");
- return;
- }
-
- file = debugfs_create_file("stats", read_only_mode, dent,
- 0, &ipa3_mhi_stats_ops);
- if (!file || IS_ERR(file)) {
- IPA_MHI_ERR("fail to create file stats\n");
- goto fail;
- }
-
- file = debugfs_create_file("uc_stats", read_only_mode, dent,
- 0, &ipa3_mhi_uc_stats_ops);
- if (!file || IS_ERR(file)) {
- IPA_MHI_ERR("fail to create file uc_stats\n");
- goto fail;
- }
-
- file = debugfs_create_u32("use_ipadma", read_write_mode, dent,
- &ipa3_mhi_ctx->use_ipadma);
- if (!file || IS_ERR(file)) {
- IPA_MHI_ERR("fail to create file use_ipadma\n");
- goto fail;
- }
-
- file = debugfs_create_file("dump_host_channel_ctx_array",
- read_only_mode, dent, 0, &ipa3_mhi_dump_host_ch_ctx_ops);
- if (!file || IS_ERR(file)) {
- IPA_MHI_ERR("fail to create file dump_host_channel_ctx_arr\n");
- goto fail;
- }
-
- IPA_MHI_FUNC_EXIT();
- return;
-fail:
- debugfs_remove_recursive(dent);
-}
-
-static void ipa3_mhi_debugfs_destroy(void)
-{
- debugfs_remove_recursive(dent);
-}
-
-#else
-static void ipa3_mhi_debugfs_init(void) {}
-static void ipa3_mhi_debugfs_destroy(void) {}
-#endif /* CONFIG_DEBUG_FS */
-
-
-static void ipa3_mhi_cache_dl_ul_sync_info(
- struct ipa_config_req_msg_v01 *config_req)
-{
- ipa3_cached_dl_ul_sync_info.params.isDlUlSyncEnabled = true;
- ipa3_cached_dl_ul_sync_info.params.UlAccmVal =
- (config_req->ul_accumulation_time_limit_valid) ?
- config_req->ul_accumulation_time_limit : 0;
- ipa3_cached_dl_ul_sync_info.params.ulMsiEventThreshold =
- (config_req->ul_msi_event_threshold_valid) ?
- config_req->ul_msi_event_threshold : 0;
- ipa3_cached_dl_ul_sync_info.params.dlMsiEventThreshold =
- (config_req->dl_msi_event_threshold_valid) ?
- config_req->dl_msi_event_threshold : 0;
-}
-
-/**
- * ipa3_mhi_wq_notify_wakeup() - Notify MHI client on data available
- *
- * This function is called from IPA MHI workqueue to notify
- * MHI client driver on data available event.
- */
-static void ipa3_mhi_wq_notify_wakeup(struct work_struct *work)
-{
- IPA_MHI_FUNC_ENTRY();
- ipa3_mhi_ctx->cb_notify(ipa3_mhi_ctx->cb_priv,
- IPA_MHI_EVENT_DATA_AVAILABLE, 0);
- IPA_MHI_FUNC_EXIT();
-}
-
-/**
- * ipa3_mhi_notify_wakeup() - Schedule work to notify data available
- *
- * This function will schedule a work to notify data available event.
- * In case this function is called more than once, only one notification will
- * be sent to MHI client driver. No further notifications will be sent until
- * IPA MHI state will become STARTED.
- */
-static void ipa3_mhi_notify_wakeup(void)
-{
- IPA_MHI_FUNC_ENTRY();
- if (ipa3_mhi_ctx->wakeup_notified) {
- IPADBG("wakeup already called\n");
- return;
- }
- queue_work(ipa3_mhi_ctx->wq, &ipa_mhi_notify_wakeup_work);
- ipa3_mhi_ctx->wakeup_notified = true;
- IPA_MHI_FUNC_EXIT();
-}
-
-/**
- * ipa3_mhi_wq_notify_ready() - Notify MHI client on ready
- *
- * This function is called from IPA MHI workqueue to notify
- * MHI client driver on ready event when IPA uC is loaded
- */
-static void ipa3_mhi_wq_notify_ready(struct work_struct *work)
-{
- IPA_MHI_FUNC_ENTRY();
- ipa3_mhi_ctx->cb_notify(ipa3_mhi_ctx->cb_priv,
- IPA_MHI_EVENT_READY, 0);
- IPA_MHI_FUNC_EXIT();
-}
-
-/**
- * ipa3_mhi_notify_ready() - Schedule work to notify ready
- *
- * This function will schedule a work to notify ready event.
- */
-static void ipa3_mhi_notify_ready(void)
-{
- IPA_MHI_FUNC_ENTRY();
- queue_work(ipa3_mhi_ctx->wq, &ipa_mhi_notify_ready_work);
- IPA_MHI_FUNC_EXIT();
-}
-
-/**
- * ipa3_mhi_set_state() - Set new state to IPA MHI
- * @state: new state
- *
- * Sets a new state to IPA MHI if possible according to IPA MHI state machine.
- * In some state transitions a wakeup request will be triggered.
- *
- * Returns: 0 on success, -1 otherwise
- */
-static int ipa3_mhi_set_state(enum ipa3_mhi_state new_state)
-{
- unsigned long flags;
- int res = -EPERM;
-
- spin_lock_irqsave(&ipa3_mhi_ctx->state_lock, flags);
- IPA_MHI_DBG("Current state: %s\n", MHI_STATE_STR(ipa3_mhi_ctx->state));
-
- switch (ipa3_mhi_ctx->state) {
- case IPA_MHI_STATE_INITIALIZED:
- if (new_state == IPA_MHI_STATE_READY) {
- ipa3_mhi_notify_ready();
- res = 0;
- }
- break;
-
- case IPA_MHI_STATE_READY:
- if (new_state == IPA_MHI_STATE_READY)
- res = 0;
- if (new_state == IPA_MHI_STATE_STARTED)
- res = 0;
- break;
-
- case IPA_MHI_STATE_STARTED:
- if (new_state == IPA_MHI_STATE_INITIALIZED)
- res = 0;
- else if (new_state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
- res = 0;
- break;
-
- case IPA_MHI_STATE_SUSPEND_IN_PROGRESS:
- if (new_state == IPA_MHI_STATE_SUSPENDED) {
- if (ipa3_mhi_ctx->trigger_wakeup) {
- ipa3_mhi_ctx->trigger_wakeup = false;
- ipa3_mhi_notify_wakeup();
- }
- res = 0;
- } else if (new_state == IPA_MHI_STATE_STARTED) {
- ipa3_mhi_ctx->wakeup_notified = false;
- ipa3_mhi_ctx->trigger_wakeup = false;
- if (ipa3_mhi_ctx->rm_cons_state ==
- IPA_MHI_RM_STATE_REQUESTED) {
- ipa_rm_notify_completion(
- IPA_RM_RESOURCE_GRANTED,
- IPA_RM_RESOURCE_MHI_CONS);
- ipa3_mhi_ctx->rm_cons_state =
- IPA_MHI_RM_STATE_GRANTED;
- }
- res = 0;
- }
- break;
-
- case IPA_MHI_STATE_SUSPENDED:
- if (new_state == IPA_MHI_STATE_RESUME_IN_PROGRESS)
- res = 0;
- break;
-
- case IPA_MHI_STATE_RESUME_IN_PROGRESS:
- if (new_state == IPA_MHI_STATE_SUSPENDED) {
- if (ipa3_mhi_ctx->trigger_wakeup) {
- ipa3_mhi_ctx->trigger_wakeup = false;
- ipa3_mhi_notify_wakeup();
- }
- res = 0;
- } else if (new_state == IPA_MHI_STATE_STARTED) {
- ipa3_mhi_ctx->trigger_wakeup = false;
- ipa3_mhi_ctx->wakeup_notified = false;
- if (ipa3_mhi_ctx->rm_cons_state ==
- IPA_MHI_RM_STATE_REQUESTED) {
- ipa_rm_notify_completion(
- IPA_RM_RESOURCE_GRANTED,
- IPA_RM_RESOURCE_MHI_CONS);
- ipa3_mhi_ctx->rm_cons_state =
- IPA_MHI_RM_STATE_GRANTED;
- }
- res = 0;
- }
- break;
-
- default:
- IPA_MHI_ERR("Invalid state %d\n", ipa3_mhi_ctx->state);
- WARN_ON(1);
- }
-
- if (res)
- IPA_MHI_ERR("Invalid state change to %s\n",
- MHI_STATE_STR(new_state));
- else {
- IPA_MHI_DBG("New state change to %s\n",
- MHI_STATE_STR(new_state));
- ipa3_mhi_ctx->state = new_state;
- }
- spin_unlock_irqrestore(&ipa3_mhi_ctx->state_lock, flags);
- return res;
-}
-
-static void ipa3_mhi_rm_prod_notify(void *user_data, enum ipa_rm_event event,
- unsigned long data)
-{
- IPA_MHI_FUNC_ENTRY();
-
- switch (event) {
- case IPA_RM_RESOURCE_GRANTED:
- IPA_MHI_DBG_LOW("IPA_RM_RESOURCE_GRANTED\n");
- complete_all(&ipa3_mhi_ctx->rm_prod_granted_comp);
- break;
-
- case IPA_RM_RESOURCE_RELEASED:
- IPA_MHI_DBG_LOW("IPA_RM_RESOURCE_RELEASED\n");
- break;
-
- default:
- IPA_MHI_ERR("unexpected event %d\n", event);
- WARN_ON(1);
- break;
- }
-
- IPA_MHI_FUNC_EXIT();
-}
-
-static void ipa3_mhi_uc_ready_cb(void)
-{
- IPA_MHI_FUNC_ENTRY();
- ipa3_mhi_set_state(IPA_MHI_STATE_READY);
- IPA_MHI_FUNC_EXIT();
-}
-
-static void ipa3_mhi_uc_wakeup_request_cb(void)
-{
- unsigned long flags;
-
- IPA_MHI_FUNC_ENTRY();
- IPA_MHI_DBG("MHI state: %s\n", MHI_STATE_STR(ipa3_mhi_ctx->state));
- spin_lock_irqsave(&ipa3_mhi_ctx->state_lock, flags);
- if (ipa3_mhi_ctx->state == IPA_MHI_STATE_SUSPENDED)
- ipa3_mhi_notify_wakeup();
- else if (ipa3_mhi_ctx->state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
- /* wakeup event will be triggered after suspend finishes */
- ipa3_mhi_ctx->trigger_wakeup = true;
-
- spin_unlock_irqrestore(&ipa3_mhi_ctx->state_lock, flags);
- IPA_MHI_FUNC_EXIT();
-}
-
-/**
- * ipa3_mhi_rm_cons_request() - callback function for IPA RM request resource
- *
- * In case IPA MHI is not suspended, MHI CONS will be granted immediately.
- * In case IPA MHI is suspended, MHI CONS will be granted after resume.
- */
-static int ipa3_mhi_rm_cons_request(void)
-{
- unsigned long flags;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
-
- IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa3_mhi_ctx->state));
- spin_lock_irqsave(&ipa3_mhi_ctx->state_lock, flags);
- ipa3_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_REQUESTED;
- if (ipa3_mhi_ctx->state == IPA_MHI_STATE_STARTED) {
- ipa3_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
- res = 0;
- } else if (ipa3_mhi_ctx->state == IPA_MHI_STATE_SUSPENDED) {
- ipa3_mhi_notify_wakeup();
- res = -EINPROGRESS;
- } else if (ipa3_mhi_ctx->state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
- /* wakeup event will be trigger after suspend finishes */
- ipa3_mhi_ctx->trigger_wakeup = true;
- res = -EINPROGRESS;
- } else {
- res = -EINPROGRESS;
- }
-
- spin_unlock_irqrestore(&ipa3_mhi_ctx->state_lock, flags);
- IPA_MHI_DBG_LOW("EXIT with %d\n", res);
- return res;
-}
-
-static int ipa3_mhi_rm_cons_release(void)
-{
- unsigned long flags;
-
- IPA_MHI_FUNC_ENTRY();
-
- spin_lock_irqsave(&ipa3_mhi_ctx->state_lock, flags);
- ipa3_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
- complete_all(&ipa3_mhi_ctx->rm_cons_comp);
- spin_unlock_irqrestore(&ipa3_mhi_ctx->state_lock, flags);
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa3_mhi_wait_for_cons_release(void)
-{
- unsigned long flags;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- reinit_completion(&ipa3_mhi_ctx->rm_cons_comp);
- spin_lock_irqsave(&ipa3_mhi_ctx->state_lock, flags);
- if (ipa3_mhi_ctx->rm_cons_state != IPA_MHI_RM_STATE_GRANTED) {
- spin_unlock_irqrestore(&ipa3_mhi_ctx->state_lock, flags);
- return 0;
- }
- spin_unlock_irqrestore(&ipa3_mhi_ctx->state_lock, flags);
-
- res = wait_for_completion_timeout(
- &ipa3_mhi_ctx->rm_cons_comp,
- msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
- if (res == 0) {
- IPA_MHI_ERR("timeout release mhi cons\n");
- return -ETIME;
- }
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa3_mhi_request_prod(void)
-{
- int res;
-
- IPA_MHI_FUNC_ENTRY();
-
- reinit_completion(&ipa3_mhi_ctx->rm_prod_granted_comp);
- IPA_MHI_DBG_LOW("requesting mhi prod\n");
- res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
- if (res) {
- if (res != -EINPROGRESS) {
- IPA_MHI_ERR("failed to request mhi prod %d\n", res);
- return res;
- }
- res = wait_for_completion_timeout(
- &ipa3_mhi_ctx->rm_prod_granted_comp,
- msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
- if (res == 0) {
- IPA_MHI_ERR("timeout request mhi prod\n");
- return -ETIME;
- }
- }
-
- IPA_MHI_DBG_LOW("mhi prod granted\n");
- IPA_MHI_FUNC_EXIT();
- return 0;
-
-}
-
-static int ipa3_mhi_release_prod(void)
+bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client)
{
int res;
+ int ipa_ep_idx;
+ struct ipa3_ep_context *ep;
IPA_MHI_FUNC_ENTRY();
-
- res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
-
- IPA_MHI_FUNC_EXIT();
- return res;
-
-}
-
-/**
- * ipa3_mhi_get_channel_context() - Get corresponding channel context
- * @ep: IPA ep
- * @channel_id: Channel ID
- *
- * This function will return the corresponding channel context or allocate new
- * one in case channel context for channel does not exist.
- */
-static struct ipa3_mhi_channel_ctx *ipa3_mhi_get_channel_context(
- struct ipa3_ep_context *ep, u8 channel_id)
-{
- int ch_idx;
- struct ipa3_mhi_channel_ctx *channels;
- int max_channels;
-
- if (IPA_CLIENT_IS_PROD(ep->client)) {
- channels = ipa3_mhi_ctx->ul_channels;
- max_channels = IPA_MHI_MAX_UL_CHANNELS;
- } else {
- channels = ipa3_mhi_ctx->dl_channels;
- max_channels = IPA_MHI_MAX_DL_CHANNELS;
- }
-
- /* find the channel context according to channel id */
- for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
- if (channels[ch_idx].valid &&
- channels[ch_idx].id == channel_id)
- return &channels[ch_idx];
- }
-
- /* channel context does not exists, allocate a new one */
- for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
- if (!channels[ch_idx].valid)
- break;
- }
-
- if (ch_idx == max_channels) {
- IPA_MHI_ERR("no more channels available\n");
- return NULL;
- }
-
- channels[ch_idx].valid = true;
- channels[ch_idx].id = channel_id;
- channels[ch_idx].index = ipa3_mhi_ctx->total_channels++;
- channels[ch_idx].ep = ep;
- channels[ch_idx].state = IPA_HW_MHI_CHANNEL_STATE_INVALID;
-
- return &channels[ch_idx];
-}
-
-/**
- * ipa3_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel
- * context
- * @clnt_hdl: client handle as provided in ipa3_mhi_connect_pipe()
- *
- * This function will return the corresponding channel context or NULL in case
- * that channel does not exist.
- */
-static struct ipa3_mhi_channel_ctx *ipa3_mhi_get_channel_context_by_clnt_hdl(
- u32 clnt_hdl)
-{
- int ch_idx;
-
- for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) {
- if (ipa3_mhi_ctx->ul_channels[ch_idx].valid &&
- ipa3_get_ep_mapping(
- ipa3_mhi_ctx->ul_channels[ch_idx].ep->client) == clnt_hdl)
- return &ipa3_mhi_ctx->ul_channels[ch_idx];
- }
-
- for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) {
- if (ipa3_mhi_ctx->dl_channels[ch_idx].valid &&
- ipa3_get_ep_mapping(
- ipa3_mhi_ctx->dl_channels[ch_idx].ep->client) == clnt_hdl)
- return &ipa3_mhi_ctx->dl_channels[ch_idx];
- }
-
- return NULL;
-}
-
-static int ipa3_mhi_enable_force_clear(u32 request_id, bool throttle_source)
-{
- struct ipa_enable_force_clear_datapath_req_msg_v01 req;
- int i;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- memset(&req, 0, sizeof(req));
- req.request_id = request_id;
- req.source_pipe_bitmask = 0;
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- if (!ipa3_mhi_ctx->ul_channels[i].valid)
- continue;
- req.source_pipe_bitmask |= 1 << ipa3_get_ep_mapping(
- ipa3_mhi_ctx->ul_channels[i].ep->client);
- }
- if (throttle_source) {
- req.throttle_source_valid = 1;
- req.throttle_source = 1;
- }
- IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n",
- req.request_id, req.source_pipe_bitmask,
- req.throttle_source);
- res = ipa3_qmi_enable_force_clear_datapath_send(&req);
- if (res) {
- IPA_MHI_ERR(
- "ipa3_qmi_enable_force_clear_datapath_send failed %d\n",
- res);
- return res;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa3_mhi_disable_force_clear(u32 request_id)
-{
- struct ipa_disable_force_clear_datapath_req_msg_v01 req;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- memset(&req, 0, sizeof(req));
- req.request_id = request_id;
- IPA_MHI_DBG("req_id=0x%x\n", req.request_id);
- res = ipa3_qmi_disable_force_clear_datapath_send(&req);
- if (res) {
- IPA_MHI_ERR(
- "ipa3_qmi_disable_force_clear_datapath_send failed %d\n",
- res);
- return res;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static bool ipa3_mhi_sps_channel_empty(struct ipa3_mhi_channel_ctx *channel)
-{
- u32 pipe_idx;
- bool pending;
-
- pipe_idx = ipa3_get_ep_mapping(channel->ep->client);
- if (sps_pipe_pending_desc(ipa3_ctx->bam_handle,
- pipe_idx, &pending)) {
- IPA_MHI_ERR("sps_pipe_pending_desc failed\n");
- WARN_ON(1);
- return false;
- }
-
- return !pending;
-}
-
-static bool ipa3_mhi_gsi_channel_empty(struct ipa3_mhi_channel_ctx *channel)
-{
- int res;
- IPA_MHI_FUNC_ENTRY();
-
- if (!channel->stop_in_proc) {
- IPA_MHI_DBG_LOW("Channel is not in STOP_IN_PROC\n");
- return true;
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPA_MHI_ERR("Invalid client.\n");
+ return -EINVAL;
}
- IPA_MHI_DBG_LOW("Stopping GSI channel %ld\n",
- channel->ep->gsi_chan_hdl);
- res = gsi_stop_channel(channel->ep->gsi_chan_hdl);
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ IPA_MHI_DBG_LOW("Stopping GSI channel %ld\n", ep->gsi_chan_hdl);
+ res = gsi_stop_channel(ep->gsi_chan_hdl);
if (res != 0 &&
res != -GSI_STATUS_AGAIN &&
res != -GSI_STATUS_TIMED_OUT) {
@@ -1116,139 +117,21 @@ static bool ipa3_mhi_gsi_channel_empty(struct ipa3_mhi_channel_ctx *channel)
if (res == 0) {
IPA_MHI_DBG_LOW("GSI channel %ld STOP\n",
- channel->ep->gsi_chan_hdl);
- channel->stop_in_proc = false;
+ ep->gsi_chan_hdl);
return true;
}
return false;
}
-/**
- * ipa3_mhi_wait_for_ul_empty_timeout() - wait for pending packets in uplink
- * @msecs: timeout to wait
- *
- * This function will poll until there are no packets pending in uplink channels
- * or timeout occurred.
- *
- * Return code: true - no pending packets in uplink channels
- * false - timeout occurred
- */
-static bool ipa3_mhi_wait_for_ul_empty_timeout(unsigned int msecs)
-{
- unsigned long jiffies_timeout = msecs_to_jiffies(msecs);
- unsigned long jiffies_start = jiffies;
- bool empty = false;
- int i;
-
- IPA_MHI_FUNC_ENTRY();
- while (!empty) {
- empty = true;
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- if (!ipa3_mhi_ctx->ul_channels[i].valid)
- continue;
- if (ipa3_ctx->transport_prototype ==
- IPA_TRANSPORT_TYPE_GSI)
- empty &= ipa3_mhi_gsi_channel_empty(
- &ipa3_mhi_ctx->ul_channels[i]);
- else
- empty &= ipa3_mhi_sps_channel_empty(
- &ipa3_mhi_ctx->ul_channels[i]);
- }
-
- if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
- IPA_MHI_DBG_LOW("timeout waiting for UL empty\n");
- break;
- }
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI &&
- IPA_MHI_MAX_UL_CHANNELS == 1)
- usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
- IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
- }
-
- IPA_MHI_DBG("IPA UL is %s\n", (empty) ? "empty" : "not empty");
-
- IPA_MHI_FUNC_EXIT();
- return empty;
-}
-
-static void ipa3_mhi_set_holb_on_dl_channels(bool enable,
- struct ipa_ep_cfg_holb old_holb[])
-{
- int i;
- struct ipa_ep_cfg_holb ep_holb;
- int ep_idx;
- int res;
-
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- if (!ipa3_mhi_ctx->dl_channels[i].valid)
- continue;
- if (ipa3_mhi_ctx->dl_channels[i].state ==
- IPA_HW_MHI_CHANNEL_STATE_INVALID)
- continue;
- ep_idx = ipa3_get_ep_mapping(
- ipa3_mhi_ctx->dl_channels[i].ep->client);
- if (-1 == ep_idx) {
- IPA_MHI_ERR("Client %u is not mapped\n",
- ipa3_mhi_ctx->dl_channels[i].ep->client);
- BUG();
- return;
- }
- memset(&ep_holb, 0, sizeof(ep_holb));
- if (enable) {
- ep_holb.en = 1;
- ep_holb.tmr_val = 0;
- old_holb[i] = ipa3_ctx->ep[ep_idx].holb;
- } else {
- ep_holb = old_holb[i];
- }
- res = ipa3_cfg_ep_holb(ep_idx, &ep_holb);
- if (res) {
- IPA_MHI_ERR("ipa3_cfg_ep_holb failed %d\n", res);
- BUG();
- return;
- }
- }
-}
-
-static int ipa3_mhi_suspend_gsi_channel(struct ipa3_mhi_channel_ctx *channel)
-{
- int res;
- int clnt_hdl;
-
- IPA_MHI_FUNC_ENTRY();
-
- clnt_hdl = ipa3_get_ep_mapping(channel->ep->client);
- if (clnt_hdl < 0)
- return -EFAULT;
-
- res = ipa3_stop_gsi_channel(clnt_hdl);
- if (res != 0 && res != -GSI_STATUS_AGAIN &&
- res != -GSI_STATUS_TIMED_OUT) {
- IPA_MHI_ERR("GSI stop channel failed %d\n", res);
- return -EFAULT;
- }
-
- /* check if channel was stopped completely */
- if (res)
- channel->stop_in_proc = true;
-
- IPA_MHI_DBG("GSI channel is %s\n", (channel->stop_in_proc) ?
- "STOP_IN_PROC" : "STOP");
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa3_mhi_reset_gsi_channel(struct ipa3_mhi_channel_ctx *channel)
+static int ipa3_mhi_reset_gsi_channel(enum ipa_client_type client)
{
int res;
int clnt_hdl;
IPA_MHI_FUNC_ENTRY();
- clnt_hdl = ipa3_get_ep_mapping(channel->ep->client);
+ clnt_hdl = ipa3_get_ep_mapping(client);
if (clnt_hdl < 0)
return -EFAULT;
@@ -1262,87 +145,20 @@ static int ipa3_mhi_reset_gsi_channel(struct ipa3_mhi_channel_ctx *channel)
return 0;
}
-static int ipa3_mhi_reset_ul_channel(struct ipa3_mhi_channel_ctx *channel)
+int ipa3_mhi_reset_channel_internal(enum ipa_client_type client)
{
int res;
- struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS];
- bool empty;
IPA_MHI_FUNC_ENTRY();
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- res = ipa3_mhi_suspend_gsi_channel(channel);
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_suspend_gsi_channel failed %d\n",
- res);
- return res;
- }
- } else {
- res = ipa3_uc_mhi_reset_channel(channel->index);
- if (res) {
- IPA_MHI_ERR("ipa3_uc_mhi_reset_channel failed %d\n",
- res);
- return res;
- }
- }
- empty = ipa3_mhi_wait_for_ul_empty_timeout(
- IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
- if (!empty) {
- IPA_MHI_DBG("%s not empty\n",
- (ipa3_ctx->transport_prototype ==
- IPA_TRANSPORT_TYPE_GSI) ? "GSI" : "BAM");
- res = ipa3_mhi_enable_force_clear(ipa3_mhi_ctx->qmi_req_id,
- false);
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_enable_force_clear failed %d\n",
- res);
- BUG();
- return res;
- }
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- empty = ipa3_mhi_wait_for_ul_empty_timeout(
- IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
-
- IPADBG("empty=%d\n", empty);
- } else {
- /* enable packet drop on all DL channels */
- ipa3_mhi_set_holb_on_dl_channels(true, old_ep_holb);
- res = ipa3_tag_process(NULL, 0, HZ);
- if (res)
- IPAERR("TAG process failed\n");
-
- /* disable packet drop on all DL channels */
- ipa3_mhi_set_holb_on_dl_channels(false, old_ep_holb);
- res = sps_pipe_disable(ipa3_ctx->bam_handle,
- ipa3_get_ep_mapping(channel->ep->client));
- if (res) {
- IPA_MHI_ERR("sps_pipe_disable fail %d\n", res);
- BUG();
- return res;
- }
- }
-
- res = ipa3_mhi_disable_force_clear(ipa3_mhi_ctx->qmi_req_id);
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_disable_force_clear failed %d\n",
- res);
- BUG();
- return res;
- }
- ipa3_mhi_ctx->qmi_req_id++;
- }
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- res = ipa3_mhi_reset_gsi_channel(channel);
- if (res) {
- IPAERR("ipa3_mhi_reset_gsi_channel failed\n");
- BUG();
- return res;
- }
+ res = ipa3_mhi_reset_gsi_channel(client);
+ if (res) {
+ IPAERR("ipa3_mhi_reset_gsi_channel failed\n");
+ ipa_assert();
+ return res;
}
- res = ipa3_disable_data_path(ipa3_get_ep_mapping(channel->ep->client));
+ res = ipa3_disable_data_path(ipa3_get_ep_mapping(client));
if (res) {
IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res);
return res;
@@ -1352,342 +168,117 @@ static int ipa3_mhi_reset_ul_channel(struct ipa3_mhi_channel_ctx *channel)
return 0;
}
-static int ipa3_mhi_reset_dl_channel(struct ipa3_mhi_channel_ctx *channel)
+int ipa3_mhi_start_channel_internal(enum ipa_client_type client)
{
int res;
IPA_MHI_FUNC_ENTRY();
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- res = ipa3_mhi_suspend_gsi_channel(channel);
- if (res) {
- IPAERR("ipa3_mhi_suspend_gsi_channel failed %d\n", res);
- return res;
- }
-
- res = ipa3_mhi_reset_gsi_channel(channel);
- if (res) {
- IPAERR("ipa3_mhi_reset_gsi_channel failed\n");
- return res;
- }
- res = ipa3_disable_data_path(
- ipa3_get_ep_mapping(channel->ep->client));
- if (res) {
- IPA_MHI_ERR("ipa3_disable_data_path failed\n");
- return res;
- }
- } else {
- res = ipa3_disable_data_path(
- ipa3_get_ep_mapping(channel->ep->client));
- if (res) {
- IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res);
- return res;
- }
-
- res = ipa3_uc_mhi_reset_channel(channel->index);
- if (res) {
- IPA_MHI_ERR("ipa3_uc_mhi_reset_channel failed %d\n",
- res);
- ipa3_enable_data_path(
- ipa3_get_ep_mapping(channel->ep->client));
- return res;
- }
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa3_mhi_reset_channel(struct ipa3_mhi_channel_ctx *channel)
-{
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- if (IPA_CLIENT_IS_PROD(channel->ep->client))
- res = ipa3_mhi_reset_ul_channel(channel);
- else
- res = ipa3_mhi_reset_dl_channel(channel);
+ res = ipa3_enable_data_path(ipa3_get_ep_mapping(client));
if (res) {
- IPA_MHI_ERR("failed to reset channel error %d\n", res);
+ IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res);
return res;
}
-
- channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
- &channel->state, channel->channel_context_addr +
- offsetof(struct ipa3_mhi_ch_ctx, chstate),
- sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate));
- if (res) {
- IPAERR("ipa_mhi_read_write_host failed %d\n", res);
- return res;
- }
- }
-
IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa_mhi_start_uc_channel(struct ipa3_mhi_channel_ctx *channel,
- int ipa_ep_idx)
-{
- int res;
- struct ipa3_ep_context *ep;
-
- IPA_MHI_FUNC_ENTRY();
-
- ep = channel->ep;
- if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
- IPA_MHI_DBG("Initializing channel\n");
- res = ipa3_uc_mhi_init_channel(ipa_ep_idx, channel->index,
- channel->id, (IPA_CLIENT_IS_PROD(ep->client) ? 1 : 2));
- if (res) {
- IPA_MHI_ERR("init_channel failed %d\n", res);
- return res;
- }
- } else if (channel->state == IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
- if (channel->ep != ep) {
- IPA_MHI_ERR("previous channel client was %d\n",
- ep->client);
- return res;
- }
- IPA_MHI_DBG("Starting channel\n");
- res = ipa3_uc_mhi_resume_channel(channel->index, false);
- if (res) {
- IPA_MHI_ERR("init_channel failed %d\n", res);
- return res;
- }
- } else {
- IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
- return -EFAULT;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static void ipa_mhi_dump_ch_ctx(struct ipa3_mhi_channel_ctx *channel)
-{
- IPA_MHI_DBG_LOW("ch_id %d\n", channel->id);
- IPA_MHI_DBG_LOW("chstate 0x%x\n", channel->ch_ctx_host.chstate);
- IPA_MHI_DBG_LOW("brstmode 0x%x\n", channel->ch_ctx_host.brstmode);
- IPA_MHI_DBG_LOW("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg);
- IPA_MHI_DBG_LOW("chtype 0x%x\n", channel->ch_ctx_host.chtype);
- IPA_MHI_DBG_LOW("erindex 0x%x\n", channel->ch_ctx_host.erindex);
- IPA_MHI_DBG_LOW("rbase 0x%llx\n", channel->ch_ctx_host.rbase);
- IPA_MHI_DBG_LOW("rlen 0x%llx\n", channel->ch_ctx_host.rlen);
- IPA_MHI_DBG_LOW("rp 0x%llx\n", channel->ch_ctx_host.rp);
- IPA_MHI_DBG_LOW("wp 0x%llx\n", channel->ch_ctx_host.wp);
-}
-
-static void ipa_mhi_dump_ev_ctx(struct ipa3_mhi_channel_ctx *channel)
-{
- IPA_MHI_DBG_LOW("ch_id %d event id %d\n", channel->id,
- channel->ch_ctx_host.erindex);
-
- IPA_MHI_DBG_LOW("intmodc 0x%x\n", channel->ev_ctx_host.intmodc);
- IPA_MHI_DBG_LOW("intmodt 0x%x\n", channel->ev_ctx_host.intmodt);
- IPA_MHI_DBG_LOW("ertype 0x%x\n", channel->ev_ctx_host.ertype);
- IPA_MHI_DBG_LOW("msivec 0x%x\n", channel->ev_ctx_host.msivec);
- IPA_MHI_DBG_LOW("rbase 0x%llx\n", channel->ev_ctx_host.rbase);
- IPA_MHI_DBG_LOW("rlen 0x%llx\n", channel->ev_ctx_host.rlen);
- IPA_MHI_DBG_LOW("rp 0x%llx\n", channel->ev_ctx_host.rp);
- IPA_MHI_DBG_LOW("wp 0x%llx\n", channel->ev_ctx_host.wp);
-}
-
-static int ipa_mhi_read_ch_ctx(struct ipa3_mhi_channel_ctx *channel)
-{
- int res;
-
- res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
- &channel->ch_ctx_host, channel->channel_context_addr,
- sizeof(channel->ch_ctx_host));
- if (res) {
- IPAERR("ipa_mhi_read_write_host failed %d\n", res);
- return res;
-
- }
- ipa_mhi_dump_ch_ctx(channel);
-
- channel->event_context_addr = ipa3_mhi_ctx->event_context_array_addr +
- channel->ch_ctx_host.erindex * sizeof(struct ipa3_mhi_ev_ctx);
- IPA_MHI_DBG("ch %d event_context_addr 0x%llx\n", channel->id,
- channel->event_context_addr);
-
- res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
- &channel->ev_ctx_host, channel->event_context_addr,
- sizeof(channel->ev_ctx_host));
- if (res) {
- IPAERR("ipa_mhi_read_write_host failed %d\n", res);
- return res;
-
- }
- ipa_mhi_dump_ev_ctx(channel);
return 0;
}
-static void ipa_mhi_gsi_ev_err_cb(struct gsi_evt_err_notify *notify)
+static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client,
+ struct ipa_mhi_ch_ctx *ch_ctx_host, int ring_size)
{
- struct ipa3_mhi_channel_ctx *channel = notify->user_data;
-
- IPAERR("channel id=%d client=%d state=%d\n",
- channel->id, channel->ep->client, channel->state);
- switch (notify->evt_id) {
- case GSI_EVT_OUT_OF_BUFFERS_ERR:
- IPA_MHI_ERR("Received GSI_EVT_OUT_OF_BUFFERS_ERR\n");
- break;
- case GSI_EVT_OUT_OF_RESOURCES_ERR:
- IPA_MHI_ERR("Received GSI_EVT_OUT_OF_RESOURCES_ERR\n");
- break;
- case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
- IPA_MHI_ERR("Received GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
- break;
- case GSI_EVT_EVT_RING_EMPTY_ERR:
- IPA_MHI_ERR("Received GSI_EVT_EVT_RING_EMPTY_ERR\n");
- break;
- default:
- IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
- }
- IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
-}
-
-static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify)
-{
- struct ipa3_mhi_channel_ctx *channel = notify->chan_user_data;
-
- IPAERR("channel id=%d client=%d state=%d\n",
- channel->id, channel->ep->client, channel->state);
- switch (notify->evt_id) {
- case GSI_CHAN_INVALID_TRE_ERR:
- IPA_MHI_ERR("Received GSI_CHAN_INVALID_TRE_ERR\n");
- break;
- case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
- IPA_MHI_ERR("Received GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
- break;
- case GSI_CHAN_OUT_OF_BUFFERS_ERR:
- IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
- break;
- case GSI_CHAN_OUT_OF_RESOURCES_ERR:
- IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
- break;
- case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
- IPA_MHI_ERR("Received GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
- break;
- case GSI_CHAN_HWO_1_ERR:
- IPA_MHI_ERR("Received GSI_CHAN_HWO_1_ERR\n");
- break;
- default:
- IPAERR("Unexpected err evt: %d\n", notify->evt_id);
- }
- IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
-}
-
-static int ipa3_mhi_get_ch_poll_cfg(struct ipa3_mhi_channel_ctx *channel,
- int ring_size)
-{
- switch (channel->ch_ctx_host.pollcfg) {
+ switch (ch_ctx_host->pollcfg) {
case 0:
/*set default polling configuration according to MHI spec*/
- if (IPA_CLIENT_IS_PROD(channel->ep->client))
+ if (IPA_CLIENT_IS_PROD(client))
return 7;
else
return (ring_size/2)/8;
break;
default:
- return channel->ch_ctx_host.pollcfg;
+ return ch_ctx_host->pollcfg;
}
}
-static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
- int ipa_ep_idx)
+static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
+ int ipa_ep_idx, struct start_gsi_channel *params)
{
int res;
- struct ipa3_ep_context *ep;
struct gsi_evt_ring_props ev_props;
- struct ipa_mhi_msi_info msi;
+ struct ipa_mhi_msi_info *msi;
struct gsi_chan_props ch_props;
union __packed gsi_channel_scratch ch_scratch;
+ struct ipa3_ep_context *ep;
struct ipa_gsi_ep_config *ep_cfg;
IPA_MHI_FUNC_ENTRY();
- if (channel->state != IPA_HW_MHI_CHANNEL_STATE_INVALID &&
- channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
- IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
- return -EFAULT;
- }
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
- msi = ipa3_mhi_ctx->msi;
- ep = channel->ep;
+ msi = params->msi;
ep_cfg = ipa_get_gsi_ep_info(ipa_ep_idx);
if (!ep_cfg) {
IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n");
return -EPERM;
}
- IPA_MHI_DBG("reading ch/ev context from host\n");
- res = ipa_mhi_read_ch_ctx(channel);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
- return res;
- }
/* allocate event ring only for the first time pipe is connected */
- if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
+ if (params->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
IPA_MHI_DBG("allocating event ring\n");
memset(&ev_props, 0, sizeof(ev_props));
ev_props.intf = GSI_EVT_CHTYPE_MHI_EV;
ev_props.intr = GSI_INTR_MSI;
ev_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
- ev_props.ring_len = channel->ev_ctx_host.rlen;
+ ev_props.ring_len = params->ev_ctx_host->rlen;
ev_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
- channel->ev_ctx_host.rbase);
- ev_props.int_modt = channel->ev_ctx_host.intmodt *
+ params->ev_ctx_host->rbase);
+ ev_props.int_modt = params->ev_ctx_host->intmodt *
IPA_SLEEP_CLK_RATE_KHZ;
- ev_props.int_modc = channel->ev_ctx_host.intmodc;
- ev_props.intvec = ((msi.data & ~msi.mask) |
- (channel->ev_ctx_host.msivec & msi.mask));
+ ev_props.int_modc = params->ev_ctx_host->intmodc;
+ ev_props.intvec = ((msi->data & ~msi->mask) |
+ (params->ev_ctx_host->msivec & msi->mask));
ev_props.msi_addr = IPA_MHI_HOST_ADDR_COND(
- (((u64)msi.addr_hi << 32) | msi.addr_low));
+ (((u64)msi->addr_hi << 32) | msi->addr_low));
ev_props.rp_update_addr = IPA_MHI_HOST_ADDR_COND(
- channel->event_context_addr +
- offsetof(struct ipa3_mhi_ev_ctx, rp));
+ params->event_context_addr +
+ offsetof(struct ipa_mhi_ev_ctx, rp));
ev_props.exclusive = true;
- ev_props.err_cb = ipa_mhi_gsi_ev_err_cb;
- ev_props.user_data = channel;
+ ev_props.err_cb = params->ev_err_cb;
+ ev_props.user_data = params->channel;
ev_props.evchid_valid = true;
- ev_props.evchid = channel->index + IPA_MHI_GSI_ER_START;
+ ev_props.evchid = params->evchid;
res = gsi_alloc_evt_ring(&ev_props, ipa3_ctx->gsi_dev_hdl,
- &channel->ep->gsi_evt_ring_hdl);
+ &ep->gsi_evt_ring_hdl);
if (res) {
IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res);
goto fail_alloc_evt;
return res;
}
-
- channel->cached_gsi_evt_ring_hdl =
- channel->ep->gsi_evt_ring_hdl;
+ IPA_MHI_DBG("client %d, caching event ring hdl %lu\n",
+ client,
+ ep->gsi_evt_ring_hdl);
+ *params->cached_gsi_evt_ring_hdl =
+ ep->gsi_evt_ring_hdl;
}
memset(&ch_props, 0, sizeof(ch_props));
ch_props.prot = GSI_CHAN_PROT_MHI;
- ch_props.dir = IPA_CLIENT_IS_PROD(ep->client) ?
+ ch_props.dir = IPA_CLIENT_IS_PROD(client) ?
GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
ch_props.ch_id = ep_cfg->ipa_gsi_chan_num;
- ch_props.evt_ring_hdl = channel->cached_gsi_evt_ring_hdl;
+ ch_props.evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
ch_props.re_size = GSI_CHAN_RE_SIZE_16B;
- ch_props.ring_len = channel->ch_ctx_host.rlen;
+ ch_props.ring_len = params->ch_ctx_host->rlen;
ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
- channel->ch_ctx_host.rbase);
+ params->ch_ctx_host->rbase);
ch_props.use_db_eng = GSI_CHAN_DB_MODE;
ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
ch_props.low_weight = 1;
- ch_props.err_cb = ipa_mhi_gsi_ch_err_cb;
- ch_props.chan_user_data = channel;
+ ch_props.err_cb = params->ch_err_cb;
+ ch_props.chan_user_data = params->channel;
res = gsi_alloc_channel(&ch_props, ipa3_ctx->gsi_dev_hdl,
- &channel->ep->gsi_chan_hdl);
+ &ep->gsi_chan_hdl);
if (res) {
IPA_MHI_ERR("gsi_alloc_channel failed %d\n",
res);
@@ -1696,36 +287,36 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
memset(&ch_scratch, 0, sizeof(ch_scratch));
ch_scratch.mhi.mhi_host_wp_addr = IPA_MHI_HOST_ADDR_COND(
- channel->channel_context_addr +
- offsetof(struct ipa3_mhi_ch_ctx, wp));
- ch_scratch.mhi.assert_bit40 = ipa3_mhi_ctx->assert_bit40;
+ params->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, wp));
+ ch_scratch.mhi.assert_bit40 = params->assert_bit40;
ch_scratch.mhi.max_outstanding_tre =
ep_cfg->ipa_if_tlv * ch_props.re_size;
ch_scratch.mhi.outstanding_threshold =
min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size;
ch_scratch.mhi.oob_mod_threshold = 4;
- if (channel->ch_ctx_host.brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
- channel->ch_ctx_host.brstmode == IPA_MHI_BURST_MODE_ENABLE) {
+ if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
+ params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) {
ch_scratch.mhi.burst_mode_enabled = true;
ch_scratch.mhi.polling_configuration =
- ipa3_mhi_get_ch_poll_cfg(channel,
+ ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host,
(ch_props.ring_len / ch_props.re_size));
ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE;
} else {
ch_scratch.mhi.burst_mode_enabled = false;
}
- res = gsi_write_channel_scratch(channel->ep->gsi_chan_hdl,
+ res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
ch_scratch);
if (res) {
IPA_MHI_ERR("gsi_write_channel_scratch failed %d\n",
res);
goto fail_ch_scratch;
}
- channel->brstmode_enabled = ch_scratch.mhi.burst_mode_enabled;
- channel->ch_scratch.mhi = ch_scratch.mhi;
+
+ *params->mhi = ch_scratch.mhi;
IPA_MHI_DBG("Starting channel\n");
- res = gsi_start_channel(channel->ep->gsi_chan_hdl);
+ res = gsi_start_channel(ep->gsi_chan_hdl);
if (res) {
IPA_MHI_ERR("gsi_start_channel failed %d\n", res);
goto fail_ch_start;
@@ -1736,157 +327,15 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
fail_ch_start:
fail_ch_scratch:
- gsi_dealloc_channel(channel->ep->gsi_chan_hdl);
+ gsi_dealloc_channel(ep->gsi_chan_hdl);
fail_alloc_ch:
- gsi_dealloc_evt_ring(channel->ep->gsi_evt_ring_hdl);
- channel->ep->gsi_evt_ring_hdl = ~0;
+ gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+ ep->gsi_evt_ring_hdl = ~0;
fail_alloc_evt:
return res;
}
-/**
- * ipa3_mhi_init() - Initialize IPA MHI driver
- * @params: initialization params
- *
- * This function is called by MHI client driver on boot to initialize IPA MHI
- * Driver. When this function returns device can move to READY state.
- * This function is doing the following:
- * - Initialize MHI IPA internal data structures
- * - Create IPA RM resources
- * - Initialize debugfs
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa3_mhi_init(struct ipa_mhi_init_params *params)
-{
- int res;
- struct ipa_rm_create_params mhi_prod_params;
- struct ipa_rm_create_params mhi_cons_params;
-
- IPA_MHI_FUNC_ENTRY();
-
- if (!params) {
- IPA_MHI_ERR("null args\n");
- return -EINVAL;
- }
-
- if (!params->notify) {
- IPA_MHI_ERR("null notify function\n");
- return -EINVAL;
- }
-
- if (ipa3_mhi_ctx) {
- IPA_MHI_ERR("already initialized\n");
- return -EPERM;
- }
-
- IPA_MHI_DBG("msi: addr_lo = 0x%x addr_hi = 0x%x\n",
- params->msi.addr_low, params->msi.addr_hi);
- IPA_MHI_DBG("msi: data = 0x%x mask = 0x%x\n",
- params->msi.data, params->msi.mask);
- IPA_MHI_DBG("mmio_addr = 0x%x\n", params->mmio_addr);
- IPA_MHI_DBG("first_ch_idx = 0x%x\n", params->first_ch_idx);
- IPA_MHI_DBG("first_er_idx = 0x%x\n", params->first_er_idx);
- IPA_MHI_DBG("notify = %pF priv = %p\n", params->notify, params->priv);
- IPA_MHI_DBG("assert_bit40=%d\n", params->assert_bit40);
- IPA_MHI_DBG("test_mode=%d\n", params->test_mode);
-
- /* Initialize context */
- ipa3_mhi_ctx = kzalloc(sizeof(*ipa3_mhi_ctx), GFP_KERNEL);
- if (!ipa3_mhi_ctx) {
- IPA_MHI_ERR("no memory\n");
- res = -EFAULT;
- goto fail_alloc_ctx;
- }
-
- ipa3_mhi_ctx->state = IPA_MHI_STATE_INITIALIZED;
- ipa3_mhi_ctx->msi = params->msi;
- ipa3_mhi_ctx->mmio_addr = params->mmio_addr;
- ipa3_mhi_ctx->first_ch_idx = params->first_ch_idx;
- ipa3_mhi_ctx->first_er_idx = params->first_er_idx;
- ipa3_mhi_ctx->cb_notify = params->notify;
- ipa3_mhi_ctx->cb_priv = params->priv;
- ipa3_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
- ipa3_mhi_ctx->qmi_req_id = 0;
- ipa3_mhi_ctx->use_ipadma = true;
- ipa3_mhi_ctx->assert_bit40 = !!params->assert_bit40;
- ipa3_mhi_ctx->test_mode = params->test_mode;
- init_completion(&ipa3_mhi_ctx->rm_prod_granted_comp);
- spin_lock_init(&ipa3_mhi_ctx->state_lock);
- init_completion(&ipa3_mhi_ctx->rm_cons_comp);
-
- ipa3_mhi_ctx->wq = create_singlethread_workqueue("ipa_mhi_wq");
- if (!ipa3_mhi_ctx->wq) {
- IPA_MHI_ERR("failed to create workqueue\n");
- res = -EFAULT;
- goto fail_create_wq;
- }
-
- /* Create PROD in IPA RM */
- memset(&mhi_prod_params, 0, sizeof(mhi_prod_params));
- mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
- mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
- mhi_prod_params.reg_params.notify_cb = ipa3_mhi_rm_prod_notify;
- res = ipa_rm_create_resource(&mhi_prod_params);
- if (res) {
- IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
- goto fail_create_rm_prod;
- }
-
- /* Create CONS in IPA RM */
- memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
- mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
- mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
- mhi_cons_params.request_resource = ipa3_mhi_rm_cons_request;
- mhi_cons_params.release_resource = ipa3_mhi_rm_cons_release;
- res = ipa_rm_create_resource(&mhi_cons_params);
- if (res) {
- IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
- goto fail_create_rm_cons;
- }
-
- /* (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- * we need to move to READY state only after
- * HPS/DPS/GSI firmware are loaded.
- */
-
- /* Initialize uC interface */
- ipa3_uc_mhi_init(ipa3_mhi_uc_ready_cb,
- ipa3_mhi_uc_wakeup_request_cb);
- if (ipa3_uc_state_check() == 0)
- ipa3_mhi_set_state(IPA_MHI_STATE_READY);
-
- /* Initialize debugfs */
- ipa3_mhi_debugfs_init();
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-
-fail_create_rm_cons:
- ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
-fail_create_rm_prod:
- destroy_workqueue(ipa3_mhi_ctx->wq);
-fail_create_wq:
- kfree(ipa3_mhi_ctx);
- ipa3_mhi_ctx = NULL;
-fail_alloc_ctx:
- return res;
-}
-
-/**
- * ipa3_mhi_start() - Start IPA MHI engine
- * @params: pcie addresses for MHI
- *
- * This function is called by MHI client driver on MHI engine start for
- * handling MHI accelerated channels. This function is called after
- * ipa3_mhi_init() was called and can be called after MHI reset to restart MHI
- * engine. When this function returns device can move to M0 state.
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa3_mhi_start(struct ipa_mhi_start_params *params)
+int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params)
{
int res;
struct gsi_device_scratch gsi_scratch;
@@ -1899,131 +348,50 @@ int ipa3_mhi_start(struct ipa_mhi_start_params *params)
return -EINVAL;
}
- if (!ipa3_mhi_ctx) {
- IPA_MHI_ERR("not initialized\n");
- return -EPERM;
- }
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS &&
- ipa3_uc_state_check()) {
- IPA_MHI_ERR("IPA uc is not loaded\n");
- return -EAGAIN;
+ /* Initialize IPA MHI engine */
+ gsi_ep_info = ipa_get_gsi_ep_info(
+ ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD));
+ if (!gsi_ep_info) {
+ IPAERR("MHI PROD has no ep allocated\n");
+ ipa_assert();
}
-
- res = ipa3_mhi_set_state(IPA_MHI_STATE_STARTED);
+ memset(&gsi_scratch, 0, sizeof(gsi_scratch));
+ gsi_scratch.mhi_base_chan_idx_valid = true;
+ gsi_scratch.mhi_base_chan_idx = gsi_ep_info->ipa_gsi_chan_num +
+ params->gsi.first_ch_idx;
+ res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
+ &gsi_scratch);
if (res) {
- IPA_MHI_ERR("ipa3_mhi_set_state %d\n", res);
- return res;
- }
-
- ipa3_mhi_ctx->host_ctrl_addr = params->host_ctrl_addr;
- ipa3_mhi_ctx->host_data_addr = params->host_data_addr;
- ipa3_mhi_ctx->channel_context_array_addr =
- params->channel_context_array_addr;
- ipa3_mhi_ctx->event_context_array_addr =
- params->event_context_array_addr;
- IPADBG("host_ctrl_addr 0x%x\n", ipa3_mhi_ctx->host_ctrl_addr);
- IPADBG("host_data_addr 0x%x\n", ipa3_mhi_ctx->host_data_addr);
- IPADBG("channel_context_array_addr 0x%llx\n",
- ipa3_mhi_ctx->channel_context_array_addr);
- IPADBG("event_context_array_addr 0x%llx\n",
- ipa3_mhi_ctx->event_context_array_addr);
-
- /* Add MHI <-> Q6 dependencies to IPA RM */
- res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
- IPA_RM_RESOURCE_Q6_CONS);
- if (res && res != -EINPROGRESS) {
- IPA_MHI_ERR("failed to add dependency %d\n", res);
- goto fail_add_mhi_q6_dep;
- }
-
- res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
- IPA_RM_RESOURCE_MHI_CONS);
- if (res && res != -EINPROGRESS) {
- IPA_MHI_ERR("failed to add dependency %d\n", res);
- goto fail_add_q6_mhi_dep;
- }
-
- res = ipa3_mhi_request_prod();
- if (res) {
- IPA_MHI_ERR("failed request prod %d\n", res);
- goto fail_request_prod;
- }
-
- /* Initialize IPA MHI engine */
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- gsi_ep_info = ipa_get_gsi_ep_info(
- ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD));
- if (!gsi_ep_info) {
- IPAERR("MHI PROD has no ep allocated\n");
- BUG();
- }
- memset(&gsi_scratch, 0, sizeof(gsi_scratch));
- gsi_scratch.mhi_base_chan_idx_valid = true;
- gsi_scratch.mhi_base_chan_idx = gsi_ep_info->ipa_gsi_chan_num +
- ipa3_mhi_ctx->first_ch_idx;
- res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
- &gsi_scratch);
- if (res) {
- IPA_MHI_ERR("failed to write device scratch %d\n", res);
- goto fail_init_engine;
- }
- } else {
- res = ipa3_uc_mhi_init_engine(&ipa3_mhi_ctx->msi,
- ipa3_mhi_ctx->mmio_addr,
- ipa3_mhi_ctx->host_ctrl_addr,
- ipa3_mhi_ctx->host_data_addr,
- ipa3_mhi_ctx->first_ch_idx,
- ipa3_mhi_ctx->first_er_idx);
- if (res) {
- IPA_MHI_ERR("failed to start MHI engine %d\n", res);
- goto fail_init_engine;
- }
-
- /* Update UL/DL sync if valid */
- res = ipa3_uc_mhi_send_dl_ul_sync_info(
- ipa3_cached_dl_ul_sync_info);
- if (res) {
- IPA_MHI_ERR("failed to update ul/dl sync %d\n", res);
- goto fail_init_engine;
- }
+ IPA_MHI_ERR("failed to write device scratch %d\n", res);
+ goto fail_init_engine;
}
IPA_MHI_FUNC_EXIT();
return 0;
fail_init_engine:
- ipa3_mhi_release_prod();
-fail_request_prod:
- ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
- IPA_RM_RESOURCE_MHI_CONS);
-fail_add_q6_mhi_dep:
- ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
- IPA_RM_RESOURCE_Q6_CONS);
-fail_add_mhi_q6_dep:
- ipa3_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
return res;
}
/**
- * ipa3_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
+ * ipa3_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
* MHI channel
* @in: connect parameters
* @clnt_hdl: [out] client handle for this pipe
*
- * This function is called by MHI client driver on MHI channel start.
+ * This function is called by IPA MHI client driver on MHI channel start.
* This function is called after MHI engine was started.
*
* Return codes: 0 : success
* negative : error
*/
-int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
+int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl)
{
struct ipa3_ep_context *ep;
int ipa_ep_idx;
int res;
- struct ipa3_mhi_channel_ctx *channel = NULL;
- unsigned long flags;
+ enum ipa_client_type client;
IPA_MHI_FUNC_ENTRY();
@@ -2032,27 +400,8 @@ int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
return -EINVAL;
}
- if (in->sys.client >= IPA_CLIENT_MAX) {
- IPA_MHI_ERR("bad param client:%d\n", in->sys.client);
- return -EINVAL;
- }
-
- if (!IPA_CLIENT_IS_MHI(in->sys.client)) {
- IPA_MHI_ERR("Invalid MHI client, client: %d\n", in->sys.client);
- return -EINVAL;
- }
-
- IPA_MHI_DBG("channel=%d\n", in->channel_id);
-
- spin_lock_irqsave(&ipa3_mhi_ctx->state_lock, flags);
- if (!ipa3_mhi_ctx || ipa3_mhi_ctx->state != IPA_MHI_STATE_STARTED) {
- IPA_MHI_ERR("IPA MHI was not started\n");
- spin_unlock_irqrestore(&ipa3_mhi_ctx->state_lock, flags);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&ipa3_mhi_ctx->state_lock, flags);
-
- ipa_ep_idx = ipa3_get_ep_mapping(in->sys.client);
+ client = in->sys->client;
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
if (ipa_ep_idx == -1) {
IPA_MHI_ERR("Invalid client.\n");
return -EINVAL;
@@ -2067,70 +416,29 @@ int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
ep->valid = 1;
- ep->skip_ep_cfg = in->sys.skip_ep_cfg;
- ep->client = in->sys.client;
- ep->client_notify = in->sys.notify;
- ep->priv = in->sys.priv;
- ep->keep_ipa_awake = in->sys.keep_ipa_awake;
-
- channel = ipa3_mhi_get_channel_context(ep,
- in->channel_id);
- if (!channel) {
- IPA_MHI_ERR("ipa3_mhi_get_channel_context failed\n");
- res = -EINVAL;
- goto fail_init_channel;
- }
-
- channel->channel_context_addr =
- ipa3_mhi_ctx->channel_context_array_addr +
- channel->id * sizeof(struct ipa3_mhi_ch_ctx);
-
- /* for event context address index needs to read from host */
-
- IPA_MHI_DBG("client %d channelHandle %d channelIndex %d\n",
- channel->ep->client, channel->index, channel->id);
- IPA_MHI_DBG("channel_context_addr 0x%llx\n",
- channel->channel_context_addr);
-
- IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+ ep->skip_ep_cfg = in->sys->skip_ep_cfg;
+ ep->client = client;
+ ep->client_notify = in->sys->notify;
+ ep->priv = in->sys->priv;
+ ep->keep_ipa_awake = in->sys->keep_ipa_awake;
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- res = ipa_mhi_start_gsi_channel(channel, ipa_ep_idx);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n",
- res);
- goto fail_start_channel;
- }
- channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
-
- res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
- &channel->state, channel->channel_context_addr +
- offsetof(struct ipa3_mhi_ch_ctx, chstate),
- sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate));
- if (res) {
- IPAERR("ipa_mhi_read_write_host failed\n");
- return res;
-
- }
- } else {
- res = ipa_mhi_start_uc_channel(channel, ipa_ep_idx);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_start_uc_channel failed %d\n",
- res);
- goto fail_start_channel;
- }
- channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ res = ipa_mhi_start_gsi_channel(client,
+ ipa_ep_idx, &in->start.gsi);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n",
+ res);
+ goto fail_start_channel;
}
res = ipa3_enable_data_path(ipa_ep_idx);
if (res) {
IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
ipa_ep_idx);
- goto fail_enable_dp;
+ goto fail_ep_cfg;
}
if (!ep->skip_ep_cfg) {
- if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
+ if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
IPAERR("fail to configure EP.\n");
goto fail_ep_cfg;
}
@@ -2145,14 +453,11 @@ int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
*clnt_hdl = ipa_ep_idx;
- if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
ipa3_install_dflt_flt_rules(ipa_ep_idx);
- if (!ep->keep_ipa_awake)
- IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
-
ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
- IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys.client,
+ IPA_MHI_DBG("client %d (ep: %d) connected\n", client,
ipa_ep_idx);
IPA_MHI_FUNC_EXIT();
@@ -2161,21 +466,17 @@ int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
fail_ep_cfg:
ipa3_disable_data_path(ipa_ep_idx);
-fail_enable_dp:
- ipa3_mhi_reset_channel(channel);
fail_start_channel:
- IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
-fail_init_channel:
memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
return -EPERM;
}
/**
- * ipa3_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
+ * ipa3_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
* MHI channel
* @clnt_hdl: client handle for this pipe
*
- * This function is called by MHI client driver on MHI channel reset.
+ * This function is called by IPA MHI client driver on MHI channel reset.
* This function is called after MHI channel was started.
* This function is doing the following:
* - Send command to uC/GSI to reset corresponding MHI channel
@@ -2184,10 +485,9 @@ fail_init_channel:
* Return codes: 0 : success
* negative : error
*/
-int ipa3_mhi_disconnect_pipe(u32 clnt_hdl)
+int ipa3_disconnect_mhi_pipe(u32 clnt_hdl)
{
struct ipa3_ep_context *ep;
- static struct ipa3_mhi_channel_ctx *channel;
int res;
IPA_MHI_FUNC_ENTRY();
@@ -2202,34 +502,10 @@ int ipa3_mhi_disconnect_pipe(u32 clnt_hdl)
return -EINVAL;
}
- if (!ipa3_mhi_ctx) {
- IPA_MHI_ERR("IPA MHI was not initialized\n");
- return -EINVAL;
- }
-
- if (!IPA_CLIENT_IS_MHI(ipa3_ctx->ep[clnt_hdl].client)) {
- IPAERR("invalid IPA MHI client, client: %d\n",
- ipa3_ctx->ep[clnt_hdl].client);
- return -EINVAL;
- }
-
- channel = ipa3_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
- if (!channel) {
- IPAERR("invalid clnt index\n");
- return -EINVAL;
- }
ep = &ipa3_ctx->ep[clnt_hdl];
- if (!ep->keep_ipa_awake)
- IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- res = ipa3_mhi_reset_channel(channel);
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_reset_channel failed %d\n", res);
- goto fail_reset_channel;
- }
-
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- res = gsi_dealloc_channel(channel->ep->gsi_chan_hdl);
+ res = gsi_dealloc_channel(ep->gsi_chan_hdl);
if (res) {
IPAERR("gsi_dealloc_channel failed %d\n", res);
goto fail_reset_channel;
@@ -2238,838 +514,133 @@ int ipa3_mhi_disconnect_pipe(u32 clnt_hdl)
ep->valid = 0;
ipa3_delete_dflt_flt_rules(clnt_hdl);
- IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
IPA_MHI_FUNC_EXIT();
return 0;
fail_reset_channel:
- if (!ep->keep_ipa_awake)
- IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return res;
}
-
-static int ipa3_mhi_suspend_ul_channels(void)
+int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index)
{
- int i;
int res;
+ int ipa_ep_idx;
+ struct ipa3_ep_context *ep;
IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- if (!ipa3_mhi_ctx->ul_channels[i].valid)
- continue;
- if (ipa3_mhi_ctx->ul_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_RUN)
- continue;
- IPA_MHI_DBG_LOW("suspending channel %d\n",
- ipa3_mhi_ctx->ul_channels[i].id);
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- res = ipa3_mhi_suspend_gsi_channel(
- &ipa3_mhi_ctx->ul_channels[i]);
- else
- res = ipa3_uc_mhi_suspend_channel(
- ipa3_mhi_ctx->ul_channels[i].index);
- if (res) {
- IPA_MHI_ERR("failed to suspend channel %d error %d\n",
- i, res);
- return res;
- }
- ipa3_mhi_ctx->ul_channels[i].state =
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-static int ipa3_mhi_resume_ul_channels(bool LPTransitionRejected)
-{
- int i;
- int res;
- struct ipa3_mhi_channel_ctx *channel;
-
- IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- if (!ipa3_mhi_ctx->ul_channels[i].valid)
- continue;
- if (ipa3_mhi_ctx->ul_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
- continue;
- channel = &ipa3_mhi_ctx->ul_channels[i];
- IPA_MHI_DBG_LOW("resuming channel %d\n", channel->id);
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- if (channel->brstmode_enabled &&
- !LPTransitionRejected) {
- /*
- * set polling mode bit to DB mode before
- * resuming the channel
- */
- res = gsi_write_channel_scratch(
- channel->ep->gsi_chan_hdl,
- channel->ch_scratch);
- if (res) {
- IPA_MHI_ERR("write ch scratch fail %d\n"
- , res);
- return res;
- }
- }
- res = gsi_start_channel(channel->ep->gsi_chan_hdl);
- } else {
- res = ipa3_uc_mhi_resume_channel(channel->index,
- LPTransitionRejected);
- }
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ if (brstmode_enabled && !LPTransitionRejected) {
+ /*
+ * set polling mode bit to DB mode before
+ * resuming the channel
+ */
+ res = gsi_write_channel_scratch(
+ ep->gsi_chan_hdl, ch_scratch);
if (res) {
- IPA_MHI_ERR("failed to resume channel %d error %d\n",
- i, res);
+ IPA_MHI_ERR("write ch scratch fail %d\n"
+ , res);
return res;
}
-
- channel->stop_in_proc = false;
- channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
}
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa3_mhi_stop_event_update_ul_channels(void)
-{
- int i;
- int res;
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- return 0;
-
- IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- if (!ipa3_mhi_ctx->ul_channels[i].valid)
- continue;
- if (ipa3_mhi_ctx->ul_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
- continue;
- IPA_MHI_DBG_LOW("stop update event channel %d\n",
- ipa3_mhi_ctx->ul_channels[i].id);
- res = ipa3_uc_mhi_stop_event_update_channel(
- ipa3_mhi_ctx->ul_channels[i].index);
- if (res) {
- IPA_MHI_ERR("failed stop event channel %d error %d\n",
- i, res);
- return res;
- }
+ if (res) {
+ IPA_MHI_ERR("failed to resume channel error %d\n",
+ res);
+ return res;
}
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
-
-static int ipa3_mhi_suspend_dl_channels(void)
-{
- int i;
- int res;
-
- IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- if (!ipa3_mhi_ctx->dl_channels[i].valid)
- continue;
- if (ipa3_mhi_ctx->dl_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_RUN)
- continue;
- IPA_MHI_DBG_LOW("suspending channel %d\n",
- ipa3_mhi_ctx->dl_channels[i].id);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- res = ipa3_mhi_suspend_gsi_channel(
- &ipa3_mhi_ctx->dl_channels[i]);
- else
- res = ipa3_uc_mhi_suspend_channel(
- ipa3_mhi_ctx->dl_channels[i].index);
- if (res) {
- IPA_MHI_ERR("failed to suspend channel %d error %d\n",
- i, res);
- return res;
- }
- ipa3_mhi_ctx->dl_channels[i].state =
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+ res = gsi_start_channel(ep->gsi_chan_hdl);
+ if (res) {
+ IPA_MHI_ERR("failed to resume channel error %d\n", res);
+ return res;
}
IPA_MHI_FUNC_EXIT();
return 0;
}
-static int ipa3_mhi_resume_dl_channels(bool LPTransitionRejected)
+int ipa3_mhi_query_ch_info(enum ipa_client_type client,
+ struct gsi_chan_info *ch_info)
{
- int i;
+ int ipa_ep_idx;
int res;
- struct ipa3_mhi_channel_ctx *channel;
+ struct ipa3_ep_context *ep;
IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- if (!ipa3_mhi_ctx->dl_channels[i].valid)
- continue;
- if (ipa3_mhi_ctx->dl_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
- continue;
- channel = &ipa3_mhi_ctx->dl_channels[i];
- IPA_MHI_DBG_LOW("resuming channel %d\n", channel->id);
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- if (channel->brstmode_enabled &&
- !LPTransitionRejected) {
- /*
- * set polling mode bit to DB mode before
- * resuming the channel
- */
- res = gsi_write_channel_scratch(
- channel->ep->gsi_chan_hdl,
- channel->ch_scratch);
- if (res) {
- IPA_MHI_ERR("write ch scratch fail %d\n"
- , res);
- return res;
- }
- }
- res = gsi_start_channel(channel->ep->gsi_chan_hdl);
- } else {
- res = ipa3_uc_mhi_resume_channel(channel->index,
- LPTransitionRejected);
- }
- if (res) {
- IPA_MHI_ERR("failed to resume channel %d error %d\n",
- i, res);
- return res;
- }
- channel->stop_in_proc = false;
- channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
- }
- IPA_MHI_FUNC_EXIT();
- return 0;
-}
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
-static int ipa3_mhi_stop_event_update_dl_channels(void)
-{
- int i;
- int res;
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- return 0;
-
- IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- if (!ipa3_mhi_ctx->dl_channels[i].valid)
- continue;
- if (ipa3_mhi_ctx->dl_channels[i].state !=
- IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
- continue;
- IPA_MHI_DBG_LOW("stop update event channel %d\n",
- ipa3_mhi_ctx->dl_channels[i].id);
- res = ipa3_uc_mhi_stop_event_update_channel(
- ipa3_mhi_ctx->dl_channels[i].index);
- if (res) {
- IPA_MHI_ERR("failed stop event channel %d error %d\n",
- i, res);
- return res;
- }
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info);
+ if (res) {
+ IPAERR("gsi_query_channel_info failed\n");
+ return res;
}
IPA_MHI_FUNC_EXIT();
return 0;
}
-static bool ipa3_mhi_check_pending_packets_from_host(void)
-{
- int i;
- int res;
- struct ipa3_mhi_channel_ctx *channel;
-
- IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- channel = &ipa3_mhi_ctx->ul_channels[i];
- if (!channel->valid)
- continue;
-
- res = gsi_query_channel_info(channel->ep->gsi_chan_hdl,
- &channel->ch_info);
- if (res) {
- IPAERR("gsi_query_channel_info failed\n");
- return true;
- }
- res = ipa_mhi_read_ch_ctx(channel);
- if (res) {
- IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
- return true;
- }
-
- if (channel->ch_info.rp != channel->ch_ctx_host.wp) {
- IPA_MHI_DBG("There are pending packets from host\n");
- IPA_MHI_DBG("device rp 0x%llx host 0x%llx\n",
- channel->ch_info.rp, channel->ch_ctx_host.wp);
-
- return true;
- }
- }
-
- IPA_MHI_FUNC_EXIT();
- return false;
-}
-
-static void ipa3_mhi_update_host_ch_state(bool update_rp)
+bool ipa3_has_open_aggr_frame(enum ipa_client_type client)
{
- int i;
- int res;
- struct ipa3_mhi_channel_ctx *channel;
-
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
- channel = &ipa3_mhi_ctx->ul_channels[i];
- if (!channel->valid)
- continue;
-
- if (update_rp) {
- res = gsi_query_channel_info(channel->ep->gsi_chan_hdl,
- &channel->ch_info);
- if (res) {
- IPAERR("gsi_query_channel_info failed\n");
- BUG();
- return;
- }
-
- res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
- &channel->ch_info.rp,
- channel->channel_context_addr +
- offsetof(struct ipa3_mhi_ch_ctx, rp),
- sizeof(channel->ch_info.rp));
- if (res) {
- IPAERR("ipa_mhi_read_write_host failed\n");
- BUG();
- return;
- }
- }
-
- res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
- &channel->state, channel->channel_context_addr +
- offsetof(struct ipa3_mhi_ch_ctx, chstate),
- sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate));
- if (res) {
- IPAERR("ipa_mhi_read_write_host failed\n");
- BUG();
- return;
- }
- }
-
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- channel = &ipa3_mhi_ctx->dl_channels[i];
- if (!channel->valid)
- continue;
-
- if (update_rp) {
- res = gsi_query_channel_info(channel->ep->gsi_chan_hdl,
- &channel->ch_info);
- if (res) {
- IPAERR("gsi_query_channel_info failed\n");
- BUG();
- return;
- }
-
- res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
- &channel->ch_info.rp,
- channel->channel_context_addr +
- offsetof(struct ipa3_mhi_ch_ctx, rp),
- sizeof(channel->ch_info.rp));
- if (res) {
- IPAERR("ipa_mhi_read_write_host failed\n");
- BUG();
- return;
- }
- }
-
- res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
- &channel->state, channel->channel_context_addr +
- offsetof(struct ipa3_mhi_ch_ctx, chstate),
- sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate));
- if (res) {
- IPAERR("ipa_mhi_read_write_host failed\n");
- BUG();
- }
- }
-}
-
-static bool ipa3_mhi_has_open_aggr_frame(void)
-{
- struct ipa3_mhi_channel_ctx *channel;
u32 aggr_state_active;
- int i;
int ipa_ep_idx;
aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
IPA_MHI_DBG_LOW("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active);
- for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
- channel = &ipa3_mhi_ctx->dl_channels[i];
-
- if (!channel->valid)
- continue;
-
- ipa_ep_idx = ipa_get_ep_mapping(channel->ep->client);
- if (ipa_ep_idx == -1) {
- BUG();
- return false;
- }
-
- if ((1 << ipa_ep_idx) & aggr_state_active)
- return true;
- }
-
- return false;
-}
-
-
-/**
- * ipa3_mhi_suspend() - Suspend MHI accelerated channels
- * @force:
- * false: in case of data pending in IPA, MHI channels will not be
- * suspended and function will fail.
- * true: in case of data pending in IPA, make sure no further access from
- * IPA to PCIe is possible. In this case suspend cannot fail.
- *
- * This function is called by MHI client driver on MHI suspend.
- * This function is called after MHI channel was started.
- * When this function returns device can move to M1/M2/M3/D3cold state.
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa3_mhi_suspend(bool force)
-{
- int res;
- bool empty;
- bool force_clear = false;
-
- IPA_MHI_FUNC_ENTRY();
-
- res = ipa3_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS);
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_set_state failed %d\n", res);
- return res;
- }
-
- res = ipa3_mhi_suspend_ul_channels();
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_suspend_ul_channels failed %d\n", res);
- goto fail_suspend_ul_channel;
- }
-
- empty = ipa3_mhi_wait_for_ul_empty_timeout(
- IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
-
- if (!empty) {
- if (force) {
- res = ipa3_mhi_enable_force_clear(
- ipa3_mhi_ctx->qmi_req_id, false);
- if (res) {
- IPA_MHI_ERR("failed to enable force clear\n");
- BUG();
- return res;
- }
- force_clear = true;
- IPA_MHI_DBG_LOW("force clear datapath enabled\n");
-
- empty = ipa3_mhi_wait_for_ul_empty_timeout(
- IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
- IPADBG("empty=%d\n", empty);
- if (!empty && ipa3_ctx->transport_prototype
- == IPA_TRANSPORT_TYPE_GSI) {
- IPA_MHI_ERR("Failed to suspend UL channels\n");
- if (ipa3_mhi_ctx->test_mode) {
- res = -EAGAIN;
- goto fail_suspend_ul_channel;
- }
-
- BUG();
- }
- } else {
- IPA_MHI_DBG("IPA not empty\n");
- res = -EAGAIN;
- goto fail_suspend_ul_channel;
- }
- }
-
- if (force_clear) {
- res = ipa3_mhi_disable_force_clear(ipa3_mhi_ctx->qmi_req_id);
- if (res) {
- IPA_MHI_ERR("failed to disable force clear\n");
- BUG();
- return res;
- }
- IPA_MHI_DBG_LOW("force clear datapath disabled\n");
- ipa3_mhi_ctx->qmi_req_id++;
- }
-
- if (!force && ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- if (ipa3_mhi_check_pending_packets_from_host()) {
- res = -EAGAIN;
- goto fail_suspend_ul_channel;
- }
- }
-
- res = ipa3_mhi_stop_event_update_ul_channels();
- if (res) {
- IPA_MHI_ERR(
- "ipa3_mhi_stop_event_update_ul_channels failed %d\n",
- res);
- goto fail_suspend_ul_channel;
- }
-
- /*
- * hold IPA clocks and release them after all
- * IPA RM resource are released to make sure tag process will not start
- */
- IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-
- IPA_MHI_DBG_LOW("release prod\n");
- res = ipa3_mhi_release_prod();
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_release_prod failed %d\n", res);
- goto fail_release_prod;
- }
-
- IPA_MHI_DBG_LOW("wait for cons release\n");
- res = ipa3_mhi_wait_for_cons_release();
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_wait_for_cons_release failed %d\n", res);
- goto fail_release_cons;
- }
-
- usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
-
- res = ipa3_mhi_suspend_dl_channels();
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_suspend_dl_channels failed %d\n", res);
- goto fail_suspend_dl_channel;
- }
-
- res = ipa3_mhi_stop_event_update_dl_channels();
- if (res) {
- IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
- goto fail_stop_event_update_dl_channel;
- }
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- if (ipa3_mhi_has_open_aggr_frame()) {
- IPA_MHI_DBG("There is an open aggr frame\n");
- if (force) {
- ipa3_mhi_ctx->trigger_wakeup = true;
- } else {
- res = -EAGAIN;
- goto fail_stop_event_update_dl_channel;
- }
- }
- }
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- ipa3_mhi_update_host_ch_state(true);
-
- if (!empty)
- ipa3_ctx->tag_process_before_gating = false;
-
- res = ipa3_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_set_state failed %d\n", res);
- goto fail_release_cons;
+ ipa_ep_idx = ipa_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ ipa_assert();
+ return false;
}
- IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
- IPA_MHI_FUNC_EXIT();
- return 0;
+ if ((1 << ipa_ep_idx) & aggr_state_active)
+ return true;
-fail_stop_event_update_dl_channel:
- ipa3_mhi_resume_dl_channels(true);
-fail_suspend_dl_channel:
-fail_release_cons:
- ipa3_mhi_request_prod();
-fail_release_prod:
- IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-fail_suspend_ul_channel:
- ipa3_mhi_resume_ul_channels(true);
- ipa3_mhi_set_state(IPA_MHI_STATE_STARTED);
- if (force_clear) {
- if (ipa3_mhi_disable_force_clear(ipa3_mhi_ctx->qmi_req_id)) {
- IPA_MHI_ERR("failed to disable force clear\n");
- BUG();
- }
- IPA_MHI_DBG_LOW("force clear datapath disabled\n");
- ipa3_mhi_ctx->qmi_req_id++;
- }
- return res;
+ return false;
}
-/**
- * ipa3_mhi_resume() - Resume MHI accelerated channels
- *
- * This function is called by MHI client driver on MHI resume.
- * This function is called after MHI channel was suspended.
- * When this function returns device can move to M0 state.
- * This function is doing the following:
- * - Send command to uC/GSI to resume corresponding MHI channel
- * - Request MHI_PROD in IPA RM
- * - Resume data to IPA
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa3_mhi_resume(void)
+int ipa3_mhi_destroy_channel(enum ipa_client_type client)
{
int res;
- bool dl_channel_resumed = false;
-
- IPA_MHI_FUNC_ENTRY();
-
- res = ipa3_mhi_set_state(IPA_MHI_STATE_RESUME_IN_PROGRESS);
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_set_state failed %d\n", res);
- return res;
- }
-
- if (ipa3_mhi_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) {
- /* resume all DL channels */
- res = ipa3_mhi_resume_dl_channels(false);
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_resume_dl_channels failed %d\n",
- res);
- goto fail_resume_dl_channels;
- }
- dl_channel_resumed = true;
-
- ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
- IPA_RM_RESOURCE_MHI_CONS);
- ipa3_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
- }
-
- res = ipa3_mhi_request_prod();
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_request_prod failed %d\n", res);
- goto fail_request_prod;
- }
-
- /* resume all UL channels */
- res = ipa3_mhi_resume_ul_channels(false);
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_resume_ul_channels failed %d\n", res);
- goto fail_resume_ul_channels;
- }
-
- if (!dl_channel_resumed) {
- res = ipa3_mhi_resume_dl_channels(false);
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_resume_dl_channels failed %d\n",
- res);
- goto fail_resume_dl_channels2;
- }
- }
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- ipa3_mhi_update_host_ch_state(false);
-
- res = ipa3_mhi_set_state(IPA_MHI_STATE_STARTED);
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_set_state failed %d\n", res);
- goto fail_set_state;
- }
-
- IPA_MHI_FUNC_EXIT();
- return 0;
-
-fail_set_state:
- ipa3_mhi_suspend_dl_channels();
-fail_resume_dl_channels2:
- ipa3_mhi_suspend_ul_channels();
-fail_resume_ul_channels:
- ipa3_mhi_release_prod();
-fail_request_prod:
- ipa3_mhi_suspend_dl_channels();
-fail_resume_dl_channels:
- ipa3_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
- return res;
-}
-
-static int ipa3_mhi_destroy_channels(struct ipa3_mhi_channel_ctx *channels,
- int num_of_channels)
-{
- struct ipa3_mhi_channel_ctx *channel;
- int i, res;
- u32 clnt_hdl;
-
- for (i = 0; i < num_of_channels; i++) {
- channel = &channels[i];
- if (!channel->valid)
- continue;
- if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID)
- continue;
- if (channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
- clnt_hdl = ipa3_get_ep_mapping(channel->ep->client);
- IPA_MHI_DBG("disconnect pipe (ep: %d)\n", clnt_hdl);
- res = ipa3_mhi_disconnect_pipe(clnt_hdl);
- if (res) {
- IPAERR("failed to disconnect pipe %d, err %d\n",
- clnt_hdl, res);
- goto fail;
- }
- }
+ int ipa_ep_idx;
+ struct ipa3_ep_context *ep;
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n",
- channel->ep->gsi_evt_ring_hdl, clnt_hdl);
- res = gsi_reset_evt_ring(channel->ep->gsi_evt_ring_hdl);
- if (res) {
- IPAERR(" failed to reset evt ring %lu, err %d\n"
- , channel->ep->gsi_evt_ring_hdl, res);
- goto fail;
- }
- res = gsi_dealloc_evt_ring(
- channel->ep->gsi_evt_ring_hdl);
- if (res) {
- IPAERR("dealloc evt ring %lu failed, err %d\n"
- , channel->ep->gsi_evt_ring_hdl, res);
- goto fail;
- }
- }
- }
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
- return 0;
-fail:
- return res;
-}
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
-/**
- * ipa3_mhi_destroy() - Destroy MHI IPA
- *
- * This function is called by MHI client driver on MHI reset to destroy all IPA
- * MHI resources.
- * When this function returns ipa_mhi can re-initialize.
- */
-void ipa3_mhi_destroy(void)
-{
- int res;
+ IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n",
+ ep->gsi_evt_ring_hdl, ipa_ep_idx);
- IPA_MHI_FUNC_ENTRY();
- if (!ipa3_mhi_ctx) {
- IPA_MHI_DBG("IPA MHI was not initialized, already destroyed\n");
- return;
- }
- /* reset all UL and DL acc channels and its accociated event rings */
- res = ipa3_mhi_destroy_channels(ipa3_mhi_ctx->ul_channels,
- IPA_MHI_MAX_UL_CHANNELS);
+ res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
if (res) {
- IPAERR("ipa3_mhi_destroy_channels(ul_channels) failed %d\n",
- res);
- goto fail;
- }
- IPA_MHI_DBG("All UL channels are disconnected\n");
-
- res = ipa3_mhi_destroy_channels(ipa3_mhi_ctx->dl_channels,
- IPA_MHI_MAX_DL_CHANNELS);
- if (res) {
- IPAERR("ipa3_mhi_destroy_channels(dl_channels) failed %d\n",
- res);
+ IPAERR(" failed to reset evt ring %lu, err %d\n"
+ , ep->gsi_evt_ring_hdl, res);
goto fail;
}
- IPA_MHI_DBG("All DL channels are disconnected\n");
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
- IPA_MHI_DBG("cleanup uC MHI\n");
- ipa3_uc_mhi_cleanup();
- }
+ IPA_MHI_DBG("dealloc event ring (hdl: %lu, ep: %d)\n",
+ ep->gsi_evt_ring_hdl, ipa_ep_idx);
- if (ipa3_mhi_ctx->state != IPA_MHI_STATE_INITIALIZED &&
- ipa3_mhi_ctx->state != IPA_MHI_STATE_READY) {
- IPA_MHI_DBG("release prod\n");
- res = ipa3_mhi_release_prod();
- if (res) {
- IPA_MHI_ERR("ipa3_mhi_release_prod failed %d\n", res);
- goto fail;
- }
- IPA_MHI_DBG("wait for cons release\n");
- res = ipa3_mhi_wait_for_cons_release();
- if (res) {
- IPAERR("ipa3_mhi_wait_for_cons_release failed %d\n",
- res);
- goto fail;
- }
- usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN,
- IPA_MHI_SUSPEND_SLEEP_MAX);
-
- IPA_MHI_DBG("deleate dependency Q6_PROD->MHI_CONS\n");
- res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
- IPA_RM_RESOURCE_MHI_CONS);
- if (res) {
- IPAERR("Error deleting dependency %d->%d, res=%d\n",
- IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_MHI_CONS, res);
- goto fail;
- }
- IPA_MHI_DBG("deleate dependency MHI_PROD->Q6_CONS\n");
- res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
- IPA_RM_RESOURCE_Q6_CONS);
- if (res) {
- IPAERR("Error deleting dependency %d->%d, res=%d\n",
- IPA_RM_RESOURCE_MHI_PROD, IPA_RM_RESOURCE_Q6_CONS, res);
- goto fail;
- }
- }
-
- res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
- if (res) {
- IPAERR("Error deleting resource %d, res=%d\n",
- IPA_RM_RESOURCE_MHI_PROD, res);
- goto fail;
- }
-
- res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+ res = gsi_dealloc_evt_ring(
+ ep->gsi_evt_ring_hdl);
if (res) {
- IPAERR("Error deleting resource %d, res=%d\n",
- IPA_RM_RESOURCE_MHI_CONS, res);
+ IPAERR("dealloc evt ring %lu failed, err %d\n"
+ , ep->gsi_evt_ring_hdl, res);
goto fail;
}
- ipa3_mhi_debugfs_destroy();
- destroy_workqueue(ipa3_mhi_ctx->wq);
- kfree(ipa3_mhi_ctx);
- ipa3_mhi_ctx = NULL;
- IPA_MHI_DBG("IPA MHI was reset, ready for re-init\n");
-
- IPA_MHI_FUNC_EXIT();
- return;
-fail:
- BUG();
- return;
-}
-
-/**
- * ipa3_mhi_handle_ipa_config_req() - hanle IPA CONFIG QMI message
- *
- * This function is called by by IPA QMI service to indicate that IPA CONFIG
- * message was sent from modem. IPA MHI will update this information to IPA uC
- * or will cache it until IPA MHI will be initialized.
- *
- * Return codes: 0 : success
- * negative : error
- */
-int ipa3_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
-{
- IPA_MHI_FUNC_ENTRY();
-
- if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
- ipa3_mhi_cache_dl_ul_sync_info(config_req);
- if (ipa3_mhi_ctx &&
- ipa3_mhi_ctx->state != IPA_MHI_STATE_INITIALIZED)
- ipa3_uc_mhi_send_dl_ul_sync_info(
- ipa3_cached_dl_ul_sync_info);
- }
-
- IPA_MHI_FUNC_EXIT();
return 0;
+fail:
+ return res;
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 6444dcf3cfdc..ce399c2beb95 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -219,7 +219,7 @@ static int handle_ipa_config_req(void *req_h, void *req)
memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01));
resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
IPAWANDBG("Received IPA CONFIG Request\n");
- rc = ipa3_mhi_handle_ipa_config_req(
+ rc = ipa_mhi_handle_ipa_config_req(
(struct ipa_config_req_msg_v01 *)req);
if (rc) {
IPAERR("ipa3_mhi_handle_ipa_config_req failed %d\n", rc);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index b70fd03d492e..0e3cecb5e4a8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -149,7 +149,7 @@ static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
{
struct ipa3_rt_tbl_set *set;
struct ipa3_rt_tbl *tbl;
- struct ipa3_mem_buffer tbl_mem;
+ struct ipa_mem_buffer tbl_mem;
u8 *tbl_mem_buf;
struct ipa3_rt_entry *entry;
int res;
@@ -314,7 +314,7 @@ static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
* Return: 0 on success, negative on failure
*/
static int ipa_alloc_init_rt_tbl_hdr(enum ipa_ip_type ip,
- struct ipa3_mem_buffer *hash_hdr, struct ipa3_mem_buffer *nhash_hdr)
+ struct ipa_mem_buffer *hash_hdr, struct ipa_mem_buffer *nhash_hdr)
{
int num_index;
u64 *hash_entr;
@@ -483,8 +483,8 @@ static void ipa_get_rt_tbl_lcl_bdy_size(enum ipa_ip_type ip,
* Return: 0 on success, negative on failure
*/
static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip,
- struct ipa3_mem_buffer *hash_hdr, struct ipa3_mem_buffer *nhash_hdr,
- struct ipa3_mem_buffer *hash_bdy, struct ipa3_mem_buffer *nhash_bdy)
+ struct ipa_mem_buffer *hash_hdr, struct ipa_mem_buffer *nhash_hdr,
+ struct ipa_mem_buffer *hash_bdy, struct ipa_mem_buffer *nhash_bdy)
{
u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
u32 apps_start_idx;
@@ -604,7 +604,7 @@ no_rt_tbls:
* Return: true if enough space available or false in other cases
*/
static bool ipa_rt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
- enum ipa_rule_type rlt, struct ipa3_mem_buffer *bdy)
+ enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
{
u16 avail;
@@ -639,8 +639,8 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
struct ipahal_imm_cmd_pyld *cmd_pyld[5];
int num_cmd = 0;
- struct ipa3_mem_buffer hash_bdy, nhash_bdy;
- struct ipa3_mem_buffer hash_hdr, nhash_hdr;
+ struct ipa_mem_buffer hash_bdy, nhash_bdy;
+ struct ipa_mem_buffer hash_hdr, nhash_hdr;
u32 num_modem_rt_index;
int rc = 0;
u32 lcl_hash_hdr, lcl_nhash_hdr;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 7bc11a339633..cb47773e8a39 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -960,7 +960,7 @@ void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val)
int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
{
int res;
- struct ipa3_mem_buffer mem;
+ struct ipa_mem_buffer mem;
struct IpaHwMemCopyData_t *cmd;
IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
index ef884837afb0..7949d91bd3a2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
@@ -622,7 +622,7 @@ int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
u32 first_evt_idx)
{
int res;
- struct ipa3_mem_buffer mem;
+ struct ipa_mem_buffer mem;
struct IpaHwMhiInitCmdData_t *init_cmd_data;
struct IpaHwMhiMsiCmdData_t *msi_cmd;
@@ -887,7 +887,7 @@ disable_clks:
return res;
}
-int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
+int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
{
int res;
@@ -897,13 +897,14 @@ int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
}
IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
- cmd.params.isDlUlSyncEnabled, cmd.params.UlAccmVal);
+ cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal);
IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
- cmd.params.ulMsiEventThreshold, cmd.params.dlMsiEventThreshold);
+ cmd->params.ulMsiEventThreshold,
+ cmd->params.dlMsiEventThreshold);
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
- res = ipa3_uc_send_cmd(cmd.raw32b,
+ res = ipa3_uc_send_cmd(cmd->raw32b,
IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
if (res) {
IPAERR("ipa3_uc_send_cmd failed %d\n", res);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 9f4f3a6ae62d..393ae2a41abb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -735,7 +735,7 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
int ipa_ep_idx;
int result = -EFAULT;
struct ipa3_ep_context *ep;
- struct ipa3_mem_buffer cmd;
+ struct ipa_mem_buffer cmd;
struct IpaHwWdiTxSetUpCmdData_t *tx;
struct IpaHwWdiRxSetUpCmdData_t *rx;
struct IpaHwWdi2TxSetUpCmdData_t *tx_2;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 5574ef88398b..34d0ad53b469 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -4455,6 +4455,16 @@ static void *ipa3_get_ipc_logbuf_low(void)
return NULL;
}
+static void ipa3_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+ *holb = ipa3_ctx->ep[ep_idx].holb;
+}
+
+static void ipa3_set_tag_process_before_gating(bool val)
+{
+ ipa3_ctx->tag_process_before_gating = val;
+}
+
int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
struct ipa_api_controller *api_ctrl)
{
@@ -4477,6 +4487,9 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_cfg_ep_deaggr = ipa3_cfg_ep_deaggr;
api_ctrl->ipa_cfg_ep_route = ipa3_cfg_ep_route;
api_ctrl->ipa_cfg_ep_holb = ipa3_cfg_ep_holb;
+ api_ctrl->ipa_get_holb = ipa3_get_holb;
+ api_ctrl->ipa_set_tag_process_before_gating =
+ ipa3_set_tag_process_before_gating;
api_ctrl->ipa_cfg_ep_cfg = ipa3_cfg_ep_cfg;
api_ctrl->ipa_cfg_ep_metadata_mask = ipa3_cfg_ep_metadata_mask;
api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client;
@@ -4548,13 +4561,32 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_dma_async_memcpy = ipa3_dma_async_memcpy;
api_ctrl->ipa_dma_uc_memcpy = ipa3_dma_uc_memcpy;
api_ctrl->ipa_dma_destroy = ipa3_dma_destroy;
- api_ctrl->ipa_mhi_init = ipa3_mhi_init;
- api_ctrl->ipa_mhi_start = ipa3_mhi_start;
- api_ctrl->ipa_mhi_connect_pipe = ipa3_mhi_connect_pipe;
- api_ctrl->ipa_mhi_disconnect_pipe = ipa3_mhi_disconnect_pipe;
- api_ctrl->ipa_mhi_suspend = ipa3_mhi_suspend;
- api_ctrl->ipa_mhi_resume = ipa3_mhi_resume;
- api_ctrl->ipa_mhi_destroy = ipa3_mhi_destroy;
+ api_ctrl->ipa_mhi_init_engine = ipa3_mhi_init_engine;
+ api_ctrl->ipa_connect_mhi_pipe = ipa3_connect_mhi_pipe;
+ api_ctrl->ipa_disconnect_mhi_pipe = ipa3_disconnect_mhi_pipe;
+ api_ctrl->ipa_mhi_stop_gsi_channel = ipa3_mhi_stop_gsi_channel;
+ api_ctrl->ipa_uc_mhi_reset_channel = ipa3_uc_mhi_reset_channel;
+ api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
+ ipa3_qmi_enable_force_clear_datapath_send;
+ api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
+ ipa3_qmi_disable_force_clear_datapath_send;
+ api_ctrl->ipa_mhi_reset_channel_internal =
+ ipa3_mhi_reset_channel_internal;
+ api_ctrl->ipa_mhi_start_channel_internal =
+ ipa3_mhi_start_channel_internal;
+ api_ctrl->ipa_mhi_query_ch_info = ipa3_mhi_query_ch_info;
+ api_ctrl->ipa_mhi_resume_channels_internal =
+ ipa3_mhi_resume_channels_internal;
+ api_ctrl->ipa_has_open_aggr_frame = ipa3_has_open_aggr_frame;
+ api_ctrl->ipa_mhi_destroy_channel = ipa3_mhi_destroy_channel;
+ api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
+ ipa3_uc_mhi_send_dl_ul_sync_info;
+ api_ctrl->ipa_uc_mhi_init = ipa3_uc_mhi_init;
+ api_ctrl->ipa_uc_mhi_suspend_channel = ipa3_uc_mhi_suspend_channel;
+ api_ctrl->ipa_uc_mhi_stop_event_update_channel =
+ ipa3_uc_mhi_stop_event_update_channel;
+ api_ctrl->ipa_uc_mhi_cleanup = ipa3_uc_mhi_cleanup;
+ api_ctrl->ipa_uc_state_check = ipa3_uc_state_check;
api_ctrl->ipa_write_qmap_id = ipa3_write_qmap_id;
api_ctrl->ipa_add_interrupt_handler = ipa3_add_interrupt_handler;
api_ctrl->ipa_remove_interrupt_handler = ipa3_remove_interrupt_handler;
@@ -4822,7 +4854,7 @@ void ipa3_suspend_apps_pipes(bool suspend)
*/
int ipa3_inject_dma_task_for_gsi(void)
{
- static struct ipa3_mem_buffer mem = {0};
+ static struct ipa_mem_buffer mem = {0};
struct ipahal_imm_cmd_dma_task_32b_addr cmd = {0};
static struct ipahal_imm_cmd_pyld *cmd_pyld;
struct ipa3_desc desc = {0};
@@ -4879,7 +4911,7 @@ int ipa3_inject_dma_task_for_gsi(void)
*/
int ipa3_stop_gsi_channel(u32 clnt_hdl)
{
- struct ipa3_mem_buffer mem;
+ struct ipa_mem_buffer mem;
int res = 0;
int i;
struct ipa3_ep_context *ep;
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
index 142263be23aa..d131fdc3046c 100644
--- a/drivers/platform/msm/mhi_dev/mhi.c
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -26,7 +26,7 @@
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/msm_ep_pcie.h>
-#include <linux/ipa.h>
+#include <linux/ipa_mhi.h>
#include <linux/vmalloc.h>
#include "mhi.h"
diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.c b/drivers/platform/msm/mhi_dev/mhi_sm.c
index 12a4fb229922..4456f9b35a14 100644
--- a/drivers/platform/msm/mhi_dev/mhi_sm.c
+++ b/drivers/platform/msm/mhi_dev/mhi_sm.c
@@ -16,7 +16,7 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
-#include <linux/ipa.h>
+#include <linux/ipa_mhi.h>
#include "mhi_hwio.h"
#include "mhi_sm.h"
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index ca43e476b7f3..7d5b03b654cf 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -530,6 +530,15 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ /* disable Type-C factory mode */
+ rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+ FACTORY_MODE_DETECTION_EN_BIT, 0);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't disable Type-C factory mode rc=%d\n", rc);
+ return rc;
+ }
+
return rc;
}
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
index b4f1553056f8..ac71f2c75472 100644
--- a/drivers/pwm/pwm-qpnp.c
+++ b/drivers/pwm/pwm-qpnp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -164,9 +164,14 @@ do { \
#define QPNP_LO_INDEX_MASK 0x3F
/* LPG DTEST */
-#define QPNP_LPG_DTEST_LINE_MAX 4
+#define QPNP_LPG_DTEST_LINE_MAX 4
#define QPNP_LPG_DTEST_OUTPUT_MAX 5
-#define QPNP_DTEST_OUTPUT_MASK 0x07
+#define QPNP_LPG_DTEST_OUTPUT_MASK 0x07
+
+/* PWM DTEST */
+#define QPNP_PWM_DTEST_LINE_MAX 2
+#define QPNP_PWM_DTEST_OUTPUT_MAX 2
+#define QPNP_PWM_DTEST_OUTPUT_MASK 0x03
#define NUM_CLOCKS 3
#define QPNP_PWM_M_MAX 7
@@ -201,6 +206,13 @@ do { \
#define QPNP_PWM_SIZE_7_8_BIT 0x6
#define QPNP_PWM_SIZE_6_7_9_BIT 0xB
+/*
+ * Registers that don't need to be cached are defined below from an offset
+ * of SPMI_LPG_REG_BASE_OFFSET.
+ */
+#define QPNP_LPG_SEC_ACCESS 0x90
+#define QPNP_LPG_DTEST 0xA2
+
/* Supported time levels */
enum time_level {
LVL_NSEC,
@@ -243,8 +255,6 @@ enum qpnp_lpg_registers_list {
QPNP_PAUSE_LO_MULTIPLIER_MSB,
QPNP_HI_INDEX,
QPNP_LO_INDEX,
- QPNP_LPG_SEC_ACCESS = QPNP_LO_INDEX + 121,
- QPNP_LPG_DTEST = QPNP_LO_INDEX + 139,
QPNP_TOTAL_LPG_SPMI_REGISTERS
};
@@ -565,6 +575,7 @@ static void qpnp_lpg_calc_pwm_value(struct _qpnp_pwm_config *pwm_config,
max_pwm_value = (1 << pwm_config->period.pwm_size) - 1;
if (pwm_config->pwm_value > max_pwm_value)
pwm_config->pwm_value = max_pwm_value;
+ pr_debug("pwm_value: %d\n", pwm_config->pwm_value);
}
static int qpnp_lpg_change_table(struct qpnp_pwm_chip *chip,
@@ -683,6 +694,7 @@ static int qpnp_lpg_save_pwm_value(struct qpnp_pwm_chip *chip)
value = pwm_config->pwm_value;
mask = QPNP_PWM_VALUE_LSB_MASK;
+ pr_debug("pwm_lsb value:%d\n", value & mask);
rc = qpnp_lpg_save_and_write(value, mask,
&chip->qpnp_lpg_registers[QPNP_PWM_VALUE_LSB],
SPMI_LPG_REG_ADDR(lpg_config->base_addr,
@@ -695,6 +707,7 @@ static int qpnp_lpg_save_pwm_value(struct qpnp_pwm_chip *chip)
mask = QPNP_PWM_VALUE_MSB_MASK;
+ pr_debug("pwm_msb value:%d\n", value);
rc = qpnp_lpg_save_and_write(value, mask,
&chip->qpnp_lpg_registers[QPNP_PWM_VALUE_MSB],
SPMI_LPG_REG_ADDR(lpg_config->base_addr,
@@ -732,12 +745,30 @@ static int qpnp_lpg_configure_pattern(struct qpnp_pwm_chip *chip)
QPNP_LPG_PATTERN_CONFIG), 1, chip);
}
+static int qpnp_lpg_glitch_removal(struct qpnp_pwm_chip *chip, bool enable)
+{
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ u8 value, mask;
+
+ qpnp_set_pwm_type_config(&value, enable ? 1 : 0, 0, 0, 0);
+
+ mask = QPNP_EN_GLITCH_REMOVAL_MASK | QPNP_EN_FULL_SCALE_MASK |
+ QPNP_EN_PHASE_STAGGER_MASK | QPNP_PHASE_STAGGER_MASK;
+
+ pr_debug("pwm_type_config: %d\n", value);
+ return qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_LPG_PWM_TYPE_CONFIG],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_LPG_PWM_TYPE_CONFIG), 1, chip);
+}
+
static int qpnp_lpg_configure_pwm(struct qpnp_pwm_chip *chip)
{
struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
- int rc;
- u8 value, mask;
+ int rc;
+ pr_debug("pwm_size_clk: %d\n",
+ chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]);
rc = regmap_write(chip->regmap,
SPMI_LPG_REG_ADDR(lpg_config->base_addr,
QPNP_LPG_PWM_SIZE_CLK),
@@ -746,6 +777,8 @@ static int qpnp_lpg_configure_pwm(struct qpnp_pwm_chip *chip)
if (rc)
return rc;
+ pr_debug("pwm_freq_prediv_clk: %d\n",
+ chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK]);
rc = regmap_write(chip->regmap,
SPMI_LPG_REG_ADDR(lpg_config->base_addr,
QPNP_LPG_PWM_FREQ_PREDIV_CLK),
@@ -753,15 +786,13 @@ static int qpnp_lpg_configure_pwm(struct qpnp_pwm_chip *chip)
if (rc)
return rc;
- qpnp_set_pwm_type_config(&value, 1, 0, 0, 0);
-
- mask = QPNP_EN_GLITCH_REMOVAL_MASK | QPNP_EN_FULL_SCALE_MASK |
- QPNP_EN_PHASE_STAGGER_MASK | QPNP_PHASE_STAGGER_MASK;
-
- return qpnp_lpg_save_and_write(value, mask,
- &chip->qpnp_lpg_registers[QPNP_LPG_PWM_TYPE_CONFIG],
- SPMI_LPG_REG_ADDR(lpg_config->base_addr,
- QPNP_LPG_PWM_TYPE_CONFIG), 1, chip);
+ /* Disable glitch removal when LPG/PWM is configured */
+ rc = qpnp_lpg_glitch_removal(chip, false);
+ if (rc) {
+ pr_err("Error in disabling glitch control, rc=%d\n", rc);
+ return rc;
+ }
+ return rc;
}
static int qpnp_configure_pwm_control(struct qpnp_pwm_chip *chip)
@@ -997,22 +1028,10 @@ static int qpnp_dtest_config(struct qpnp_pwm_chip *chip, bool enable)
{
struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
u8 value;
+ u8 mask;
u16 addr;
int rc = 0;
- if (!chip->dtest_output) {
- pr_err("DTEST output not configured for channel %d\n",
- chip->channel_id);
- return -EPERM;
- }
-
- if (chip->dtest_line > QPNP_LPG_DTEST_LINE_MAX ||
- chip->dtest_output > QPNP_LPG_DTEST_OUTPUT_MAX) {
- pr_err("DTEST line/output values are improper for channel %d\n",
- chip->channel_id);
- return -EINVAL;
- }
-
value = 0xA5;
addr = SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_LPG_SEC_ACCESS);
@@ -1027,8 +1046,13 @@ static int qpnp_dtest_config(struct qpnp_pwm_chip *chip, bool enable)
addr = SPMI_LPG_REG_ADDR(lpg_config->base_addr,
QPNP_LPG_DTEST + chip->dtest_line - 1);
+ if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE)
+ mask = QPNP_PWM_DTEST_OUTPUT_MASK;
+ else
+ mask = QPNP_LPG_DTEST_OUTPUT_MASK;
+
if (enable)
- value = chip->dtest_output & QPNP_DTEST_OUTPUT_MASK;
+ value = chip->dtest_output & mask;
else
value = 0;
@@ -1153,6 +1177,7 @@ static int qpnp_lpg_configure_pwm_state(struct qpnp_pwm_chip *chip,
pr_err("Failed to configure TEST mode\n");
}
+ pr_debug("pwm_enable_control: %d\n", value);
rc = qpnp_lpg_save_and_write(value, mask,
&chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
SPMI_LPG_REG_ADDR(lpg_config->base_addr,
@@ -1192,8 +1217,20 @@ static int _pwm_config(struct qpnp_pwm_chip *chip,
if (rc)
goto out;
- if (!rc && chip->enabled)
+ if (!rc && chip->enabled) {
rc = qpnp_lpg_configure_pwm_state(chip, QPNP_PWM_ENABLE);
+ if (rc) {
+ pr_err("Error in configuring pwm state, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Enable the glitch removal after PWM is enabled */
+ rc = qpnp_lpg_glitch_removal(chip, true);
+ if (rc) {
+ pr_err("Error in enabling glitch control, rc=%d\n", rc);
+ return rc;
+ }
+ }
pr_debug("duty/period=%u/%u %s: pwm_value=%d (of %d)\n",
(unsigned)duty_value, (unsigned)period_value,
@@ -1685,6 +1722,17 @@ static int qpnp_parse_pwm_dt_config(struct device_node *of_pwm_node,
return rc;
}
+ if (period < chip->pwm_config.pwm_duty || period > PM_PWM_PERIOD_MAX ||
+ period < PM_PWM_PERIOD_MIN) {
+ pr_err("Invalid pwm period(%d) or duty(%d)\n", period,
+ chip->pwm_config.pwm_duty);
+ return -EINVAL;
+ }
+
+ qpnp_lpg_calc_period(LVL_USEC, period, chip);
+ qpnp_lpg_save_period(chip);
+ chip->pwm_config.pwm_period = period;
+
rc = _pwm_config(chip, LVL_USEC, chip->pwm_config.pwm_duty, period);
return rc;
@@ -1953,17 +2001,32 @@ static int qpnp_parse_dt_config(struct platform_device *pdev,
}
}
- rc = of_property_read_u32(of_node, "qcom,lpg-dtest-line",
+ rc = of_property_read_u32(of_node, "qcom,dtest-line",
&chip->dtest_line);
if (rc) {
chip->in_test_mode = 0;
} else {
- chip->in_test_mode = 1;
rc = of_property_read_u32(of_node, "qcom,dtest-output",
&chip->dtest_output);
if (rc) {
pr_err("Missing DTEST output configuration\n");
- chip->dtest_output = 0;
+ return rc;
+ }
+ chip->in_test_mode = 1;
+ }
+
+ if (chip->in_test_mode) {
+ if ((chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) &&
+ (chip->dtest_line > QPNP_PWM_DTEST_LINE_MAX ||
+ chip->dtest_output > QPNP_PWM_DTEST_OUTPUT_MAX)) {
+ pr_err("DTEST line/output values are improper for PWM channel %d\n",
+ chip->channel_id);
+ return -EINVAL;
+ } else if (chip->dtest_line > QPNP_LPG_DTEST_LINE_MAX ||
+ chip->dtest_output > QPNP_LPG_DTEST_OUTPUT_MAX) {
+ pr_err("DTEST line/output values are improper for LPG channel %d\n",
+ chip->channel_id);
+ return -EINVAL;
}
}
@@ -2049,8 +2112,10 @@ static int qpnp_pwm_probe(struct platform_device *pdev)
rc = qpnp_parse_dt_config(pdev, pwm_chip);
- if (rc)
+ if (rc) {
+ pr_err("Failed parsing DT parameters, rc=%d\n", rc);
goto failed_config;
+ }
pwm_chip->chip.dev = &pdev->dev;
pwm_chip->chip.ops = &qpnp_pwm_ops;
@@ -2066,6 +2131,8 @@ static int qpnp_pwm_probe(struct platform_device *pdev)
if (pwm_chip->channel_owner)
pwm_chip->chip.pwms[0].label = pwm_chip->channel_owner;
+ pr_debug("PWM device channel:%d probed successfully\n",
+ pwm_chip->channel_id);
return 0;
failed_insert:
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index cc5a7b4de476..b40431515b8e 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -467,6 +467,18 @@ config MSM_RUN_QUEUE_STATS
This information is exported to usespace via sysfs entries and userspace
algorithms uses info and decide when to turn on/off the cpu cores.
+config MSM_JTAGV8
+ bool "Debug and ETM trace support across power collapse for ARMv8"
+ default y if CORESIGHT_SOURCE_ETM4X
+ help
+ Enables support for debugging (specifically breakpoints) and ETM
+ processor tracing across power collapse both for JTag and OS hosted
+ software running on ARMv8 target. Enabling this will ensure debug
+ and ETM registers are saved and restored across power collapse.
+
+ If unsure, say 'N' here to avoid potential power, performance and
+ memory penalty.
+
config MSM_BOOT_STATS
bool "Use MSM boot stats reporting"
help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index a2fcc3c36922..027e52439301 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_MSM_RUN_QUEUE_STATS) += msm_rq_stats.o
obj-$(CONFIG_MSM_BOOT_STATS) += boot_stats.o
obj-$(CONFIG_MSM_AVTIMER) += avtimer.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_kryo.o
+obj-$(CONFIG_MSM_JTAGV8) += jtag-fuse.o jtagv8.o jtagv8-etm.o
obj-$(CONFIG_MSM_KERNEL_PROTECT) += kernel_protect.o
obj-$(CONFIG_MSM_RTB) += msm_rtb-hotplug.o
obj-$(CONFIG_QCOM_REMOTEQDSS) += remoteqdss.o
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index f42067ef73fe..435b43f0c10f 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -20,6 +20,8 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -60,7 +62,8 @@ struct icnss_driver_event {
enum cnss_driver_state {
ICNSS_WLFW_QMI_CONNECTED,
ICNSS_FW_READY,
- ICNSS_DRIVER_PROBED
+ ICNSS_DRIVER_PROBED,
+ ICNSS_FW_TEST_MODE,
};
#ifdef ICNSS_PANIC
@@ -131,6 +134,7 @@ static struct icnss_data {
icnss_mem_region[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
bool skip_qmi;
struct completion driver_unregister;
+ struct dentry *root_dentry;
} *penv;
static char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
@@ -1105,72 +1109,6 @@ out:
}
EXPORT_SYMBOL(icnss_unregister_driver);
-int icnss_register_ce_irq(unsigned int ce_id,
- irqreturn_t (*handler)(int, void *),
- unsigned long flags, const char *name)
-{
- int ret = 0;
- unsigned int irq;
- struct ce_irq_list *irq_entry;
-
- if (!penv || !penv->pdev) {
- ret = -ENODEV;
- goto out;
- }
- if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
- pr_err("icnss: Invalid CE ID %d\n", ce_id);
- ret = -EINVAL;
- goto out;
- }
- irq = penv->ce_irqs[ce_id];
- irq_entry = &penv->ce_irq_list[ce_id];
-
- if (irq_entry->handler || irq_entry->irq) {
- pr_err("icnss: handler already registered %d\n", irq);
- ret = -EEXIST;
- goto out;
- }
-
- ret = request_irq(irq, handler, IRQF_SHARED, name, &penv->pdev->dev);
- if (ret) {
- pr_err("icnss: IRQ not registered %d\n", irq);
- ret = -EINVAL;
- goto out;
- }
- irq_entry->irq = irq;
- irq_entry->handler = handler;
- pr_debug("icnss: IRQ registered %d\n", irq);
-out:
- return ret;
-
-}
-EXPORT_SYMBOL(icnss_register_ce_irq);
-
-int icnss_unregister_ce_irq(unsigned int ce_id)
-{
- int ret = 0;
- unsigned int irq;
- struct ce_irq_list *irq_entry;
-
- if (!penv || !penv->pdev) {
- ret = -ENODEV;
- goto out;
- }
- irq = penv->ce_irqs[ce_id];
- irq_entry = &penv->ce_irq_list[ce_id];
- if (!irq_entry->handler || !irq_entry->irq) {
- pr_err("icnss: handler not registered %d\n", irq);
- ret = -EEXIST;
- goto out;
- }
- free_irq(irq, &penv->pdev->dev);
- irq_entry->irq = 0;
- irq_entry->handler = NULL;
-out:
- return ret;
-}
-EXPORT_SYMBOL(icnss_unregister_ce_irq);
-
int icnss_ce_request_irq(unsigned int ce_id,
irqreturn_t (*handler)(int, void *),
unsigned long flags, const char *name, void *ctx)
@@ -1380,38 +1318,6 @@ int icnss_get_ce_id(int irq)
}
EXPORT_SYMBOL(icnss_get_ce_id);
-static ssize_t icnss_wlan_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- int val;
- int ret;
-
- if (!penv)
- return -ENODEV;
-
- ret = kstrtoint(buf, 0, &val);
- if (ret)
- return ret;
-
- if (val == ICNSS_WALTEST || val == ICNSS_CCPM) {
- pr_debug("%s: WLAN Test Mode -> %d\n", __func__, val);
- ret = icnss_wlan_enable(NULL, val, NULL);
- if (ret)
- pr_err("%s: WLAN Test Mode %d failed with %d\n",
- __func__, val, ret);
- } else {
- pr_err("%s: Mode %d is not supported from command line\n",
- __func__, val);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static DEVICE_ATTR(icnss_wlan_mode, S_IWUSR, NULL, icnss_wlan_mode_store);
-
static struct clk *icnss_clock_init(struct device *dev, const char *cname)
{
struct clk *c;
@@ -1594,6 +1500,191 @@ static int icnss_release_resources(void)
return ret;
}
+static int icnss_test_mode_show(struct seq_file *s, void *data)
+{
+ struct icnss_data *priv = s->private;
+
+ seq_puts(s, "0 : Test mode disable\n");
+ seq_puts(s, "1 : WLAN Firmware test\n");
+ seq_puts(s, "2 : CCPM test\n");
+
+ seq_puts(s, "\n");
+
+ if (!test_bit(ICNSS_FW_READY, &priv->state)) {
+ seq_puts(s, "Firmware is not ready yet!, wait for FW READY\n");
+ goto out;
+ }
+
+ if (test_bit(ICNSS_DRIVER_PROBED, &priv->state)) {
+ seq_puts(s, "Machine mode is running, can't run test mode!\n");
+ goto out;
+ }
+
+ if (test_bit(ICNSS_FW_TEST_MODE, &priv->state)) {
+ seq_puts(s, "Test mode is running!\n");
+ goto out;
+ }
+
+ seq_puts(s, "Test can be run, Have fun!\n");
+
+out:
+ seq_puts(s, "\n");
+ return 0;
+}
+
+static int icnss_test_mode_fw_test_off(struct icnss_data *priv)
+{
+ int ret;
+
+ if (!test_bit(ICNSS_FW_READY, &priv->state)) {
+ pr_err("Firmware is not ready yet!, wait for FW READY: state: 0x%lx\n",
+ priv->state);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (test_bit(ICNSS_DRIVER_PROBED, &priv->state)) {
+ pr_err("Machine mode is running, can't run test mode: state: 0x%lx\n",
+ priv->state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!test_bit(ICNSS_FW_TEST_MODE, &priv->state)) {
+ pr_err("Test mode not started, state: 0x%lx\n", priv->state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ icnss_wlan_disable(ICNSS_OFF);
+
+ ret = icnss_hw_power_off(priv);
+
+ clear_bit(ICNSS_FW_TEST_MODE, &priv->state);
+
+out:
+ return ret;
+}
+static int icnss_test_mode_fw_test(struct icnss_data *priv,
+ enum icnss_driver_mode mode)
+{
+ int ret;
+
+ if (!test_bit(ICNSS_FW_READY, &priv->state)) {
+ pr_err("Firmware is not ready yet!, wait for FW READY, state: 0x%lx\n",
+ priv->state);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (test_bit(ICNSS_DRIVER_PROBED, &priv->state)) {
+ pr_err("Machine mode is running, can't run test mode, state: 0x%lx\n",
+ priv->state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(ICNSS_FW_TEST_MODE, &priv->state)) {
+ pr_err("Test mode already started, state: 0x%lx\n",
+ priv->state);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = icnss_hw_power_on(priv);
+ if (ret < 0)
+ goto out;
+
+ set_bit(ICNSS_FW_TEST_MODE, &priv->state);
+
+ ret = icnss_wlan_enable(NULL, mode, NULL);
+ if (ret)
+ goto power_off;
+
+ return 0;
+
+power_off:
+ icnss_hw_power_off(priv);
+ clear_bit(ICNSS_FW_TEST_MODE, &priv->state);
+
+out:
+ return ret;
+}
+
+static ssize_t icnss_test_mode_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ struct icnss_data *priv =
+ ((struct seq_file *)fp->private_data)->private;
+ int ret;
+ u32 val;
+
+ ret = kstrtou32_from_user(buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0:
+ ret = icnss_test_mode_fw_test_off(priv);
+ break;
+ case 1:
+ ret = icnss_test_mode_fw_test(priv, ICNSS_WALTEST);
+ break;
+ case 2:
+ ret = icnss_test_mode_fw_test(priv, ICNSS_CCPM);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int icnss_test_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, icnss_test_mode_show, inode->i_private);
+}
+
+static const struct file_operations icnss_test_mode_fops = {
+ .read = seq_read,
+ .write = icnss_test_mode_write,
+ .release = single_release,
+ .open = icnss_test_mode_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static int icnss_debugfs_create(struct icnss_data *priv)
+{
+ int ret = 0;
+ struct dentry *root_dentry;
+
+	root_dentry = debugfs_create_dir("icnss", NULL);
+
+ if (IS_ERR(root_dentry)) {
+ ret = PTR_ERR(root_dentry);
+ pr_err("Unable to create debugfs %d\n", ret);
+ goto out;
+ }
+
+ priv->root_dentry = root_dentry;
+
+ debugfs_create_file("test_mode", S_IRUSR | S_IWUSR,
+ root_dentry, priv, &icnss_test_mode_fops);
+
+out:
+ return ret;
+}
+
+static void icnss_debugfs_destroy(struct icnss_data *priv)
+{
+ debugfs_remove_recursive(priv->root_dentry);
+}
+
static int icnss_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -1713,20 +1804,13 @@ static int icnss_probe(struct platform_device *pdev)
penv->skip_qmi = of_property_read_bool(dev->of_node,
"qcom,skip-qmi");
- ret = device_create_file(dev, &dev_attr_icnss_wlan_mode);
- if (ret) {
- pr_err("%s: wlan_mode sys file creation failed\n",
- __func__);
- goto err_wlan_mode;
- }
-
spin_lock_init(&penv->event_lock);
penv->event_wq = alloc_workqueue("icnss_driver_event", 0, 0);
if (!penv->event_wq) {
pr_err("%s: workqueue creation failed\n", __func__);
ret = -EFAULT;
- goto err_workqueue;
+ goto err_smmu_clock_enable;
}
INIT_WORK(&penv->event_work, icnss_driver_event_work);
@@ -1742,6 +1826,8 @@ static int icnss_probe(struct platform_device *pdev)
goto err_qmi;
}
+ icnss_debugfs_create(penv);
+
pr_info("icnss: Platform driver probed successfully\n");
return ret;
@@ -1749,11 +1835,6 @@ static int icnss_probe(struct platform_device *pdev)
err_qmi:
if (penv->event_wq)
destroy_workqueue(penv->event_wq);
-err_workqueue:
- device_remove_file(&pdev->dev, &dev_attr_icnss_wlan_mode);
-err_wlan_mode:
- if (penv->smmu_clk)
- icnss_clock_disable(penv->smmu_clk);
err_smmu_clock_enable:
if (penv->smmu_mapping)
icnss_smmu_remove(&pdev->dev);
@@ -1782,13 +1863,14 @@ static int icnss_remove(struct platform_device *pdev)
{
int ret = 0;
+ icnss_debugfs_destroy(penv);
+
qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01,
WLFW_SERVICE_VERS_V01,
WLFW_SERVICE_INS_ID_V01,
&wlfw_clnt_nb);
if (penv->event_wq)
destroy_workqueue(penv->event_wq);
- device_remove_file(&pdev->dev, &dev_attr_icnss_wlan_mode);
if (penv->smmu_mapping) {
if (penv->smmu_clk)
diff --git a/drivers/soc/qcom/jtag-fuse.c b/drivers/soc/qcom/jtag-fuse.c
new file mode 100644
index 000000000000..0b05ce9a22bb
--- /dev/null
+++ b/drivers/soc/qcom/jtag-fuse.c
@@ -0,0 +1,209 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <soc/qcom/jtag.h>
+
+#define fuse_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
+#define fuse_readl(drvdata, off) __raw_readl(drvdata->base + off)
+
+#define OEM_CONFIG0 (0x000)
+#define OEM_CONFIG1 (0x004)
+#define OEM_CONFIG2 (0x008)
+
+/* JTAG FUSE V1 */
+#define ALL_DEBUG_DISABLE BIT(21)
+#define APPS_DBGEN_DISABLE BIT(0)
+#define APPS_NIDEN_DISABLE BIT(1)
+#define APPS_SPIDEN_DISABLE BIT(2)
+#define APPS_SPNIDEN_DISABLE BIT(3)
+#define DAP_DEVICEEN_DISABLE BIT(8)
+
+/* JTAG FUSE V2 */
+#define ALL_DEBUG_DISABLE_V2 BIT(0)
+#define APPS_DBGEN_DISABLE_V2 BIT(10)
+#define APPS_NIDEN_DISABLE_V2 BIT(11)
+#define APPS_SPIDEN_DISABLE_V2 BIT(12)
+#define APPS_SPNIDEN_DISABLE_V2 BIT(13)
+#define DAP_DEVICEEN_DISABLE_V2 BIT(18)
+
+/* JTAG FUSE V3 */
+#define ALL_DEBUG_DISABLE_V3 BIT(29)
+#define APPS_DBGEN_DISABLE_V3 BIT(8)
+#define APPS_NIDEN_DISABLE_V3 BIT(21)
+#define APPS_SPIDEN_DISABLE_V3 BIT(5)
+#define APPS_SPNIDEN_DISABLE_V3 BIT(31)
+#define DAP_DEVICEEN_DISABLE_V3 BIT(7)
+
+#define JTAG_FUSE_VERSION_V1 "qcom,jtag-fuse"
+#define JTAG_FUSE_VERSION_V2 "qcom,jtag-fuse-v2"
+#define JTAG_FUSE_VERSION_V3 "qcom,jtag-fuse-v3"
+
+struct fuse_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ bool fuse_v2;
+ bool fuse_v3;
+};
+
+static struct fuse_drvdata *fusedrvdata;
+
+bool msm_jtag_fuse_apps_access_disabled(void)
+{
+ struct fuse_drvdata *drvdata = fusedrvdata;
+ uint32_t config0, config1, config2;
+ bool ret = false;
+
+ if (!drvdata)
+ return false;
+
+ config0 = fuse_readl(drvdata, OEM_CONFIG0);
+ config1 = fuse_readl(drvdata, OEM_CONFIG1);
+
+ dev_dbg(drvdata->dev, "apps config0: %lx\n", (unsigned long)config0);
+ dev_dbg(drvdata->dev, "apps config1: %lx\n", (unsigned long)config1);
+
+ if (drvdata->fuse_v3) {
+ config2 = fuse_readl(drvdata, OEM_CONFIG2);
+ dev_dbg(drvdata->dev, "apps config2: %lx\n",
+ (unsigned long)config2);
+ }
+
+ if (drvdata->fuse_v3) {
+ if (config0 & ALL_DEBUG_DISABLE_V3)
+ ret = true;
+ else if (config1 & APPS_DBGEN_DISABLE_V3)
+ ret = true;
+ else if (config1 & APPS_NIDEN_DISABLE_V3)
+ ret = true;
+ else if (config2 & APPS_SPIDEN_DISABLE_V3)
+ ret = true;
+ else if (config1 & APPS_SPNIDEN_DISABLE_V3)
+ ret = true;
+ else if (config1 & DAP_DEVICEEN_DISABLE_V3)
+ ret = true;
+ } else if (drvdata->fuse_v2) {
+ if (config1 & ALL_DEBUG_DISABLE_V2)
+ ret = true;
+ else if (config1 & APPS_DBGEN_DISABLE_V2)
+ ret = true;
+ else if (config1 & APPS_NIDEN_DISABLE_V2)
+ ret = true;
+ else if (config1 & APPS_SPIDEN_DISABLE_V2)
+ ret = true;
+ else if (config1 & APPS_SPNIDEN_DISABLE_V2)
+ ret = true;
+ else if (config1 & DAP_DEVICEEN_DISABLE_V2)
+ ret = true;
+ } else {
+ if (config0 & ALL_DEBUG_DISABLE)
+ ret = true;
+ else if (config1 & APPS_DBGEN_DISABLE)
+ ret = true;
+ else if (config1 & APPS_NIDEN_DISABLE)
+ ret = true;
+ else if (config1 & APPS_SPIDEN_DISABLE)
+ ret = true;
+ else if (config1 & APPS_SPNIDEN_DISABLE)
+ ret = true;
+ else if (config1 & DAP_DEVICEEN_DISABLE)
+ ret = true;
+ }
+
+ if (ret)
+ dev_dbg(drvdata->dev, "apps fuse disabled\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_jtag_fuse_apps_access_disabled);
+
+static const struct of_device_id jtag_fuse_match[] = {
+ {.compatible = JTAG_FUSE_VERSION_V1 },
+ {.compatible = JTAG_FUSE_VERSION_V2 },
+ {.compatible = JTAG_FUSE_VERSION_V3 },
+ {}
+};
+
+static int jtag_fuse_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fuse_drvdata *drvdata;
+ struct resource *res;
+ const struct of_device_id *match;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ /* Store the driver data pointer for use in exported functions */
+ fusedrvdata = drvdata;
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ match = of_match_device(jtag_fuse_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ if (!strcmp(match->compatible, JTAG_FUSE_VERSION_V2))
+ drvdata->fuse_v2 = true;
+ else if (!strcmp(match->compatible, JTAG_FUSE_VERSION_V3))
+ drvdata->fuse_v3 = true;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fuse-base");
+ if (!res)
+ return -ENODEV;
+
+ drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!drvdata->base)
+ return -ENOMEM;
+
+ dev_info(dev, "JTag Fuse initialized\n");
+ return 0;
+}
+
+static int jtag_fuse_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver jtag_fuse_driver = {
+ .probe = jtag_fuse_probe,
+ .remove = jtag_fuse_remove,
+ .driver = {
+ .name = "msm-jtag-fuse",
+ .owner = THIS_MODULE,
+ .of_match_table = jtag_fuse_match,
+ },
+};
+
+static int __init jtag_fuse_init(void)
+{
+ return platform_driver_register(&jtag_fuse_driver);
+}
+arch_initcall(jtag_fuse_init);
+
+static void __exit jtag_fuse_exit(void)
+{
+ platform_driver_unregister(&jtag_fuse_driver);
+}
+module_exit(jtag_fuse_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("JTag Fuse driver");
diff --git a/drivers/soc/qcom/jtagv8-etm.c b/drivers/soc/qcom/jtagv8-etm.c
new file mode 100644
index 000000000000..2c15f7896c82
--- /dev/null
+++ b/drivers/soc/qcom/jtagv8-etm.c
@@ -0,0 +1,1722 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/of.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/jtag.h>
+#include <asm/smp_plat.h>
+#include <asm/etmv4x.h>
+#include <soc/qcom/socinfo.h>
+
+#define CORESIGHT_LAR (0xFB0)
+
+#define TIMEOUT_US (100)
+
+#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb) ((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n) ((val & BIT(n)) >> n)
+
+/*
+ * ETMv4 registers:
+ * 0x000 - 0x2FC: Trace registers
+ * 0x300 - 0x314: Management registers
+ * 0x318 - 0xEFC: Trace registers
+ * 0xF00: Management registers
+ * 0xFA0 - 0xFA4: Trace registers
+ * 0xFA8 - 0xFFC: Management registers
+ */
+
+/* Trace registers (0x000-0x2FC) */
+/* Main control and configuration registers */
+#define TRCPRGCTLR (0x004)
+#define TRCPROCSELR (0x008)
+#define TRCSTATR (0x00C)
+#define TRCCONFIGR (0x010)
+#define TRCAUXCTLR (0x018)
+#define TRCEVENTCTL0R (0x020)
+#define TRCEVENTCTL1R (0x024)
+#define TRCSTALLCTLR (0x02C)
+#define TRCTSCTLR (0x030)
+#define TRCSYNCPR (0x034)
+#define TRCCCCTLR (0x038)
+#define TRCBBCTLR (0x03C)
+#define TRCTRACEIDR (0x040)
+#define TRCQCTLR (0x044)
+/* Filtering control registers */
+#define TRCVICTLR (0x080)
+#define TRCVIIECTLR (0x084)
+#define TRCVISSCTLR (0x088)
+#define TRCVIPCSSCTLR (0x08C)
+#define TRCVDCTLR (0x0A0)
+#define TRCVDSACCTLR (0x0A4)
+#define TRCVDARCCTLR (0x0A8)
+/* Derived resources registers */
+#define TRCSEQEVRn(n) (0x100 + (n * 4))
+#define TRCSEQRSTEVR (0x118)
+#define TRCSEQSTR (0x11C)
+#define TRCEXTINSELR (0x120)
+#define TRCCNTRLDVRn(n) (0x140 + (n * 4))
+#define TRCCNTCTLRn(n) (0x150 + (n * 4))
+#define TRCCNTVRn(n) (0x160 + (n * 4))
+/* ID registers */
+#define TRCIDR8 (0x180)
+#define TRCIDR9 (0x184)
+#define TRCIDR10 (0x188)
+#define TRCIDR11 (0x18C)
+#define TRCIDR12 (0x190)
+#define TRCIDR13 (0x194)
+#define TRCIMSPEC0 (0x1C0)
+#define TRCIMSPECn(n) (0x1C0 + (n * 4))
+#define TRCIDR0 (0x1E0)
+#define TRCIDR1 (0x1E4)
+#define TRCIDR2 (0x1E8)
+#define TRCIDR3 (0x1EC)
+#define TRCIDR4 (0x1F0)
+#define TRCIDR5 (0x1F4)
+#define TRCIDR6 (0x1F8)
+#define TRCIDR7 (0x1FC)
+/* Resource selection registers */
+#define TRCRSCTLRn(n) (0x200 + (n * 4))
+/* Single-shot comparator registers */
+#define TRCSSCCRn(n) (0x280 + (n * 4))
+#define TRCSSCSRn(n) (0x2A0 + (n * 4))
+#define TRCSSPCICRn(n) (0x2C0 + (n * 4))
+/* Management registers (0x300-0x314) */
+#define TRCOSLAR (0x300)
+#define TRCOSLSR (0x304)
+#define TRCPDCR (0x310)
+#define TRCPDSR (0x314)
+/* Trace registers (0x318-0xEFC) */
+/* Comparator registers */
+#define TRCACVRn(n) (0x400 + (n * 8))
+#define TRCACATRn(n) (0x480 + (n * 8))
+#define TRCDVCVRn(n) (0x500 + (n * 16))
+#define TRCDVCMRn(n) (0x580 + (n * 16))
+#define TRCCIDCVRn(n) (0x600 + (n * 8))
+#define TRCVMIDCVRn(n) (0x640 + (n * 8))
+#define TRCCIDCCTLR0 (0x680)
+#define TRCCIDCCTLR1 (0x684)
+#define TRCVMIDCCTLR0 (0x688)
+#define TRCVMIDCCTLR1 (0x68C)
+/* Management register (0xF00) */
+/* Integration control registers */
+#define TRCITCTRL (0xF00)
+/* Trace registers (0xFA0-0xFA4) */
+/* Claim tag registers */
+#define TRCCLAIMSET (0xFA0)
+#define TRCCLAIMCLR (0xFA4)
+/* Management registers (0xFA8-0xFFC) */
+#define TRCDEVAFF0 (0xFA8)
+#define TRCDEVAFF1 (0xFAC)
+#define TRCLAR (0xFB0)
+#define TRCLSR (0xFB4)
+#define TRCAUTHSTATUS (0xFB8)
+#define TRCDEVARCH (0xFBC)
+#define TRCDEVID (0xFC8)
+#define TRCDEVTYPE (0xFCC)
+#define TRCPIDR4 (0xFD0)
+#define TRCPIDR5 (0xFD4)
+#define TRCPIDR6 (0xFD8)
+#define TRCPIDR7 (0xFDC)
+#define TRCPIDR0 (0xFE0)
+#define TRCPIDR1 (0xFE4)
+#define TRCPIDR2 (0xFE8)
+#define TRCPIDR3 (0xFEC)
+#define TRCCIDR0 (0xFF0)
+#define TRCCIDR1 (0xFF4)
+#define TRCCIDR2 (0xFF8)
+#define TRCCIDR3 (0xFFC)
+
+/* ETMv4 resources */
+#define ETM_MAX_NR_PE (8)
+#define ETM_MAX_CNTR (4)
+#define ETM_MAX_SEQ_STATES (4)
+#define ETM_MAX_EXT_INP_SEL (4)
+#define ETM_MAX_EXT_INP (256)
+#define ETM_MAX_EXT_OUT (4)
+#define ETM_MAX_SINGLE_ADDR_CMP (16)
+#define ETM_MAX_ADDR_RANGE_CMP (ETM_MAX_SINGLE_ADDR_CMP / 2)
+#define ETM_MAX_DATA_VAL_CMP (8)
+#define ETM_MAX_CTXID_CMP (8)
+#define ETM_MAX_VMID_CMP (8)
+#define ETM_MAX_PE_CMP (8)
+#define ETM_MAX_RES_SEL (32)
+#define ETM_MAX_SS_CMP (8)
+
+#define ETM_CPMR_CLKEN (0x4)
+#define ETM_ARCH_V4 (0x40)
+
+#define MAX_ETM_STATE_SIZE (165)
+
+#define TZ_DBG_ETM_FEAT_ID (0x8)
+#define TZ_DBG_ETM_VER (0x400000)
+#define HW_SOC_ID_M8953 (293)
+
+#define etm_writel(etm, val, off) \
+ __raw_writel(val, etm->base + off)
+#define etm_readl(etm, off) \
+ __raw_readl(etm->base + off)
+
+#define etm_writeq(etm, val, off) \
+ __raw_writeq(val, etm->base + off)
+#define etm_readq(etm, off) \
+ __raw_readq(etm->base + off)
+
+#define ETM_LOCK(base) \
+do { \
+ mb(); /* ensure configuration take effect before we lock it */ \
+ etm_writel(base, 0x0, CORESIGHT_LAR); \
+} while (0)
+
+#define ETM_UNLOCK(base) \
+do { \
+ etm_writel(base, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ mb(); /* ensure unlock take effect before we configure */ \
+} while (0)
+
+struct etm_ctx {
+ uint8_t arch;
+ uint8_t nr_pe;
+ uint8_t nr_pe_cmp;
+ uint8_t nr_addr_cmp;
+ uint8_t nr_data_cmp;
+ uint8_t nr_cntr;
+ uint8_t nr_ext_inp;
+ uint8_t nr_ext_inp_sel;
+ uint8_t nr_ext_out;
+ uint8_t nr_ctxid_cmp;
+ uint8_t nr_vmid_cmp;
+ uint8_t nr_seq_state;
+ uint8_t nr_event;
+ uint8_t nr_resource;
+ uint8_t nr_ss_cmp;
+ bool si_enable;
+ bool save_restore_disabled;
+ bool save_restore_enabled;
+ bool os_lock_present;
+ bool init;
+ bool enable;
+ void __iomem *base;
+ struct device *dev;
+ uint64_t *state;
+ spinlock_t spinlock;
+ struct mutex mutex;
+};
+
+static struct etm_ctx *etm[NR_CPUS];
+static int cnt;
+
+static struct clk *clock[NR_CPUS];
+
+ATOMIC_NOTIFIER_HEAD(etm_save_notifier_list);
+ATOMIC_NOTIFIER_HEAD(etm_restore_notifier_list);
+
+int msm_jtag_save_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&etm_save_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_save_register);
+
+int msm_jtag_save_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&etm_save_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_save_unregister);
+
+int msm_jtag_restore_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&etm_restore_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_restore_register);
+
+int msm_jtag_restore_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&etm_restore_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_restore_unregister);
+
+static void etm_os_lock(struct etm_ctx *etmdata)
+{
+ if (etmdata->os_lock_present) {
+ etm_writel(etmdata, 0x1, TRCOSLAR);
+ /* Ensure OS lock is set before proceeding */
+ mb();
+ }
+}
+
+static void etm_os_unlock(struct etm_ctx *etmdata)
+{
+ if (etmdata->os_lock_present) {
+ /* Ensure all writes are complete before clearing OS lock */
+ mb();
+ etm_writel(etmdata, 0x0, TRCOSLAR);
+ }
+}
+
+static inline void etm_mm_save_state(struct etm_ctx *etmdata)
+{
+ int i, j, count;
+
+ i = 0;
+ mb(); /* ensure all register writes complete before saving them */
+ isb();
+ ETM_UNLOCK(etmdata);
+
+ switch (etmdata->arch) {
+ case ETM_ARCH_V4:
+ etm_os_lock(etmdata);
+
+ /* poll until programmers' model becomes stable */
+ for (count = TIMEOUT_US; (BVAL(etm_readl(etmdata, TRCSTATR), 1)
+ != 1) && count > 0; count--)
+ udelay(1);
+ if (count == 0)
+ pr_err_ratelimited("programmers model is not stable\n"
+ );
+
+ /* main control and configuration registers */
+ etmdata->state[i++] = etm_readl(etmdata, TRCPROCSELR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCCONFIGR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCAUXCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCEVENTCTL0R);
+ etmdata->state[i++] = etm_readl(etmdata, TRCEVENTCTL1R);
+ etmdata->state[i++] = etm_readl(etmdata, TRCSTALLCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCTSCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCSYNCPR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCCCCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCBBCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCTRACEIDR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCQCTLR);
+ /* filtering control registers */
+ etmdata->state[i++] = etm_readl(etmdata, TRCVICTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVIIECTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVISSCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVIPCSSCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVDCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVDSACCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVDARCCTLR);
+ /* derived resource registers */
+ for (j = 0; j < etmdata->nr_seq_state-1; j++)
+ etmdata->state[i++] = etm_readl(etmdata, TRCSEQEVRn(j));
+ etmdata->state[i++] = etm_readl(etmdata, TRCSEQRSTEVR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCSEQSTR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCEXTINSELR);
+ for (j = 0; j < etmdata->nr_cntr; j++) {
+ etmdata->state[i++] = etm_readl(etmdata,
+ TRCCNTRLDVRn(j));
+ etmdata->state[i++] = etm_readl(etmdata,
+ TRCCNTCTLRn(j));
+ etmdata->state[i++] = etm_readl(etmdata,
+ TRCCNTVRn(j));
+ }
+ /* resource selection registers */
+ for (j = 0; j < etmdata->nr_resource; j++)
+ etmdata->state[i++] = etm_readl(etmdata, TRCRSCTLRn(j));
+ /* comparator registers */
+ for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
+ etmdata->state[i++] = etm_readq(etmdata, TRCACVRn(j));
+ etmdata->state[i++] = etm_readq(etmdata, TRCACATRn(j));
+ }
+ for (j = 0; j < etmdata->nr_data_cmp; j++) {
+ etmdata->state[i++] = etm_readq(etmdata, TRCDVCVRn(j));
+			etmdata->state[i++] = etm_readq(etmdata, TRCDVCMRn(j));
+ }
+ for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+ etmdata->state[i++] = etm_readq(etmdata, TRCCIDCVRn(j));
+ etmdata->state[i++] = etm_readl(etmdata, TRCCIDCCTLR0);
+ etmdata->state[i++] = etm_readl(etmdata, TRCCIDCCTLR1);
+ for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+ etmdata->state[i++] = etm_readq(etmdata,
+ TRCVMIDCVRn(j));
+ etmdata->state[i++] = etm_readl(etmdata, TRCVMIDCCTLR0);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVMIDCCTLR1);
+ /* single-shot comparator registers */
+ for (j = 0; j < etmdata->nr_ss_cmp; j++) {
+ etmdata->state[i++] = etm_readl(etmdata, TRCSSCCRn(j));
+ etmdata->state[i++] = etm_readl(etmdata, TRCSSCSRn(j));
+ etmdata->state[i++] = etm_readl(etmdata,
+ TRCSSPCICRn(j));
+ }
+ /* claim tag registers */
+ etmdata->state[i++] = etm_readl(etmdata, TRCCLAIMCLR);
+ /* program ctrl register */
+ etmdata->state[i++] = etm_readl(etmdata, TRCPRGCTLR);
+
+ /* ensure trace unit is idle to be powered down */
+ for (count = TIMEOUT_US; (BVAL(etm_readl(etmdata, TRCSTATR), 0)
+ != 1) && count > 0; count--)
+ udelay(1);
+ if (count == 0)
+ pr_err_ratelimited("timeout waiting for idle state\n");
+
+ atomic_notifier_call_chain(&etm_save_notifier_list, 0, NULL);
+
+ break;
+ default:
+ pr_err_ratelimited("unsupported etm arch %d in %s\n",
+ etmdata->arch, __func__);
+ }
+
+ ETM_LOCK(etmdata);
+}
+
+static inline void etm_mm_restore_state(struct etm_ctx *etmdata)
+{
+ int i, j;
+
+ i = 0;
+ ETM_UNLOCK(etmdata);
+
+ switch (etmdata->arch) {
+ case ETM_ARCH_V4:
+ atomic_notifier_call_chain(&etm_restore_notifier_list, 0, NULL);
+
+ /* check OS lock is locked */
+ if (BVAL(etm_readl(etmdata, TRCOSLSR), 1) != 1) {
+ pr_err_ratelimited("OS lock is unlocked\n");
+ etm_os_lock(etmdata);
+ }
+
+ /* main control and configuration registers */
+ etm_writel(etmdata, etmdata->state[i++], TRCPROCSELR);
+ etm_writel(etmdata, etmdata->state[i++], TRCCONFIGR);
+ etm_writel(etmdata, etmdata->state[i++], TRCAUXCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCEVENTCTL0R);
+ etm_writel(etmdata, etmdata->state[i++], TRCEVENTCTL1R);
+ etm_writel(etmdata, etmdata->state[i++], TRCSTALLCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCTSCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCSYNCPR);
+ etm_writel(etmdata, etmdata->state[i++], TRCCCCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCBBCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCTRACEIDR);
+ etm_writel(etmdata, etmdata->state[i++], TRCQCTLR);
+ /* filtering control registers */
+ etm_writel(etmdata, etmdata->state[i++], TRCVICTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVIIECTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVISSCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVIPCSSCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVDCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVDSACCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVDARCCTLR);
+ /* derived resources registers */
+ for (j = 0; j < etmdata->nr_seq_state-1; j++)
+ etm_writel(etmdata, etmdata->state[i++], TRCSEQEVRn(j));
+ etm_writel(etmdata, etmdata->state[i++], TRCSEQRSTEVR);
+ etm_writel(etmdata, etmdata->state[i++], TRCSEQSTR);
+ etm_writel(etmdata, etmdata->state[i++], TRCEXTINSELR);
+ for (j = 0; j < etmdata->nr_cntr; j++) {
+ etm_writel(etmdata, etmdata->state[i++],
+ TRCCNTRLDVRn(j));
+ etm_writel(etmdata, etmdata->state[i++],
+ TRCCNTCTLRn(j));
+ etm_writel(etmdata, etmdata->state[i++], TRCCNTVRn(j));
+ }
+ /* resource selection registers */
+ for (j = 0; j < etmdata->nr_resource; j++)
+ etm_writel(etmdata, etmdata->state[i++], TRCRSCTLRn(j));
+ /* comparator registers */
+ for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
+ etm_writeq(etmdata, etmdata->state[i++], TRCACVRn(j));
+ etm_writeq(etmdata, etmdata->state[i++], TRCACATRn(j));
+ }
+ for (j = 0; j < etmdata->nr_data_cmp; j++) {
+ etm_writeq(etmdata, etmdata->state[i++], TRCDVCVRn(j));
+ etm_writeq(etmdata, etmdata->state[i++], TRCDVCMRn(j));
+ }
+ for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+ etm_writeq(etmdata, etmdata->state[i++], TRCCIDCVRn(j));
+ etm_writel(etmdata, etmdata->state[i++], TRCCIDCCTLR0);
+ etm_writel(etmdata, etmdata->state[i++], TRCCIDCCTLR1);
+ for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+ etm_writeq(etmdata, etmdata->state[i++],
+ TRCVMIDCVRn(j));
+ etm_writel(etmdata, etmdata->state[i++], TRCVMIDCCTLR0);
+ etm_writel(etmdata, etmdata->state[i++], TRCVMIDCCTLR1);
+		/* single-shot comparator registers */
+ for (j = 0; j < etmdata->nr_ss_cmp; j++) {
+ etm_writel(etmdata, etmdata->state[i++], TRCSSCCRn(j));
+ etm_writel(etmdata, etmdata->state[i++], TRCSSCSRn(j));
+ etm_writel(etmdata, etmdata->state[i++],
+ TRCSSPCICRn(j));
+ }
+ /* claim tag registers */
+ etm_writel(etmdata, etmdata->state[i++], TRCCLAIMSET);
+ /* program ctrl register */
+ etm_writel(etmdata, etmdata->state[i++], TRCPRGCTLR);
+
+ etm_os_unlock(etmdata);
+ break;
+ default:
+ pr_err_ratelimited("unsupported etm arch %d in %s\n",
+ etmdata->arch, __func__);
+ }
+
+ ETM_LOCK(etmdata);
+}
+
+static inline void etm_clk_disable(void)
+{
+ uint32_t cpmr;
+
+ isb();
+ cpmr = trc_readl(CPMR_EL1);
+ cpmr &= ~ETM_CPMR_CLKEN;
+ trc_write(cpmr, CPMR_EL1);
+}
+
+static inline void etm_clk_enable(void)
+{
+ uint32_t cpmr;
+
+ cpmr = trc_readl(CPMR_EL1);
+ cpmr |= ETM_CPMR_CLKEN;
+ trc_write(cpmr, CPMR_EL1);
+ isb();
+}
+
+static int etm_read_ssxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = trc_readl(ETMSEQEVR0);
+ break;
+ case 1:
+ state[i++] = trc_readl(ETMSEQEVR1);
+ break;
+ case 2:
+ state[i++] = trc_readl(ETMSEQEVR2);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+static int etm_read_crxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = trc_readl(ETMCNTRLDVR0);
+ state[i++] = trc_readl(ETMCNTCTLR0);
+ state[i++] = trc_readl(ETMCNTVR0);
+ break;
+ case 1:
+ state[i++] = trc_readl(ETMCNTRLDVR1);
+ state[i++] = trc_readl(ETMCNTCTLR1);
+ state[i++] = trc_readl(ETMCNTVR1);
+ break;
+ case 2:
+ state[i++] = trc_readl(ETMCNTRLDVR2);
+ state[i++] = trc_readl(ETMCNTCTLR2);
+ state[i++] = trc_readl(ETMCNTVR2);
+ break;
+ case 3:
+ state[i++] = trc_readl(ETMCNTRLDVR3);
+ state[i++] = trc_readl(ETMCNTCTLR3);
+ state[i++] = trc_readl(ETMCNTVR3);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+static int etm_read_rsxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 2:
+ state[i++] = trc_readl(ETMRSCTLR2);
+ break;
+ case 3:
+ state[i++] = trc_readl(ETMRSCTLR3);
+ break;
+ case 4:
+ state[i++] = trc_readl(ETMRSCTLR4);
+ break;
+ case 5:
+ state[i++] = trc_readl(ETMRSCTLR5);
+ break;
+ case 6:
+ state[i++] = trc_readl(ETMRSCTLR6);
+ break;
+ case 7:
+ state[i++] = trc_readl(ETMRSCTLR7);
+ break;
+ case 8:
+ state[i++] = trc_readl(ETMRSCTLR8);
+ break;
+ case 9:
+ state[i++] = trc_readl(ETMRSCTLR9);
+ break;
+ case 10:
+ state[i++] = trc_readl(ETMRSCTLR10);
+ break;
+ case 11:
+ state[i++] = trc_readl(ETMRSCTLR11);
+ break;
+ case 12:
+ state[i++] = trc_readl(ETMRSCTLR12);
+ break;
+ case 13:
+ state[i++] = trc_readl(ETMRSCTLR13);
+ break;
+ case 14:
+ state[i++] = trc_readl(ETMRSCTLR14);
+ break;
+ case 15:
+ state[i++] = trc_readl(ETMRSCTLR15);
+ break;
+ case 16:
+ state[i++] = trc_readl(ETMRSCTLR16);
+ break;
+ case 17:
+ state[i++] = trc_readl(ETMRSCTLR17);
+ break;
+ case 18:
+ state[i++] = trc_readl(ETMRSCTLR18);
+ break;
+ case 19:
+ state[i++] = trc_readl(ETMRSCTLR19);
+ break;
+ case 20:
+ state[i++] = trc_readl(ETMRSCTLR20);
+ break;
+ case 21:
+ state[i++] = trc_readl(ETMRSCTLR21);
+ break;
+ case 22:
+ state[i++] = trc_readl(ETMRSCTLR22);
+ break;
+ case 23:
+ state[i++] = trc_readl(ETMRSCTLR23);
+ break;
+ case 24:
+ state[i++] = trc_readl(ETMRSCTLR24);
+ break;
+ case 25:
+ state[i++] = trc_readl(ETMRSCTLR25);
+ break;
+ case 26:
+ state[i++] = trc_readl(ETMRSCTLR26);
+ break;
+ case 27:
+ state[i++] = trc_readl(ETMRSCTLR27);
+ break;
+ case 28:
+ state[i++] = trc_readl(ETMRSCTLR28);
+ break;
+ case 29:
+ state[i++] = trc_readl(ETMRSCTLR29);
+ break;
+ case 30:
+ state[i++] = trc_readl(ETMRSCTLR30);
+ break;
+ case 31:
+ state[i++] = trc_readl(ETMRSCTLR31);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+static int etm_read_acr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = trc_readq(ETMACVR0);
+ state[i++] = trc_readq(ETMACATR0);
+ break;
+ case 1:
+ state[i++] = trc_readq(ETMACVR1);
+ state[i++] = trc_readq(ETMACATR1);
+ break;
+ case 2:
+ state[i++] = trc_readq(ETMACVR2);
+ state[i++] = trc_readq(ETMACATR2);
+ break;
+ case 3:
+ state[i++] = trc_readq(ETMACVR3);
+ state[i++] = trc_readq(ETMACATR3);
+ break;
+ case 4:
+ state[i++] = trc_readq(ETMACVR4);
+ state[i++] = trc_readq(ETMACATR4);
+ break;
+ case 5:
+ state[i++] = trc_readq(ETMACVR5);
+ state[i++] = trc_readq(ETMACATR5);
+ break;
+ case 6:
+ state[i++] = trc_readq(ETMACVR6);
+ state[i++] = trc_readq(ETMACATR6);
+ break;
+ case 7:
+ state[i++] = trc_readq(ETMACVR7);
+ state[i++] = trc_readq(ETMACATR7);
+ break;
+ case 8:
+ state[i++] = trc_readq(ETMACVR8);
+ state[i++] = trc_readq(ETMACATR8);
+ break;
+ case 9:
+ state[i++] = trc_readq(ETMACVR9);
+ state[i++] = trc_readq(ETMACATR9);
+ break;
+ case 10:
+ state[i++] = trc_readq(ETMACVR10);
+ state[i++] = trc_readq(ETMACATR10);
+ break;
+ case 11:
+ state[i++] = trc_readq(ETMACVR11);
+ state[i++] = trc_readq(ETMACATR11);
+ break;
+ case 12:
+ state[i++] = trc_readq(ETMACVR12);
+ state[i++] = trc_readq(ETMACATR12);
+ break;
+ case 13:
+ state[i++] = trc_readq(ETMACVR13);
+ state[i++] = trc_readq(ETMACATR13);
+ break;
+ case 14:
+ state[i++] = trc_readq(ETMACVR14);
+ state[i++] = trc_readq(ETMACATR14);
+ break;
+ case 15:
+ state[i++] = trc_readq(ETMACVR15);
+ state[i++] = trc_readq(ETMACATR15);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * etm_read_dvcr - save data value comparator pair @j into @state
+ * @state: per-cpu save buffer
+ * @i: current write index into @state
+ * @j: data value comparator index (0-7)
+ *
+ * Saves ETMDVCVRj then ETMDVCMRj (value, then mask) — the restore path
+ * (etm_write_dvcr) consumes them in the same order.  Out-of-range
+ * indices are logged and skipped.  Returns the updated index.
+ */
+static int etm_read_dvcr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = trc_readq(ETMDVCVR0);
+		state[i++] = trc_readq(ETMDVCMR0);
+		break;
+	case 1:
+		state[i++] = trc_readq(ETMDVCVR1);
+		state[i++] = trc_readq(ETMDVCMR1);
+		break;
+	case 2:
+		state[i++] = trc_readq(ETMDVCVR2);
+		state[i++] = trc_readq(ETMDVCMR2);
+		break;
+	case 3:
+		state[i++] = trc_readq(ETMDVCVR3);
+		state[i++] = trc_readq(ETMDVCMR3);
+		break;
+	case 4:
+		state[i++] = trc_readq(ETMDVCVR4);
+		state[i++] = trc_readq(ETMDVCMR4);
+		break;
+	case 5:
+		state[i++] = trc_readq(ETMDVCVR5);
+		state[i++] = trc_readq(ETMDVCMR5);
+		break;
+	case 6:
+		state[i++] = trc_readq(ETMDVCVR6);
+		state[i++] = trc_readq(ETMDVCMR6);
+		break;
+	case 7:
+		state[i++] = trc_readq(ETMDVCVR7);
+		state[i++] = trc_readq(ETMDVCMR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+/*
+ * etm_read_ccvr - save context-ID comparator value register @j
+ * @state: per-cpu save buffer
+ * @i: current write index into @state
+ * @j: context-ID comparator index (0-7)
+ *
+ * Saves ETMCIDCVRj; out-of-range indices are logged and skipped.
+ * Returns the updated index.
+ */
+static int etm_read_ccvr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = trc_readq(ETMCIDCVR0);
+		break;
+	case 1:
+		state[i++] = trc_readq(ETMCIDCVR1);
+		break;
+	case 2:
+		state[i++] = trc_readq(ETMCIDCVR2);
+		break;
+	case 3:
+		state[i++] = trc_readq(ETMCIDCVR3);
+		break;
+	case 4:
+		state[i++] = trc_readq(ETMCIDCVR4);
+		break;
+	case 5:
+		state[i++] = trc_readq(ETMCIDCVR5);
+		break;
+	case 6:
+		state[i++] = trc_readq(ETMCIDCVR6);
+		break;
+	case 7:
+		state[i++] = trc_readq(ETMCIDCVR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+/*
+ * etm_read_vcvr - save VMID comparator value register @j
+ * @state: per-cpu save buffer
+ * @i: current write index into @state
+ * @j: VMID comparator index (0-7)
+ *
+ * Saves ETMVMIDCVRj; out-of-range indices are logged and skipped.
+ * Returns the updated index.
+ */
+static int etm_read_vcvr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = trc_readq(ETMVMIDCVR0);
+		break;
+	case 1:
+		state[i++] = trc_readq(ETMVMIDCVR1);
+		break;
+	case 2:
+		state[i++] = trc_readq(ETMVMIDCVR2);
+		break;
+	case 3:
+		state[i++] = trc_readq(ETMVMIDCVR3);
+		break;
+	case 4:
+		state[i++] = trc_readq(ETMVMIDCVR4);
+		break;
+	case 5:
+		state[i++] = trc_readq(ETMVMIDCVR5);
+		break;
+	case 6:
+		state[i++] = trc_readq(ETMVMIDCVR6);
+		break;
+	case 7:
+		state[i++] = trc_readq(ETMVMIDCVR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+/*
+ * etm_read_sscr - save single-shot comparator register triplet @j
+ * @state: per-cpu save buffer
+ * @i: current write index into @state
+ * @j: single-shot comparator index (0-7)
+ *
+ * Saves ETMSSCCRj, ETMSSCSRj and ETMSSPCICRj in that order — the
+ * restore path (etm_write_sscr) consumes them in the same order.
+ * Out-of-range indices are logged and skipped.  Returns the updated
+ * index.
+ */
+static int etm_read_sscr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = trc_readl(ETMSSCCR0);
+		state[i++] = trc_readl(ETMSSCSR0);
+		state[i++] = trc_readl(ETMSSPCICR0);
+		break;
+	case 1:
+		state[i++] = trc_readl(ETMSSCCR1);
+		state[i++] = trc_readl(ETMSSCSR1);
+		state[i++] = trc_readl(ETMSSPCICR1);
+		break;
+	case 2:
+		state[i++] = trc_readl(ETMSSCCR2);
+		state[i++] = trc_readl(ETMSSCSR2);
+		state[i++] = trc_readl(ETMSSPCICR2);
+		break;
+	case 3:
+		state[i++] = trc_readl(ETMSSCCR3);
+		state[i++] = trc_readl(ETMSSCSR3);
+		state[i++] = trc_readl(ETMSSPCICR3);
+		break;
+	case 4:
+		state[i++] = trc_readl(ETMSSCCR4);
+		state[i++] = trc_readl(ETMSSCSR4);
+		state[i++] = trc_readl(ETMSSPCICR4);
+		break;
+	case 5:
+		state[i++] = trc_readl(ETMSSCCR5);
+		state[i++] = trc_readl(ETMSSCSR5);
+		state[i++] = trc_readl(ETMSSPCICR5);
+		break;
+	case 6:
+		state[i++] = trc_readl(ETMSSCCR6);
+		state[i++] = trc_readl(ETMSSCSR6);
+		state[i++] = trc_readl(ETMSSPCICR6);
+		break;
+	case 7:
+		state[i++] = trc_readl(ETMSSCCR7);
+		state[i++] = trc_readl(ETMSSCSR7);
+		state[i++] = trc_readl(ETMSSPCICR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+/*
+ * etm_si_save_state - save ETMv4 trace-unit state via system instructions
+ * @etmdata: per-cpu ETM context; registers land in etmdata->state[]
+ *
+ * Sets the OS lock, polls ETMSTATR until the programmers' model is
+ * stable, then copies the programming registers into etmdata->state[]
+ * in a fixed order that etm_si_restore_state() writes back verbatim —
+ * any change to the order here must be mirrored there.  Finishes by
+ * waiting for the trace unit to report idle so it can be powered down.
+ * ETM power/clock is voted on for the duration.
+ */
+static inline void etm_si_save_state(struct etm_ctx *etmdata)
+{
+	int i, j, count;
+
+	i = 0;
+	/* Ensure all writes are complete before saving ETM registers */
+	mb();
+	isb();
+
+	/* Vote for ETM power/clock enable */
+	etm_clk_enable();
+
+	switch (etmdata->arch) {
+	case ETM_ARCH_V4:
+		/* Set OS lock so the external debugger cannot interfere */
+		trc_write(0x1, ETMOSLAR);
+		isb();
+
+		/* poll until programmers' model becomes stable */
+		for (count = TIMEOUT_US; (BVAL(trc_readl(ETMSTATR), 1)
+		     != 1) && count > 0; count--)
+			udelay(1);
+		if (count == 0)
+			pr_err_ratelimited("programmers model is not stable\n");
+
+		/* main control and configuration registers */
+		etmdata->state[i++] = trc_readl(ETMCONFIGR);
+		etmdata->state[i++] = trc_readl(ETMEVENTCTL0R);
+		etmdata->state[i++] = trc_readl(ETMEVENTCTL1R);
+		etmdata->state[i++] = trc_readl(ETMSTALLCTLR);
+		etmdata->state[i++] = trc_readl(ETMTSCTLR);
+		etmdata->state[i++] = trc_readl(ETMSYNCPR);
+		etmdata->state[i++] = trc_readl(ETMCCCTLR);
+		etmdata->state[i++] = trc_readl(ETMTRACEIDR);
+		/* filtering control registers */
+		etmdata->state[i++] = trc_readl(ETMVICTLR);
+		etmdata->state[i++] = trc_readl(ETMVIIECTLR);
+		etmdata->state[i++] = trc_readl(ETMVISSCTLR);
+		/* derived resource registers: nr_seq_state-1 sequencer events */
+		for (j = 0; j < etmdata->nr_seq_state-1; j++)
+			i = etm_read_ssxr(etmdata->state, i, j);
+		etmdata->state[i++] = trc_readl(ETMSEQRSTEVR);
+		etmdata->state[i++] = trc_readl(ETMSEQSTR);
+		etmdata->state[i++] = trc_readl(ETMEXTINSELR);
+		for (j = 0; j < etmdata->nr_cntr; j++)
+			i = etm_read_crxr(etmdata->state, i, j);
+		/* resource selection registers (0 and 1 are reserved, start at 2) */
+		for (j = 0; j < etmdata->nr_resource; j++)
+			i = etm_read_rsxr(etmdata->state, i, j + 2);
+		/* comparator registers (address comparators come in pairs) */
+		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
+			i = etm_read_acr(etmdata->state, i, j);
+		for (j = 0; j < etmdata->nr_data_cmp; j++)
+			i = etm_read_dvcr(etmdata->state, i, j);
+		for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+			i = etm_read_ccvr(etmdata->state, i, j);
+		etmdata->state[i++] = trc_readl(ETMCIDCCTLR0);
+		for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+			i = etm_read_vcvr(etmdata->state, i, j);
+		/* single-shot comparator registers */
+		for (j = 0; j < etmdata->nr_ss_cmp; j++)
+			i = etm_read_sscr(etmdata->state, i, j);
+		/* program ctrl register */
+		etmdata->state[i++] = trc_readl(ETMPRGCTLR);
+
+		/* ensure trace unit is idle to be powered down */
+		for (count = TIMEOUT_US; (BVAL(trc_readl(ETMSTATR), 0)
+		     != 1) && count > 0; count--)
+			udelay(1);
+		if (count == 0)
+			pr_err_ratelimited("timeout waiting for idle state\n");
+
+		atomic_notifier_call_chain(&etm_save_notifier_list, 0, NULL);
+
+		break;
+	default:
+		pr_err_ratelimited("unsupported etm arch %d in %s\n",
+				   etmdata->arch, __func__);
+	}
+
+	/* Vote for ETM power/clock disable */
+	etm_clk_disable();
+}
+
+/*
+ * etm_write_ssxr - restore sequencer state transition event register @j
+ * @state: per-cpu save buffer filled by etm_read_ssxr()
+ * @i: current read index into @state
+ * @j: sequencer event index (0-2)
+ *
+ * Restores ETMSEQEVRj; out-of-range indices are logged and skipped.
+ * Returns the updated index.
+ */
+static int etm_write_ssxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		trc_write(state[i++], ETMSEQEVR0);
+		break;
+	case 1:
+		trc_write(state[i++], ETMSEQEVR1);
+		break;
+	case 2:
+		trc_write(state[i++], ETMSEQEVR2);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+/*
+ * etm_write_crxr - restore counter register triplet @j
+ * @state: per-cpu save buffer filled by etm_read_crxr()
+ * @i: current read index into @state
+ * @j: counter index (0-3)
+ *
+ * Restores ETMCNTRLDVRj, ETMCNTCTLRj and ETMCNTVRj in the same order
+ * the save path stored them.  Out-of-range indices are logged and
+ * skipped.  Returns the updated index.
+ */
+static int etm_write_crxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		trc_write(state[i++], ETMCNTRLDVR0);
+		trc_write(state[i++], ETMCNTCTLR0);
+		trc_write(state[i++], ETMCNTVR0);
+		break;
+	case 1:
+		trc_write(state[i++], ETMCNTRLDVR1);
+		trc_write(state[i++], ETMCNTCTLR1);
+		trc_write(state[i++], ETMCNTVR1);
+		break;
+	case 2:
+		trc_write(state[i++], ETMCNTRLDVR2);
+		trc_write(state[i++], ETMCNTCTLR2);
+		trc_write(state[i++], ETMCNTVR2);
+		break;
+	case 3:
+		trc_write(state[i++], ETMCNTRLDVR3);
+		trc_write(state[i++], ETMCNTCTLR3);
+		trc_write(state[i++], ETMCNTVR3);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_write_rsxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 2:
+ trc_write(state[i++], ETMRSCTLR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMRSCTLR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMRSCTLR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMRSCTLR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMRSCTLR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMRSCTLR7);
+ break;
+ case 8:
+ trc_write(state[i++], ETMRSCTLR8);
+ break;
+ case 9:
+ trc_write(state[i++], ETMRSCTLR9);
+ break;
+ case 10:
+ trc_write(state[i++], ETMRSCTLR10);
+ break;
+ case 11:
+ trc_write(state[i++], ETMRSCTLR11);
+ break;
+ case 12:
+ trc_write(state[i++], ETMRSCTLR12);
+ break;
+ case 13:
+ trc_write(state[i++], ETMRSCTLR13);
+ break;
+ case 14:
+ trc_write(state[i++], ETMRSCTLR14);
+ break;
+ case 15:
+ trc_write(state[i++], ETMRSCTLR15);
+ break;
+ case 16:
+ trc_write(state[i++], ETMRSCTLR16);
+ break;
+ case 17:
+ trc_write(state[i++], ETMRSCTLR17);
+ break;
+ case 18:
+ trc_write(state[i++], ETMRSCTLR18);
+ break;
+ case 19:
+ trc_write(state[i++], ETMRSCTLR19);
+ break;
+ case 20:
+ trc_write(state[i++], ETMRSCTLR20);
+ break;
+ case 21:
+ trc_write(state[i++], ETMRSCTLR21);
+ break;
+ case 22:
+ trc_write(state[i++], ETMRSCTLR22);
+ break;
+ case 23:
+ trc_write(state[i++], ETMRSCTLR23);
+ break;
+ case 24:
+ trc_write(state[i++], ETMRSCTLR24);
+ break;
+ case 25:
+ trc_write(state[i++], ETMRSCTLR25);
+ break;
+ case 26:
+ trc_write(state[i++], ETMRSCTLR26);
+ break;
+ case 27:
+ trc_write(state[i++], ETMRSCTLR27);
+ break;
+ case 28:
+ trc_write(state[i++], ETMRSCTLR28);
+ break;
+ case 29:
+ trc_write(state[i++], ETMRSCTLR29);
+ break;
+ case 30:
+ trc_write(state[i++], ETMRSCTLR30);
+ break;
+ case 31:
+ trc_write(state[i++], ETMRSCTLR31);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+static int etm_write_acr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMACVR0);
+ trc_write(state[i++], ETMACATR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMACVR1);
+ trc_write(state[i++], ETMACATR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMACVR2);
+ trc_write(state[i++], ETMACATR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMACVR3);
+ trc_write(state[i++], ETMACATR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMACVR4);
+ trc_write(state[i++], ETMACATR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMACVR5);
+ trc_write(state[i++], ETMACATR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMACVR6);
+ trc_write(state[i++], ETMACATR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMACVR7);
+ trc_write(state[i++], ETMACATR7);
+ break;
+ case 8:
+ trc_write(state[i++], ETMACVR8);
+ trc_write(state[i++], ETMACATR8);
+ break;
+ case 9:
+ trc_write(state[i++], ETMACVR9);
+ trc_write(state[i++], ETMACATR9);
+ break;
+ case 10:
+ trc_write(state[i++], ETMACVR10);
+ trc_write(state[i++], ETMACATR10);
+ break;
+ case 11:
+ trc_write(state[i++], ETMACVR11);
+ trc_write(state[i++], ETMACATR11);
+ break;
+ case 12:
+ trc_write(state[i++], ETMACVR12);
+ trc_write(state[i++], ETMACATR12);
+ break;
+ case 13:
+ trc_write(state[i++], ETMACVR13);
+ trc_write(state[i++], ETMACATR13);
+ break;
+ case 14:
+ trc_write(state[i++], ETMACVR14);
+ trc_write(state[i++], ETMACATR14);
+ break;
+ case 15:
+ trc_write(state[i++], ETMACVR15);
+ trc_write(state[i++], ETMACATR15);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+static int etm_write_dvcr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMDVCVR0);
+ trc_write(state[i++], ETMDVCMR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMDVCVR1);
+ trc_write(state[i++], ETMDVCMR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMDVCVR2);
+ trc_write(state[i++], ETMDVCMR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMDVCVR3);
+ trc_write(state[i++], ETMDVCMR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMDVCVR4);
+ trc_write(state[i++], ETMDVCMR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMDVCVR5);
+ trc_write(state[i++], ETMDVCMR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMDVCVR6);
+ trc_write(state[i++], ETMDVCMR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMDVCVR7);
+ trc_write(state[i++], ETMDVCMR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+static int etm_write_ccvr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMCIDCVR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMCIDCVR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMCIDCVR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMCIDCVR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMCIDCVR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMCIDCVR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMCIDCVR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMCIDCVR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+static int etm_write_vcvr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMVMIDCVR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMVMIDCVR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMVMIDCVR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMVMIDCVR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMVMIDCVR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMVMIDCVR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMVMIDCVR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMVMIDCVR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+static int etm_write_sscr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMSSCCR0);
+ trc_write(state[i++], ETMSSCSR0);
+ trc_write(state[i++], ETMSSPCICR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMSSCCR1);
+ trc_write(state[i++], ETMSSCSR1);
+ trc_write(state[i++], ETMSSPCICR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMSSCCR2);
+ trc_write(state[i++], ETMSSCSR2);
+ trc_write(state[i++], ETMSSPCICR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMSSCCR3);
+ trc_write(state[i++], ETMSSCSR3);
+ trc_write(state[i++], ETMSSPCICR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMSSCCR4);
+ trc_write(state[i++], ETMSSCSR4);
+ trc_write(state[i++], ETMSSPCICR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMSSCCR5);
+ trc_write(state[i++], ETMSSCSR5);
+ trc_write(state[i++], ETMSSPCICR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMSSCCR6);
+ trc_write(state[i++], ETMSSCSR6);
+ trc_write(state[i++], ETMSSPCICR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMSSCCR7);
+ trc_write(state[i++], ETMSSCSR7);
+ trc_write(state[i++], ETMSSPCICR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * etm_si_restore_state - reprogram the ETMv4 trace unit after power-up
+ * @etmdata: per-cpu ETM context holding the state saved by
+ *           etm_si_save_state()
+ *
+ * Writes the registers back in exactly the order the save path read
+ * them.  The OS lock is expected to still be held from the save path;
+ * it is re-asserted if found clear, and released only after all
+ * programming (including ETMPRGCTLR) is complete.  ETM power/clock is
+ * voted on for the duration.
+ */
+static inline void etm_si_restore_state(struct etm_ctx *etmdata)
+{
+	int i, j;
+
+	i = 0;
+
+	/* Vote for ETM power/clock enable */
+	etm_clk_enable();
+
+	switch (etmdata->arch) {
+	case ETM_ARCH_V4:
+		atomic_notifier_call_chain(&etm_restore_notifier_list, 0, NULL);
+
+		/* check OS lock is locked */
+		if (BVAL(trc_readl(ETMOSLSR), 1) != 1) {
+			pr_err_ratelimited("OS lock is unlocked\n");
+			trc_write(0x1, ETMOSLAR);
+			isb();
+		}
+
+		/* main control and configuration registers */
+		trc_write(etmdata->state[i++], ETMCONFIGR);
+		trc_write(etmdata->state[i++], ETMEVENTCTL0R);
+		trc_write(etmdata->state[i++], ETMEVENTCTL1R);
+		trc_write(etmdata->state[i++], ETMSTALLCTLR);
+		trc_write(etmdata->state[i++], ETMTSCTLR);
+		trc_write(etmdata->state[i++], ETMSYNCPR);
+		trc_write(etmdata->state[i++], ETMCCCTLR);
+		trc_write(etmdata->state[i++], ETMTRACEIDR);
+		/* filtering control registers */
+		trc_write(etmdata->state[i++], ETMVICTLR);
+		trc_write(etmdata->state[i++], ETMVIIECTLR);
+		trc_write(etmdata->state[i++], ETMVISSCTLR);
+		/* derived resources registers: nr_seq_state-1 sequencer events */
+		for (j = 0; j < etmdata->nr_seq_state-1; j++)
+			i = etm_write_ssxr(etmdata->state, i, j);
+		trc_write(etmdata->state[i++], ETMSEQRSTEVR);
+		trc_write(etmdata->state[i++], ETMSEQSTR);
+		trc_write(etmdata->state[i++], ETMEXTINSELR);
+		for (j = 0; j < etmdata->nr_cntr; j++)
+			i = etm_write_crxr(etmdata->state, i, j);
+		/* resource selection registers (0 and 1 are reserved, start at 2) */
+		for (j = 0; j < etmdata->nr_resource; j++)
+			i = etm_write_rsxr(etmdata->state, i, j + 2);
+		/* comparator registers (address comparators come in pairs) */
+		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
+			i = etm_write_acr(etmdata->state, i, j);
+		for (j = 0; j < etmdata->nr_data_cmp; j++)
+			i = etm_write_dvcr(etmdata->state, i, j);
+		for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+			i = etm_write_ccvr(etmdata->state, i, j);
+		trc_write(etmdata->state[i++], ETMCIDCCTLR0);
+		for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+			i = etm_write_vcvr(etmdata->state, i, j);
+		/* single-shot comparator registers */
+		for (j = 0; j < etmdata->nr_ss_cmp; j++)
+			i = etm_write_sscr(etmdata->state, i, j);
+		/* program ctrl register */
+		trc_write(etmdata->state[i++], ETMPRGCTLR);
+
+		/* release the OS lock only after programming is complete */
+		isb();
+		trc_write(0x0, ETMOSLAR);
+		break;
+	default:
+		pr_err_ratelimited("unsupported etm arch %d in %s\n",
+				   etmdata->arch, __func__);
+	}
+
+	/* Vote for ETM power/clock disable */
+	etm_clk_disable();
+}
+
+/*
+ * msm_jtag_etm_save_state - save this cpu's ETM state before power collapse
+ *
+ * No-op unless the current cpu's ETM context exists and save/restore is
+ * both enabled and not explicitly disabled.  Dispatches to the system-
+ * instruction or memory-mapped save path per the DT configuration.
+ */
+void msm_jtag_etm_save_state(void)
+{
+	struct etm_ctx *etmdata = etm[raw_smp_processor_id()];
+
+	if (!etmdata || etmdata->save_restore_disabled ||
+	    !etmdata->save_restore_enabled)
+		return;
+
+	if (etmdata->si_enable)
+		etm_si_save_state(etmdata);
+	else
+		etm_mm_save_state(etmdata);
+}
+EXPORT_SYMBOL(msm_jtag_etm_save_state);
+
+/*
+ * msm_jtag_etm_restore_state - restore this cpu's ETM state after power-up
+ *
+ * Mirrors msm_jtag_etm_save_state(): no-op unless the cpu's ETM context
+ * exists and save/restore is enabled.  The save_restore_enabled flag
+ * also ensures we only restore state that was actually saved.
+ */
+void msm_jtag_etm_restore_state(void)
+{
+	struct etm_ctx *etmdata = etm[raw_smp_processor_id()];
+
+	if (!etmdata || etmdata->save_restore_disabled ||
+	    !etmdata->save_restore_enabled)
+		return;
+
+	if (etmdata->si_enable)
+		etm_si_restore_state(etmdata);
+	else
+		etm_mm_restore_state(etmdata);
+}
+EXPORT_SYMBOL(msm_jtag_etm_restore_state);
+
+/* Only the ETMv4 architecture has a save/restore implementation here. */
+static inline bool etm_arch_supported(uint8_t arch)
+{
+	return arch == ETM_ARCH_V4;
+}
+
+/*
+ * etm_os_lock_init - detect whether the trace unit implements an OS lock
+ *
+ * Reads TRCOSLSR and checks bit[3] set with bit[0] clear; presumably
+ * this decodes the OSLM field (split across those bits in ETMv4) as
+ * "OS lock implemented" — confirm against the ETMv4 reference manual.
+ */
+static void etm_os_lock_init(struct etm_ctx *etmdata)
+{
+	uint32_t etmoslsr;
+
+	etmoslsr = etm_readl(etmdata, TRCOSLSR);
+	if ((BVAL(etmoslsr, 0) == 0) && BVAL(etmoslsr, 3))
+		etmdata->os_lock_present = true;
+	else
+		etmdata->os_lock_present = false;
+}
+
+/*
+ * etm_init_arch_data - cache trace-unit identification on the target cpu
+ * @info: the cpu's struct etm_ctx (void* because this runs via
+ *        smp_call_function_single / hotplug callback)
+ *
+ * Unlocks the ETM, probes for the OS lock, then reads:
+ *  - TRCIDR1[11:4]: architecture version (etmdata->arch)
+ *  - TRCIDR4: counts of address/data/ctxid/vmid comparators, resource
+ *    selectors and single-shot comparators
+ *  - TRCIDR5: sequencer state count and counter count
+ * These counts bound the loops in the save/restore paths.
+ */
+static void etm_init_arch_data(void *info)
+{
+	uint32_t val;
+	struct etm_ctx *etmdata = info;
+
+	ETM_UNLOCK(etmdata);
+
+	etm_os_lock_init(etmdata);
+
+	val = etm_readl(etmdata, TRCIDR1);
+	etmdata->arch = BMVAL(val, 4, 11);
+
+	/* number of resources trace unit supports */
+	val = etm_readl(etmdata, TRCIDR4);
+	etmdata->nr_addr_cmp = BMVAL(val, 0, 3);
+	etmdata->nr_data_cmp = BMVAL(val, 4, 7);
+	etmdata->nr_resource = BMVAL(val, 16, 19);
+	etmdata->nr_ss_cmp = BMVAL(val, 20, 23);
+	etmdata->nr_ctxid_cmp = BMVAL(val, 24, 27);
+	etmdata->nr_vmid_cmp = BMVAL(val, 28, 31);
+
+	val = etm_readl(etmdata, TRCIDR5);
+	etmdata->nr_seq_state = BMVAL(val, 25, 27);
+	etmdata->nr_cntr = BMVAL(val, 28, 30);
+
+	ETM_LOCK(etmdata);
+}
+
+/*
+ * jtag_mm_etm_callback - CPU hotplug notifier for per-cpu ETM setup
+ *
+ * CPU_STARTING (atomic context, runs on the incoming cpu): perform the
+ * one-time arch-data read under the spinlock if probe could not do it
+ * (cpu was offline at probe time).
+ * CPU_ONLINE: decide once whether the kernel performs save/restore
+ * (only when the TZ ETM feature version is older than TZ_DBG_ETM_VER;
+ * newer TZ handles it itself), then mark the cpu's ETM enabled.
+ */
+static int jtag_mm_etm_callback(struct notifier_block *nfb,
+				unsigned long action,
+				void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	if (!etm[cpu])
+		goto out;
+
+	switch (action & (~CPU_TASKS_FROZEN)) {
+	case CPU_STARTING:
+		spin_lock(&etm[cpu]->spinlock);
+		if (!etm[cpu]->init) {
+			etm_init_arch_data(etm[cpu]);
+			etm[cpu]->init = true;
+		}
+		spin_unlock(&etm[cpu]->spinlock);
+		break;
+
+	case CPU_ONLINE:
+		mutex_lock(&etm[cpu]->mutex);
+		if (etm[cpu]->enable) {
+			mutex_unlock(&etm[cpu]->mutex);
+			goto out;
+		}
+		if (etm_arch_supported(etm[cpu]->arch)) {
+			if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
+			    TZ_DBG_ETM_VER)
+				etm[cpu]->save_restore_enabled = true;
+			else
+				pr_info("etm save-restore supported by TZ\n");
+		} else
+			pr_info("etm arch %u not supported\n", etm[cpu]->arch);
+		etm[cpu]->enable = true;
+		mutex_unlock(&etm[cpu]->mutex);
+		break;
+	default:
+		break;
+	}
+out:
+	return NOTIFY_OK;
+}
+
+/* Hotplug notifier: initializes/enables a cpu's ETM as it comes online */
+static struct notifier_block jtag_mm_etm_notifier = {
+	.notifier_call = jtag_mm_etm_callback,
+};
+
+/*
+ * Returns true on SoC revisions where ETM save/restore must be forced
+ * off: currently MSM8953 (HW_SOC_ID_M8953) version 1.0 only.
+ */
+static bool skip_etm_save_restore(void)
+{
+	uint32_t soc_id = socinfo_get_id();
+	uint32_t soc_ver = socinfo_get_version();
+
+	return soc_id == HW_SOC_ID_M8953 &&
+	       SOCINFO_VERSION_MAJOR(soc_ver) == 1 &&
+	       SOCINFO_VERSION_MINOR(soc_ver) == 0;
+}
+
+/*
+ * jtag_mm_etm_probe - set up the ETM save/restore context for one cpu
+ * @pdev: platform device carrying the "etm-base" resource and DT node
+ * @cpu: cpu this ETM instance belongs to
+ *
+ * Maps the ETM register space, reads the DT knobs (qcom,si-enable,
+ * qcom,save-restore-disable), allocates the per-cpu state buffer, and
+ * initializes arch data on the target cpu.  If the cpu is offline the
+ * initialization is deferred to the hotplug notifier (registered on
+ * the first probe).  Returns 0 on success or a negative errno.
+ */
+static int jtag_mm_etm_probe(struct platform_device *pdev, uint32_t cpu)
+{
+	struct etm_ctx *etmdata;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+
+	/* Allocate memory per cpu */
+	etmdata = devm_kzalloc(dev, sizeof(struct etm_ctx), GFP_KERNEL);
+	if (!etmdata)
+		return -ENOMEM;
+
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "etm-base");
+	if (!res)
+		return -ENODEV;
+
+	etmdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!etmdata->base)
+		return -EINVAL;
+
+	etmdata->si_enable = of_property_read_bool(pdev->dev.of_node,
+						   "qcom,si-enable");
+	etmdata->save_restore_disabled = of_property_read_bool(
+					 pdev->dev.of_node,
+					 "qcom,save-restore-disable");
+
+	/* Force save/restore off on known-problematic SoC revisions */
+	if (skip_etm_save_restore())
+		etmdata->save_restore_disabled = 1;
+
+	/* Allocate etm state save space per core */
+	etmdata->state = devm_kzalloc(dev,
+				      MAX_ETM_STATE_SIZE * sizeof(uint64_t),
+				      GFP_KERNEL);
+	if (!etmdata->state)
+		return -ENOMEM;
+
+	spin_lock_init(&etmdata->spinlock);
+	mutex_init(&etmdata->mutex);
+
+	/* Register the hotplug notifier once, on the first probed instance */
+	if (cnt++ == 0)
+		register_hotcpu_notifier(&jtag_mm_etm_notifier);
+
+	get_online_cpus();
+
+	/* Read arch data on the target cpu; fails if that cpu is offline */
+	if (!smp_call_function_single(cpu, etm_init_arch_data, etmdata,
+				      1))
+		etmdata->init = true;
+
+	etm[cpu] = etmdata;
+
+	put_online_cpus();
+
+	mutex_lock(&etmdata->mutex);
+	if (etmdata->init && !etmdata->enable) {
+		if (etm_arch_supported(etmdata->arch)) {
+			/* Kernel saves/restores only if TZ is too old to */
+			if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
+			    TZ_DBG_ETM_VER)
+				etmdata->save_restore_enabled = true;
+			else
+				pr_info("etm save-restore supported by TZ\n");
+		} else
+			pr_info("etm arch %u not supported\n", etmdata->arch);
+		etmdata->enable = true;
+	}
+	mutex_unlock(&etmdata->mutex);
+	return 0;
+}
+
+/*
+ * jtag_mm_probe - platform probe for one per-cpu jtag-mm instance
+ *
+ * Bails out if fuses disable apps debug access.  Resolves the owning
+ * cpu from the "qcom,coresight-jtagmm-cpu" phandle, sets up and enables
+ * the core clock at the CoreSight trace rate, then hands off to
+ * jtag_mm_etm_probe().  The clock is disabled again if the ETM probe
+ * fails.  Returns 0 on success or a negative errno.
+ */
+static int jtag_mm_probe(struct platform_device *pdev)
+{
+	int ret, i, cpu = -1;
+	struct device *dev = &pdev->dev;
+	struct device_node *cpu_node;
+
+	if (msm_jtag_fuse_apps_access_disabled())
+		return -EPERM;
+
+	cpu_node = of_parse_phandle(pdev->dev.of_node,
+				    "qcom,coresight-jtagmm-cpu", 0);
+	if (!cpu_node) {
+		dev_err(dev, "Jtag-mm cpu handle not specified\n");
+		return -ENODEV;
+	}
+	/* Map the DT cpu node back to a logical cpu number */
+	for_each_possible_cpu(i) {
+		if (cpu_node == of_get_cpu_node(i, NULL)) {
+			cpu = i;
+			break;
+		}
+	}
+	if (cpu == -1) {
+		dev_err(dev, "invalid Jtag-mm cpu handle\n");
+		return -EINVAL;
+	}
+
+	clock[cpu] = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(clock[cpu])) {
+		ret = PTR_ERR(clock[cpu]);
+		return ret;
+	}
+
+	ret = clk_set_rate(clock[cpu], CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(clock[cpu]);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, clock[cpu]);
+
+	ret = jtag_mm_etm_probe(pdev, cpu);
+	if (ret)
+		clk_disable_unprepare(clock[cpu]);
+	return ret;
+}
+
+/* Tear down the shared hotplug notifier (called when the last instance goes) */
+static void jtag_mm_etm_remove(void)
+{
+	unregister_hotcpu_notifier(&jtag_mm_etm_notifier);
+}
+
+/*
+ * jtag_mm_remove - platform remove: drop the instance count (unhooking
+ * the hotplug notifier when it reaches zero) and release the clock
+ * enabled in probe.  Per-cpu memory is devm-managed and freed by core.
+ */
+static int jtag_mm_remove(struct platform_device *pdev)
+{
+	struct clk *clock = platform_get_drvdata(pdev);
+
+	if (--cnt == 0)
+		jtag_mm_etm_remove();
+	clk_disable_unprepare(clock);
+	return 0;
+}
+
+static const struct of_device_id msm_qdss_mm_match[] = {
+ { .compatible = "qcom,jtagv8-mm"},
+ {}
+};
+
+static struct platform_driver jtag_mm_driver = {
+ .probe = jtag_mm_probe,
+ .remove = jtag_mm_remove,
+ .driver = {
+ .name = "msm-jtagv8-mm",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_qdss_mm_match,
+ },
+};
+
+static int __init jtag_mm_init(void)
+{
+ return platform_driver_register(&jtag_mm_driver);
+}
+module_init(jtag_mm_init);
+
+static void __exit jtag_mm_exit(void)
+{
+ platform_driver_unregister(&jtag_mm_driver);
+}
+module_exit(jtag_mm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight DEBUGv8 and ETMv4 save-restore driver");
diff --git a/drivers/soc/qcom/jtagv8.c b/drivers/soc/qcom/jtagv8.c
new file mode 100644
index 000000000000..94c391eabaea
--- /dev/null
+++ b/drivers/soc/qcom/jtagv8.c
@@ -0,0 +1,1015 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/jtag.h>
+#ifdef CONFIG_ARM64
+#include <asm/debugv8.h>
+#else
+#include <asm/hardware/debugv8.h>
+#endif
+
+#define TIMEOUT_US (100)
+
+#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb) ((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n) ((val & BIT(n)) >> n)
+
+#ifdef CONFIG_ARM64
+#define ARM_DEBUG_ARCH_V8 (0x6)
+#endif
+
+#define MAX_DBG_REGS (66)
+#define MAX_DBG_STATE_SIZE (MAX_DBG_REGS * num_possible_cpus())
+
+#define OSLOCK_MAGIC (0xC5ACCE55)
+#define TZ_DBG_ETM_FEAT_ID (0x8)
+#define TZ_DBG_ETM_VER (0x400000)
+
+uint32_t msm_jtag_save_cntr[NR_CPUS];
+uint32_t msm_jtag_restore_cntr[NR_CPUS];
+
+/* access debug registers using system instructions */
+struct dbg_cpu_ctx {
+ uint32_t *state;
+};
+
+/* Global debug-architecture context shared across cpus */
+struct dbg_ctx {
+	uint8_t arch;			/* debug architecture version */
+	bool save_restore_enabled;	/* kernel (not TZ) saves/restores */
+	uint8_t nr_wp;			/* number of watchpoints */
+	uint8_t nr_bp;			/* number of breakpoints */
+	uint8_t nr_ctx_cmp;		/* number of context-aware bps */
+#ifdef CONFIG_ARM64
+	uint64_t *state;		/* per-cpu slices of MAX_DBG_REGS */
+#else
+	uint32_t *state;
+#endif
+};
+
+static struct dbg_ctx dbg;
+static struct notifier_block jtag_hotcpu_save_notifier;
+static struct notifier_block jtag_hotcpu_restore_notifier;
+static struct notifier_block jtag_cpu_pm_notifier;
+
+#ifdef CONFIG_ARM64
+static int dbg_read_arch64_bxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = dbg_readq(DBGBVR0_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR0_EL1);
+ break;
+ case 1:
+ state[i++] = dbg_readq(DBGBVR1_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR1_EL1);
+ break;
+ case 2:
+ state[i++] = dbg_readq(DBGBVR2_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR2_EL1);
+ break;
+ case 3:
+ state[i++] = dbg_readq(DBGBVR3_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR3_EL1);
+ break;
+ case 4:
+ state[i++] = dbg_readq(DBGBVR4_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR4_EL1);
+ break;
+ case 5:
+ state[i++] = dbg_readq(DBGBVR5_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR5_EL1);
+ break;
+ case 6:
+ state[i++] = dbg_readq(DBGBVR6_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR6_EL1);
+ break;
+ case 7:
+ state[i++] = dbg_readq(DBGBVR7_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR7_EL1);
+ break;
+ case 8:
+ state[i++] = dbg_readq(DBGBVR8_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR8_EL1);
+ break;
+ case 9:
+ state[i++] = dbg_readq(DBGBVR9_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR9_EL1);
+ break;
+ case 10:
+ state[i++] = dbg_readq(DBGBVR10_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR10_EL1);
+ break;
+ case 11:
+ state[i++] = dbg_readq(DBGBVR11_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR11_EL1);
+ break;
+ case 12:
+ state[i++] = dbg_readq(DBGBVR12_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR12_EL1);
+ break;
+ case 13:
+ state[i++] = dbg_readq(DBGBVR13_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR13_EL1);
+ break;
+ case 14:
+ state[i++] = dbg_readq(DBGBVR14_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR14_EL1);
+ break;
+ case 15:
+ state[i++] = dbg_readq(DBGBVR15_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR15_EL1);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+static int dbg_write_arch64_bxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ dbg_write(state[i++], DBGBVR0_EL1);
+ dbg_write(state[i++], DBGBCR0_EL1);
+ break;
+ case 1:
+ dbg_write(state[i++], DBGBVR1_EL1);
+ dbg_write(state[i++], DBGBCR1_EL1);
+ break;
+ case 2:
+ dbg_write(state[i++], DBGBVR2_EL1);
+ dbg_write(state[i++], DBGBCR2_EL1);
+ break;
+ case 3:
+ dbg_write(state[i++], DBGBVR3_EL1);
+ dbg_write(state[i++], DBGBCR3_EL1);
+ break;
+ case 4:
+ dbg_write(state[i++], DBGBVR4_EL1);
+ dbg_write(state[i++], DBGBCR4_EL1);
+ break;
+ case 5:
+ dbg_write(state[i++], DBGBVR5_EL1);
+ dbg_write(state[i++], DBGBCR5_EL1);
+ break;
+ case 6:
+ dbg_write(state[i++], DBGBVR6_EL1);
+ dbg_write(state[i++], DBGBCR6_EL1);
+ break;
+ case 7:
+ dbg_write(state[i++], DBGBVR7_EL1);
+ dbg_write(state[i++], DBGBCR7_EL1);
+ break;
+ case 8:
+ dbg_write(state[i++], DBGBVR8_EL1);
+ dbg_write(state[i++], DBGBCR8_EL1);
+ break;
+ case 9:
+ dbg_write(state[i++], DBGBVR9_EL1);
+ dbg_write(state[i++], DBGBCR9_EL1);
+ break;
+ case 10:
+ dbg_write(state[i++], DBGBVR10_EL1);
+ dbg_write(state[i++], DBGBCR10_EL1);
+ break;
+ case 11:
+ dbg_write(state[i++], DBGBVR11_EL1);
+ dbg_write(state[i++], DBGBCR11_EL1);
+ break;
+ case 12:
+ dbg_write(state[i++], DBGBVR12_EL1);
+ dbg_write(state[i++], DBGBCR12_EL1);
+ break;
+ case 13:
+ dbg_write(state[i++], DBGBVR13_EL1);
+ dbg_write(state[i++], DBGBCR13_EL1);
+ break;
+ case 14:
+ dbg_write(state[i++], DBGBVR14_EL1);
+ dbg_write(state[i++], DBGBCR14_EL1);
+ break;
+ case 15:
+ dbg_write(state[i++], DBGBVR15_EL1);
+ dbg_write(state[i++], DBGBCR15_EL1);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+static int dbg_read_arch64_wxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = dbg_readq(DBGWVR0_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR0_EL1);
+ break;
+ case 1:
+ state[i++] = dbg_readq(DBGWVR1_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR1_EL1);
+ break;
+ case 2:
+ state[i++] = dbg_readq(DBGWVR2_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR2_EL1);
+ break;
+ case 3:
+ state[i++] = dbg_readq(DBGWVR3_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR3_EL1);
+ break;
+ case 4:
+ state[i++] = dbg_readq(DBGWVR4_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR4_EL1);
+ break;
+ case 5:
+ state[i++] = dbg_readq(DBGWVR5_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR5_EL1);
+ break;
+ case 6:
+ state[i++] = dbg_readq(DBGWVR6_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR6_EL1);
+ break;
+ case 7:
+ state[i++] = dbg_readq(DBGWVR7_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR7_EL1);
+ break;
+ case 8:
+ state[i++] = dbg_readq(DBGWVR8_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR8_EL1);
+ break;
+ case 9:
+ state[i++] = dbg_readq(DBGWVR9_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR9_EL1);
+ break;
+ case 10:
+ state[i++] = dbg_readq(DBGWVR10_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR10_EL1);
+ break;
+ case 11:
+ state[i++] = dbg_readq(DBGWVR11_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR11_EL1);
+ break;
+ case 12:
+ state[i++] = dbg_readq(DBGWVR12_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR12_EL1);
+ break;
+ case 13:
+ state[i++] = dbg_readq(DBGWVR13_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR13_EL1);
+ break;
+ case 14:
+ state[i++] = dbg_readq(DBGWVR14_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR14_EL1);
+ break;
+ case 15:
+ state[i++] = dbg_readq(DBGWVR15_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR15_EL1);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * dbg_write_arch64_wxr - restore watchpoint value/control pair @j
+ * @state: per-cpu save buffer filled by dbg_read_arch64_wxr()
+ * @i: current read index into @state
+ * @j: watchpoint index (0-15)
+ *
+ * Restores DBGWVRj_EL1 then DBGWCRj_EL1 in the order the save path
+ * stored them.  Out-of-range indices are logged and skipped.  Returns
+ * the updated index.
+ */
+static int dbg_write_arch64_wxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		dbg_write(state[i++], DBGWVR0_EL1);
+		dbg_write(state[i++], DBGWCR0_EL1);
+		break;
+	case 1:
+		dbg_write(state[i++], DBGWVR1_EL1);
+		dbg_write(state[i++], DBGWCR1_EL1);
+		break;
+	case 2:
+		dbg_write(state[i++], DBGWVR2_EL1);
+		dbg_write(state[i++], DBGWCR2_EL1);
+		break;
+	case 3:
+		dbg_write(state[i++], DBGWVR3_EL1);
+		dbg_write(state[i++], DBGWCR3_EL1);
+		break;
+	case 4:
+		dbg_write(state[i++], DBGWVR4_EL1);
+		dbg_write(state[i++], DBGWCR4_EL1);
+		break;
+	case 5:
+		dbg_write(state[i++], DBGWVR5_EL1);
+		dbg_write(state[i++], DBGWCR5_EL1);
+		break;
+	case 6:
+		/* Fix: was DBGWVR0_EL1, which clobbered watchpoint 0's
+		 * value register and left watchpoint 6 unrestored.
+		 */
+		dbg_write(state[i++], DBGWVR6_EL1);
+		dbg_write(state[i++], DBGWCR6_EL1);
+		break;
+	case 7:
+		dbg_write(state[i++], DBGWVR7_EL1);
+		dbg_write(state[i++], DBGWCR7_EL1);
+		break;
+	case 8:
+		dbg_write(state[i++], DBGWVR8_EL1);
+		dbg_write(state[i++], DBGWCR8_EL1);
+		break;
+	case 9:
+		dbg_write(state[i++], DBGWVR9_EL1);
+		dbg_write(state[i++], DBGWCR9_EL1);
+		break;
+	case 10:
+		dbg_write(state[i++], DBGWVR10_EL1);
+		dbg_write(state[i++], DBGWCR10_EL1);
+		break;
+	case 11:
+		dbg_write(state[i++], DBGWVR11_EL1);
+		dbg_write(state[i++], DBGWCR11_EL1);
+		break;
+	case 12:
+		dbg_write(state[i++], DBGWVR12_EL1);
+		dbg_write(state[i++], DBGWCR12_EL1);
+		break;
+	case 13:
+		dbg_write(state[i++], DBGWVR13_EL1);
+		dbg_write(state[i++], DBGWCR13_EL1);
+		break;
+	case 14:
+		dbg_write(state[i++], DBGWVR14_EL1);
+		dbg_write(state[i++], DBGWCR14_EL1);
+		break;
+	case 15:
+		dbg_write(state[i++], DBGWVR15_EL1);
+		dbg_write(state[i++], DBGWCR15_EL1);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+/*
+ * dbg_save_state - save this cpu's ARMv8 debug registers into the per-cpu
+ * slice of dbg.state (offset cpu * MAX_DBG_REGS) before power collapse.
+ * Runs with the OS lock held; sets the OS double lock when done.
+ */
+static inline void dbg_save_state(int cpu)
+{
+ int i, j;
+
+ i = cpu * MAX_DBG_REGS;
+
+ switch (dbg.arch) {
+ case ARM_DEBUG_ARCH_V8:
+ /* Set OS Lock to inform the debugger that the OS is in the
+ * process of saving debug registers. It prevents accidental
+ * modification of the debug regs by the external debugger.
+ */
+ dbg_write(0x1, OSLAR_EL1);
+ /* Ensure OS lock is set before proceeding */
+ isb();
+
+ dbg.state[i++] = (uint32_t)dbg_readl(MDSCR_EL1);
+ for (j = 0; j < dbg.nr_bp; j++)
+ i = dbg_read_arch64_bxr((uint64_t *)dbg.state, i, j);
+ for (j = 0; j < dbg.nr_wp; j++)
+ i = dbg_read_arch64_wxr((uint64_t *)dbg.state, i, j);
+ dbg.state[i++] = (uint32_t)dbg_readl(MDCCINT_EL1);
+ dbg.state[i++] = (uint32_t)dbg_readl(DBGCLAIMCLR_EL1);
+ dbg.state[i++] = (uint32_t)dbg_readl(OSECCR_EL1);
+ dbg.state[i++] = (uint32_t)dbg_readl(OSDTRRX_EL1);
+ dbg.state[i++] = (uint32_t)dbg_readl(OSDTRTX_EL1);
+
+ /* Set the OS double lock */
+ isb();
+ dbg_write(0x1, OSDLR_EL1);
+ isb();
+ break;
+ default:
+ pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+ __func__);
+ }
+}
+
+/*
+ * dbg_restore_state - restore this cpu's ARMv8 debug registers from
+ * dbg.state after power collapse; mirror image of dbg_save_state().
+ * Restore order: clear double lock, take OS lock, write registers,
+ * then release OS lock.
+ */
+static inline void dbg_restore_state(int cpu)
+{
+ int i, j;
+
+ i = cpu * MAX_DBG_REGS;
+
+ switch (dbg.arch) {
+ case ARM_DEBUG_ARCH_V8:
+ /* Clear the OS double lock */
+ isb();
+ dbg_write(0x0, OSDLR_EL1);
+ isb();
+
+ /* Set OS lock. Lock will already be set after power collapse
+ * but this write is included to ensure it is set.
+ */
+ dbg_write(0x1, OSLAR_EL1);
+ isb();
+
+ dbg_write(dbg.state[i++], MDSCR_EL1);
+ for (j = 0; j < dbg.nr_bp; j++)
+ i = dbg_write_arch64_bxr((uint64_t *)dbg.state, i, j);
+ for (j = 0; j < dbg.nr_wp; j++)
+ i = dbg_write_arch64_wxr((uint64_t *)dbg.state, i, j);
+ dbg_write(dbg.state[i++], MDCCINT_EL1);
+ /* claim tags were saved from DBGCLAIMCLR_EL1; restoring via
+ * DBGCLAIMSET_EL1 re-asserts exactly the saved tags
+ */
+ dbg_write(dbg.state[i++], DBGCLAIMSET_EL1);
+ dbg_write(dbg.state[i++], OSECCR_EL1);
+ dbg_write(dbg.state[i++], OSDTRRX_EL1);
+ dbg_write(dbg.state[i++], OSDTRTX_EL1);
+
+ isb();
+ dbg_write(0x0, OSLAR_EL1);
+ isb();
+ break;
+ default:
+ pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+ __func__);
+ }
+}
+
+/*
+ * dbg_init_arch_data - populate dbg.arch and the breakpoint/watchpoint/
+ * context-compare counts from ID_AA64DFR0_EL1 (AArch64 debug feature
+ * register). Field counts are encoded as (value - 1), hence the +1.
+ */
+static void dbg_init_arch_data(void)
+{
+ uint64_t dbgfr;
+
+ /* This will run on core0 so use it to populate parameters */
+ dbgfr = dbg_readq(ID_AA64DFR0_EL1);
+ dbg.arch = BMVAL(dbgfr, 0, 3);
+ dbg.nr_bp = BMVAL(dbgfr, 12, 15) + 1;
+ dbg.nr_wp = BMVAL(dbgfr, 20, 23) + 1;
+ dbg.nr_ctx_cmp = BMVAL(dbgfr, 28, 31) + 1;
+}
+#else
+
+/*
+ * dbg_read_arch32_bxr - save breakpoint pair 'j' (DBGBVRj/DBGBCRj, AArch32
+ * view) into state[] starting at index 'i'. Returns the updated index.
+ */
+static int dbg_read_arch32_bxr(uint32_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = dbg_read(DBGBVR0);
+ state[i++] = dbg_read(DBGBCR0);
+ break;
+ case 1:
+ state[i++] = dbg_read(DBGBVR1);
+ state[i++] = dbg_read(DBGBCR1);
+ break;
+ case 2:
+ state[i++] = dbg_read(DBGBVR2);
+ state[i++] = dbg_read(DBGBCR2);
+ break;
+ case 3:
+ state[i++] = dbg_read(DBGBVR3);
+ state[i++] = dbg_read(DBGBCR3);
+ break;
+ case 4:
+ state[i++] = dbg_read(DBGBVR4);
+ state[i++] = dbg_read(DBGBCR4);
+ break;
+ case 5:
+ state[i++] = dbg_read(DBGBVR5);
+ state[i++] = dbg_read(DBGBCR5);
+ break;
+ case 6:
+ state[i++] = dbg_read(DBGBVR6);
+ state[i++] = dbg_read(DBGBCR6);
+ break;
+ case 7:
+ state[i++] = dbg_read(DBGBVR7);
+ state[i++] = dbg_read(DBGBCR7);
+ break;
+ case 8:
+ state[i++] = dbg_read(DBGBVR8);
+ state[i++] = dbg_read(DBGBCR8);
+ break;
+ case 9:
+ state[i++] = dbg_read(DBGBVR9);
+ state[i++] = dbg_read(DBGBCR9);
+ break;
+ case 10:
+ state[i++] = dbg_read(DBGBVR10);
+ state[i++] = dbg_read(DBGBCR10);
+ break;
+ case 11:
+ state[i++] = dbg_read(DBGBVR11);
+ state[i++] = dbg_read(DBGBCR11);
+ break;
+ case 12:
+ state[i++] = dbg_read(DBGBVR12);
+ state[i++] = dbg_read(DBGBCR12);
+ break;
+ case 13:
+ state[i++] = dbg_read(DBGBVR13);
+ state[i++] = dbg_read(DBGBCR13);
+ break;
+ case 14:
+ state[i++] = dbg_read(DBGBVR14);
+ state[i++] = dbg_read(DBGBCR14);
+ break;
+ case 15:
+ state[i++] = dbg_read(DBGBVR15);
+ state[i++] = dbg_read(DBGBCR15);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * dbg_write_arch32_bxr - restore breakpoint pair 'j' (DBGBVRj/DBGBCRj,
+ * AArch32 view) from state[] starting at index 'i'. Returns updated index.
+ */
+static int dbg_write_arch32_bxr(uint32_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ dbg_write(state[i++], DBGBVR0);
+ dbg_write(state[i++], DBGBCR0);
+ break;
+ case 1:
+ dbg_write(state[i++], DBGBVR1);
+ dbg_write(state[i++], DBGBCR1);
+ break;
+ case 2:
+ dbg_write(state[i++], DBGBVR2);
+ dbg_write(state[i++], DBGBCR2);
+ break;
+ case 3:
+ dbg_write(state[i++], DBGBVR3);
+ dbg_write(state[i++], DBGBCR3);
+ break;
+ case 4:
+ dbg_write(state[i++], DBGBVR4);
+ dbg_write(state[i++], DBGBCR4);
+ break;
+ case 5:
+ dbg_write(state[i++], DBGBVR5);
+ dbg_write(state[i++], DBGBCR5);
+ break;
+ case 6:
+ dbg_write(state[i++], DBGBVR6);
+ dbg_write(state[i++], DBGBCR6);
+ break;
+ case 7:
+ dbg_write(state[i++], DBGBVR7);
+ dbg_write(state[i++], DBGBCR7);
+ break;
+ case 8:
+ dbg_write(state[i++], DBGBVR8);
+ dbg_write(state[i++], DBGBCR8);
+ break;
+ case 9:
+ dbg_write(state[i++], DBGBVR9);
+ dbg_write(state[i++], DBGBCR9);
+ break;
+ case 10:
+ dbg_write(state[i++], DBGBVR10);
+ dbg_write(state[i++], DBGBCR10);
+ break;
+ case 11:
+ dbg_write(state[i++], DBGBVR11);
+ dbg_write(state[i++], DBGBCR11);
+ break;
+ case 12:
+ dbg_write(state[i++], DBGBVR12);
+ dbg_write(state[i++], DBGBCR12);
+ break;
+ case 13:
+ dbg_write(state[i++], DBGBVR13);
+ dbg_write(state[i++], DBGBCR13);
+ break;
+ case 14:
+ dbg_write(state[i++], DBGBVR14);
+ dbg_write(state[i++], DBGBCR14);
+ break;
+ case 15:
+ dbg_write(state[i++], DBGBVR15);
+ dbg_write(state[i++], DBGBCR15);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * dbg_read_arch32_wxr - save watchpoint pair 'j' (DBGWVRj/DBGWCRj, AArch32
+ * view) into state[] starting at index 'i'. Returns the updated index.
+ */
+static int dbg_read_arch32_wxr(uint32_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = dbg_read(DBGWVR0);
+ state[i++] = dbg_read(DBGWCR0);
+ break;
+ case 1:
+ state[i++] = dbg_read(DBGWVR1);
+ state[i++] = dbg_read(DBGWCR1);
+ break;
+ case 2:
+ state[i++] = dbg_read(DBGWVR2);
+ state[i++] = dbg_read(DBGWCR2);
+ break;
+ case 3:
+ state[i++] = dbg_read(DBGWVR3);
+ state[i++] = dbg_read(DBGWCR3);
+ break;
+ case 4:
+ state[i++] = dbg_read(DBGWVR4);
+ state[i++] = dbg_read(DBGWCR4);
+ break;
+ case 5:
+ state[i++] = dbg_read(DBGWVR5);
+ state[i++] = dbg_read(DBGWCR5);
+ break;
+ case 6:
+ state[i++] = dbg_read(DBGWVR6);
+ state[i++] = dbg_read(DBGWCR6);
+ break;
+ case 7:
+ state[i++] = dbg_read(DBGWVR7);
+ state[i++] = dbg_read(DBGWCR7);
+ break;
+ case 8:
+ state[i++] = dbg_read(DBGWVR8);
+ state[i++] = dbg_read(DBGWCR8);
+ break;
+ case 9:
+ state[i++] = dbg_read(DBGWVR9);
+ state[i++] = dbg_read(DBGWCR9);
+ break;
+ case 10:
+ state[i++] = dbg_read(DBGWVR10);
+ state[i++] = dbg_read(DBGWCR10);
+ break;
+ case 11:
+ state[i++] = dbg_read(DBGWVR11);
+ state[i++] = dbg_read(DBGWCR11);
+ break;
+ case 12:
+ state[i++] = dbg_read(DBGWVR12);
+ state[i++] = dbg_read(DBGWCR12);
+ break;
+ case 13:
+ state[i++] = dbg_read(DBGWVR13);
+ state[i++] = dbg_read(DBGWCR13);
+ break;
+ case 14:
+ state[i++] = dbg_read(DBGWVR14);
+ state[i++] = dbg_read(DBGWCR14);
+ break;
+ case 15:
+ state[i++] = dbg_read(DBGWVR15);
+ state[i++] = dbg_read(DBGWCR15);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * dbg_write_arch32_wxr - restore watchpoint pair 'j' (DBGWVRj/DBGWCRj,
+ * AArch32 view) from state[] starting at index 'i'. Returns updated index.
+ */
+static int dbg_write_arch32_wxr(uint32_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ dbg_write(state[i++], DBGWVR0);
+ dbg_write(state[i++], DBGWCR0);
+ break;
+ case 1:
+ dbg_write(state[i++], DBGWVR1);
+ dbg_write(state[i++], DBGWCR1);
+ break;
+ case 2:
+ dbg_write(state[i++], DBGWVR2);
+ dbg_write(state[i++], DBGWCR2);
+ break;
+ case 3:
+ dbg_write(state[i++], DBGWVR3);
+ dbg_write(state[i++], DBGWCR3);
+ break;
+ case 4:
+ dbg_write(state[i++], DBGWVR4);
+ dbg_write(state[i++], DBGWCR4);
+ break;
+ case 5:
+ dbg_write(state[i++], DBGWVR5);
+ dbg_write(state[i++], DBGWCR5);
+ break;
+ case 6:
+ dbg_write(state[i++], DBGWVR6);
+ dbg_write(state[i++], DBGWCR6);
+ break;
+ case 7:
+ dbg_write(state[i++], DBGWVR7);
+ dbg_write(state[i++], DBGWCR7);
+ break;
+ case 8:
+ dbg_write(state[i++], DBGWVR8);
+ dbg_write(state[i++], DBGWCR8);
+ break;
+ case 9:
+ dbg_write(state[i++], DBGWVR9);
+ dbg_write(state[i++], DBGWCR9);
+ break;
+ case 10:
+ dbg_write(state[i++], DBGWVR10);
+ dbg_write(state[i++], DBGWCR10);
+ break;
+ case 11:
+ dbg_write(state[i++], DBGWVR11);
+ dbg_write(state[i++], DBGWCR11);
+ break;
+ case 12:
+ dbg_write(state[i++], DBGWVR12);
+ dbg_write(state[i++], DBGWCR12);
+ break;
+ case 13:
+ dbg_write(state[i++], DBGWVR13);
+ dbg_write(state[i++], DBGWCR13);
+ break;
+ case 14:
+ dbg_write(state[i++], DBGWVR14);
+ dbg_write(state[i++], DBGWCR14);
+ break;
+ case 15:
+ dbg_write(state[i++], DBGWVR15);
+ dbg_write(state[i++], DBGWCR15);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * dbg_save_state - AArch32 variant: save this cpu's debug registers into
+ * the per-cpu slice of dbg.state (offset cpu * MAX_DBG_REGS) before power
+ * collapse. Runs with the OS lock held; sets the OS double lock when done.
+ */
+static inline void dbg_save_state(int cpu)
+{
+ int i, j;
+
+ i = cpu * MAX_DBG_REGS;
+
+ switch (dbg.arch) {
+ case ARM_DEBUG_ARCH_V8:
+ /* Set OS Lock to inform the debugger that the OS is in the
+ * process of saving debug registers. It prevents accidental
+ * modification of the debug regs by the external debugger.
+ */
+ dbg_write(OSLOCK_MAGIC, DBGOSLAR);
+ /* Ensure OS lock is set before proceeding */
+ isb();
+
+ dbg.state[i++] = dbg_read(DBGDSCRext);
+ for (j = 0; j < dbg.nr_bp; j++)
+ i = dbg_read_arch32_bxr(dbg.state, i, j);
+ for (j = 0; j < dbg.nr_wp; j++)
+ i = dbg_read_arch32_wxr(dbg.state, i, j);
+ dbg.state[i++] = dbg_read(DBGDCCINT);
+ dbg.state[i++] = dbg_read(DBGCLAIMCLR);
+ dbg.state[i++] = dbg_read(DBGOSECCR);
+ dbg.state[i++] = dbg_read(DBGDTRRXext);
+ dbg.state[i++] = dbg_read(DBGDTRTXext);
+
+ /* Set the OS double lock */
+ isb();
+ dbg_write(0x1, DBGOSDLR);
+ isb();
+ break;
+ default:
+ pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+ __func__);
+ }
+}
+
+/*
+ * dbg_restore_state - AArch32 variant: restore this cpu's debug registers
+ * from dbg.state after power collapse; mirror image of dbg_save_state().
+ * Restore order: clear double lock, take OS lock, write registers, then
+ * release OS lock (non-magic write to DBGOSLAR unlocks).
+ */
+static inline void dbg_restore_state(int cpu)
+{
+ int i, j;
+
+ i = cpu * MAX_DBG_REGS;
+
+ switch (dbg.arch) {
+ case ARM_DEBUG_ARCH_V8:
+ /* Clear the OS double lock */
+ isb();
+ dbg_write(0x0, DBGOSDLR);
+ isb();
+
+ /* Set OS lock. Lock will already be set after power collapse
+ * but this write is included to ensure it is set.
+ */
+ dbg_write(OSLOCK_MAGIC, DBGOSLAR);
+ isb();
+
+ dbg_write(dbg.state[i++], DBGDSCRext);
+ for (j = 0; j < dbg.nr_bp; j++)
+ i = dbg_write_arch32_bxr((uint32_t *)dbg.state, i, j);
+ for (j = 0; j < dbg.nr_wp; j++)
+ i = dbg_write_arch32_wxr((uint32_t *)dbg.state, i, j);
+ dbg_write(dbg.state[i++], DBGDCCINT);
+ /* claim tags were saved from DBGCLAIMCLR; restoring via
+ * DBGCLAIMSET re-asserts exactly the saved tags
+ */
+ dbg_write(dbg.state[i++], DBGCLAIMSET);
+ dbg_write(dbg.state[i++], DBGOSECCR);
+ dbg_write(dbg.state[i++], DBGDTRRXext);
+ dbg_write(dbg.state[i++], DBGDTRTXext);
+
+ isb();
+ dbg_write(0x0, DBGOSLAR);
+ isb();
+ break;
+ default:
+ pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+ __func__);
+ }
+}
+
+/*
+ * dbg_init_arch_data - AArch32 variant: populate dbg.arch and the
+ * breakpoint/watchpoint/context-compare counts from DBGDIDR. Field
+ * counts are encoded as (value - 1), hence the +1.
+ */
+static void dbg_init_arch_data(void)
+{
+ uint32_t dbgdidr;
+
+ /* This will run on core0 so use it to populate parameters */
+ dbgdidr = dbg_read(DBGDIDR);
+ dbg.arch = BMVAL(dbgdidr, 16, 19);
+ dbg.nr_ctx_cmp = BMVAL(dbgdidr, 20, 23) + 1;
+ dbg.nr_bp = BMVAL(dbgdidr, 24, 27) + 1;
+ dbg.nr_wp = BMVAL(dbgdidr, 28, 31) + 1;
+}
+#endif
+
+/*
+ * msm_jtag_save_state - save debug registers
+ *
+ * Debug registers are saved before power collapse if debug
+ * architecture is supported respectively and TZ isn't supporting
+ * the save and restore of debug registers.
+ *
+ * CONTEXT:
+ * Called with preemption off and interrupts locked from:
+ * 1. per_cpu idle thread context for idle power collapses
+ * or
+ * 2. per_cpu idle thread context for hotplug/suspend power collapse
+ * for nonboot cpus
+ * or
+ * 3. suspend thread context for suspend power collapse for core0
+ *
+ * In all cases we will run on the same cpu for the entire duration.
+ */
+void msm_jtag_save_state(void)
+{
+ int cpu;
+
+ /* preemption is off in all documented call contexts (see header
+ * comment above), so raw_smp_processor_id() is stable here
+ */
+ cpu = raw_smp_processor_id();
+
+ msm_jtag_save_cntr[cpu]++;
+ /* ensure counter is updated before moving forward */
+ mb();
+
+ /* ETM state first, then core debug registers (if TZ doesn't
+ * already save/restore them - see dbg.save_restore_enabled)
+ */
+ msm_jtag_etm_save_state();
+ if (dbg.save_restore_enabled)
+ dbg_save_state(cpu);
+}
+EXPORT_SYMBOL(msm_jtag_save_state);
+
+/*
+ * msm_jtag_restore_state - restore debug/ETM registers after power collapse.
+ * Counterpart to msm_jtag_save_state(); restores in reverse order (core
+ * debug registers first, then ETM). Same calling context constraints.
+ */
+void msm_jtag_restore_state(void)
+{
+ int cpu;
+
+ cpu = raw_smp_processor_id();
+
+ /* Attempt restore only if save has been done. If power collapse
+ * is disabled, hotplug off of non-boot core will result in WFI
+ * and hence msm_jtag_save_state will not occur. Subsequently,
+ * during hotplug on of non-boot core when msm_jtag_restore_state
+ * is called via msm_platform_secondary_init, this check will help
+ * bail us out without restoring.
+ */
+ if (msm_jtag_save_cntr[cpu] == msm_jtag_restore_cntr[cpu])
+ return;
+ else if (msm_jtag_save_cntr[cpu] != msm_jtag_restore_cntr[cpu] + 1)
+ pr_err_ratelimited("jtag imbalance, save:%lu, restore:%lu\n",
+ (unsigned long)msm_jtag_save_cntr[cpu],
+ (unsigned long)msm_jtag_restore_cntr[cpu]);
+
+ msm_jtag_restore_cntr[cpu]++;
+ /* ensure counter is updated before moving forward */
+ mb();
+
+ if (dbg.save_restore_enabled)
+ dbg_restore_state(cpu);
+ msm_jtag_etm_restore_state();
+}
+EXPORT_SYMBOL(msm_jtag_restore_state);
+
+/*
+ * dbg_arch_supported - true iff this driver knows how to save/restore
+ * debug state for the given debug architecture (currently v8 only).
+ */
+static inline bool dbg_arch_supported(uint8_t arch)
+{
+ switch (arch) {
+ case ARM_DEBUG_ARCH_V8:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Hotplug notifier: save JTAG/debug state on the dying cpu
+ * (CPU_DYING runs on the cpu going down, with interrupts off).
+ */
+static int jtag_hotcpu_save_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_DYING:
+ msm_jtag_save_state();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block jtag_hotcpu_save_notifier = {
+ .notifier_call = jtag_hotcpu_save_callback,
+};
+
+/*
+ * Hotplug notifier: restore JTAG/debug state on the incoming cpu
+ * (CPU_STARTING runs on the cpu coming up, with interrupts off).
+ * priority = 1 so it runs before default-priority (0) notifiers.
+ */
+static int jtag_hotcpu_restore_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_STARTING:
+ msm_jtag_restore_state();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block jtag_hotcpu_restore_notifier = {
+ .notifier_call = jtag_hotcpu_restore_callback,
+ .priority = 1,
+};
+
+/*
+ * CPU PM notifier: save before idle power collapse, restore on exit.
+ * CPU_PM_ENTER_FAILED also restores since the save already ran.
+ */
+static int jtag_cpu_pm_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action) {
+ case CPU_PM_ENTER:
+ msm_jtag_save_state();
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ msm_jtag_restore_state();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block jtag_cpu_pm_notifier = {
+ .notifier_call = jtag_cpu_pm_callback,
+};
+
+/*
+ * msm_jtag_dbg_init - probe debug architecture and, when the OS (rather
+ * than TZ) must save/restore debug registers, allocate the per-cpu state
+ * buffer and register hotplug/PM notifiers. Returns -EPERM when fuses
+ * block apps access to the debug registers; 0 otherwise (including the
+ * benign cases where TZ handles save/restore or the arch is unsupported).
+ */
+static int __init msm_jtag_dbg_init(void)
+{
+ int ret;
+
+ if (msm_jtag_fuse_apps_access_disabled())
+ return -EPERM;
+
+ /* This will run on core0 so use it to populate parameters */
+ dbg_init_arch_data();
+
+ if (dbg_arch_supported(dbg.arch)) {
+ /* TZ versions >= TZ_DBG_ETM_VER save/restore for us */
+ if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < TZ_DBG_ETM_VER) {
+ dbg.save_restore_enabled = true;
+ } else {
+ pr_info("dbg save-restore supported by TZ\n");
+ goto dbg_out;
+ }
+ } else {
+ pr_info("dbg arch %u not supported\n", dbg.arch);
+ goto dbg_out;
+ }
+
+ /* Allocate dbg state save space */
+#ifdef CONFIG_ARM64
+ dbg.state = kcalloc(MAX_DBG_STATE_SIZE, sizeof(uint64_t), GFP_KERNEL);
+#else
+ dbg.state = kcalloc(MAX_DBG_STATE_SIZE, sizeof(uint32_t), GFP_KERNEL);
+#endif
+ if (!dbg.state) {
+ ret = -ENOMEM;
+ goto dbg_err;
+ }
+
+ register_hotcpu_notifier(&jtag_hotcpu_save_notifier);
+ register_hotcpu_notifier(&jtag_hotcpu_restore_notifier);
+ cpu_pm_register_notifier(&jtag_cpu_pm_notifier);
+dbg_out:
+ return 0;
+dbg_err:
+ return ret;
+}
+arch_initcall(msm_jtag_dbg_init);
diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c
index 9f953aeb03a4..bbe686f4bc42 100644
--- a/drivers/soc/qcom/qdsp6v2/apr.c
+++ b/drivers/soc/qcom/qdsp6v2/apr.c
@@ -496,7 +496,7 @@ void apr_cb_func(void *buf, int len, void *priv)
pr_debug("\n*****************\n");
if (!buf || len <= APR_HDR_SIZE) {
- pr_err("APR: Improper apr pkt received:%p %d\n", buf, len);
+ pr_err("APR: Improper apr pkt received:%pK %d\n", buf, len);
return;
}
hdr = buf;
@@ -584,7 +584,7 @@ void apr_cb_func(void *buf, int len, void *priv)
return;
}
pr_debug("svc_idx = %d\n", i);
- pr_debug("%x %x %x %p %p\n", c_svc->id, c_svc->dest_id,
+ pr_debug("%x %x %x %pK %pK\n", c_svc->id, c_svc->dest_id,
c_svc->client_id, c_svc->fn, c_svc->priv);
data.payload_size = hdr->pkt_size - hdr_size;
data.opcode = hdr->opcode;
@@ -648,7 +648,7 @@ static void apr_reset_deregister(struct work_struct *work)
container_of(work, struct apr_reset_work, work);
handle = apr_reset->handle;
- pr_debug("%s:handle[%p]\n", __func__, handle);
+ pr_debug("%s:handle[%pK]\n", __func__, handle);
apr_deregister(handle);
kfree(apr_reset);
}
@@ -681,7 +681,7 @@ int apr_deregister(void *handle)
client[dest_id][client_id].svc_cnt--;
if (!client[dest_id][client_id].svc_cnt) {
svc->need_reset = 0x0;
- pr_debug("%s: service is reset %p\n", __func__, svc);
+ pr_debug("%s: service is reset %pK\n", __func__, svc);
}
}
@@ -709,7 +709,7 @@ void apr_reset(void *handle)
if (!handle)
return;
- pr_debug("%s: handle[%p]\n", __func__, handle);
+ pr_debug("%s: handle[%pK]\n", __func__, handle);
if (apr_reset_workqueue == NULL) {
pr_err("%s: apr_reset_workqueue is NULL\n", __func__);
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
index 09b8bda2f6c8..9b44fb03cf94 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
@@ -157,11 +157,11 @@ int msm_audio_ion_alloc(const char *name, struct ion_client **client,
pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
goto err_ion_handle;
}
- pr_debug("%s: mapped address = %p, size=%zd\n", __func__,
+ pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
*vaddr, bufsz);
if (bufsz != 0) {
- pr_debug("%s: memset to 0 %p %zd\n", __func__, *vaddr, bufsz);
+ pr_debug("%s: memset to 0 %pK %zd\n", __func__, *vaddr, bufsz);
memset((void *)*vaddr, 0, bufsz);
}
@@ -208,7 +208,7 @@ int msm_audio_ion_import(const char *name, struct ion_client **client,
bufsz should be 0 and fd shouldn't be 0 as of now
*/
*handle = ion_import_dma_buf(*client, fd);
- pr_debug("%s: DMA Buf name=%s, fd=%d handle=%p\n", __func__,
+ pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
name, fd, *handle);
if (IS_ERR_OR_NULL((void *) (*handle))) {
pr_err("%s: ion import dma buffer failed\n",
@@ -239,7 +239,7 @@ int msm_audio_ion_import(const char *name, struct ion_client **client,
rc = -ENOMEM;
goto err_ion_handle;
}
- pr_debug("%s: mapped address = %p, size=%zd\n", __func__,
+ pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
*vaddr, bufsz);
return 0;
@@ -321,7 +321,7 @@ int msm_audio_ion_mmap(struct audio_buffer *ab,
offset = 0;
}
len = min(len, remainder);
- pr_debug("vma=%p, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%ld\n",
+ pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%ld\n",
vma, (unsigned int)addr, len,
(unsigned int)vma->vm_start,
(unsigned int)vma->vm_end,
@@ -344,8 +344,8 @@ int msm_audio_ion_mmap(struct audio_buffer *ab,
, __func__ , ret);
return ret;
}
- pr_debug("phys=%pa len=%zd\n", &phys_addr, phys_len);
- pr_debug("vma=%p, vm_start=%x vm_end=%x vm_pgoff=%ld vm_page_prot=%ld\n",
- pr_debug("phys=%pK len=%zd\n", &phys_addr, phys_len);
+ pr_debug("vma=%pK, vm_start=%x vm_end=%x vm_pgoff=%ld vm_page_prot=%ld\n",
vma, (unsigned int)vma->vm_start,
(unsigned int)vma->vm_end, vma->vm_pgoff,
(unsigned long int)vma->vm_page_prot);
@@ -380,7 +380,7 @@ struct ion_client *msm_audio_ion_client_create(const char *name)
void msm_audio_ion_client_destroy(struct ion_client *client)
{
- pr_debug("%s: client = %p smmu_enabled = %d\n", __func__,
+ pr_debug("%s: client = %pK smmu_enabled = %d\n", __func__,
client, msm_audio_ion_data.smmu_enabled);
ion_client_destroy(client);
@@ -402,7 +402,7 @@ int msm_audio_ion_import_legacy(const char *name, struct ion_client *client,
bufsz should be 0 and fd shouldn't be 0 as of now
*/
*handle = ion_import_dma_buf(client, fd);
- pr_debug("%s: DMA Buf name=%s, fd=%d handle=%p\n", __func__,
+ pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
name, fd, *handle);
if (IS_ERR_OR_NULL((void *)(*handle))) {
pr_err("%s: ion import dma buffer failed\n",
@@ -468,7 +468,7 @@ int msm_audio_ion_cache_operations(struct audio_buffer *abuff, int cache_op)
int msm_cache_ops = 0;
if (!abuff) {
- pr_err("Invalid params: %p, %p\n", __func__, abuff);
+ pr_err("%s: Invalid params: %pK\n", __func__, abuff);
return -EINVAL;
}
rc = ion_handle_get_flags(abuff->client, abuff->handle,
@@ -641,7 +641,7 @@ static int msm_audio_dma_buf_unmap(struct ion_client *client,
if (!found) {
dev_err(cb_dev,
- "%s: cannot find allocation, ion_handle %p, ion_client %p",
+ "%s: cannot find allocation, ion_handle %pK, ion_client %pK",
__func__, handle, client);
rc = -EINVAL;
}
@@ -671,7 +671,7 @@ static int msm_audio_ion_get_phys(struct ion_client *client,
rc = ion_phys(client, handle, addr, len);
}
- pr_debug("phys=%pa, len=%zd, rc=%d\n", &(*addr), *len, rc);
+ pr_debug("phys=%pK, len=%zd, rc=%d\n", &(*addr), *len, rc);
err:
return rc;
}
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index b10479fee334..c26e530c61f5 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -516,6 +516,8 @@ static struct msm_soc_info cpu_of_id[] = {
[310] = {MSM_CPU_8996, "MSM8996"},
[311] = {MSM_CPU_8996, "APQ8096"},
[291] = {MSM_CPU_8996, "APQ8096"},
+ [305] = {MSM_CPU_8996, "MSM8996pro"},
+ [312] = {MSM_CPU_8996, "APQ8096pro"},
/* 8976 ID */
[266] = {MSM_CPU_8976, "MSM8976"},
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 6ea51140da28..65eda2de9586 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,6 +26,7 @@
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/platform_device.h>
+#include <linux/wait.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/memory_dump.h>
#include <soc/qcom/watchdog.h>
@@ -53,6 +54,11 @@ static struct msm_watchdog_data *wdog_data;
static int cpu_idle_pc_state[NR_CPUS];
+/*
+ * user_pet_enable:
+ * Require userspace to write to a sysfs file every pet_time milliseconds.
+ * Disabled by default on boot.
+ */
struct msm_watchdog_data {
unsigned int __iomem phys_base;
size_t size;
@@ -74,10 +80,16 @@ struct msm_watchdog_data {
bool irq_ppi;
struct msm_watchdog_data __percpu **wdog_cpu_dd;
struct notifier_block panic_blk;
+
bool enabled;
+ bool user_pet_enabled;
+
struct task_struct *watchdog_task;
struct timer_list pet_timer;
- struct completion pet_complete;
+ wait_queue_head_t pet_complete;
+
+ bool timer_expired;
+ bool user_pet_complete;
};
/*
@@ -245,6 +257,65 @@ static ssize_t wdog_disable_set(struct device *dev,
static DEVICE_ATTR(disable, S_IWUSR | S_IRUSR, wdog_disable_get,
wdog_disable_set);
+/*
+ * Userspace Watchdog Support:
+ * Write 1 to the "user_pet_enabled" file to enable hw support for a
+ * userspace watchdog.
+ * Userspace is required to pet the watchdog by continuing to write 1
+ * to this file in the expected interval.
+ * Userspace may disable this requirement by writing 0 to this same
+ * file.
+ */
+static void __wdog_user_pet(struct msm_watchdog_data *wdog_dd)
+{
+ wdog_dd->user_pet_complete = true;
+ wake_up(&wdog_dd->pet_complete);
+}
+
+static ssize_t wdog_user_pet_enabled_get(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ wdog_dd->user_pet_enabled);
+ return ret;
+}
+
+static ssize_t wdog_user_pet_enabled_set(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+ ret = strtobool(buf, &wdog_dd->user_pet_enabled);
+ if (ret) {
+ dev_err(wdog_dd->dev, "invalid user input\n");
+ return ret;
+ }
+
+ __wdog_user_pet(wdog_dd);
+
+ return count;
+}
+
+static DEVICE_ATTR(user_pet_enabled, S_IWUSR | S_IRUSR,
+ wdog_user_pet_enabled_get, wdog_user_pet_enabled_set);
+
+static ssize_t wdog_pet_time_get(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", wdog_dd->pet_time);
+ return ret;
+}
+
+static DEVICE_ATTR(pet_time, S_IRUSR, wdog_pet_time_get, NULL);
+
static void pet_watchdog(struct msm_watchdog_data *wdog_dd)
{
int slack, i, count, prev_count = 0;
@@ -298,7 +369,8 @@ static void pet_task_wakeup(unsigned long data)
{
struct msm_watchdog_data *wdog_dd =
(struct msm_watchdog_data *)data;
- complete(&wdog_dd->pet_complete);
+ wdog_dd->timer_expired = true;
+ wake_up(&wdog_dd->pet_complete);
}
static __ref int watchdog_kthread(void *arg)
@@ -310,14 +382,24 @@ static __ref int watchdog_kthread(void *arg)
sched_setscheduler(current, SCHED_FIFO, &param);
while (!kthread_should_stop()) {
- while (wait_for_completion_interruptible(
- &wdog_dd->pet_complete) != 0)
+ while (wait_event_interruptible(
+ wdog_dd->pet_complete,
+ wdog_dd->timer_expired) != 0)
+ ;
+
+ if (wdog_dd->do_ipi_ping)
+ ping_other_cpus(wdog_dd);
+
+ while (wait_event_interruptible(
+ wdog_dd->pet_complete,
+ wdog_dd->user_pet_complete) != 0)
;
- reinit_completion(&wdog_dd->pet_complete);
+
+ wdog_dd->timer_expired = false;
+ wdog_dd->user_pet_complete = !wdog_dd->user_pet_enabled;
+
if (enable) {
delay_time = msecs_to_jiffies(wdog_dd->pet_time);
- if (wdog_dd->do_ipi_ping)
- ping_other_cpus(wdog_dd);
pet_watchdog(wdog_dd);
}
/* Check again before scheduling *
@@ -509,12 +591,29 @@ out0:
return;
}
+static int init_watchdog_sysfs(struct msm_watchdog_data *wdog_dd)
+{
+ int error = 0;
+
+ error |= device_create_file(wdog_dd->dev, &dev_attr_disable);
+
+ if (of_property_read_bool(wdog_dd->dev->of_node,
+ "qcom,userspace-watchdog")) {
+ error |= device_create_file(wdog_dd->dev, &dev_attr_pet_time);
+ error |= device_create_file(wdog_dd->dev,
+ &dev_attr_user_pet_enabled);
+ }
+
+ if (error)
+ dev_err(wdog_dd->dev, "cannot create sysfs attribute\n");
+
+ return error;
+}
static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
{
unsigned long delay_time;
uint32_t val;
- int error;
u64 timeout;
int ret;
@@ -561,7 +660,10 @@ static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
atomic_notifier_chain_register(&panic_notifier_list,
&wdog_dd->panic_blk);
mutex_init(&wdog_dd->disable_lock);
- init_completion(&wdog_dd->pet_complete);
+ init_waitqueue_head(&wdog_dd->pet_complete);
+ wdog_dd->timer_expired = false;
+ wdog_dd->user_pet_complete = true;
+ wdog_dd->user_pet_enabled = false;
wake_up_process(wdog_dd->watchdog_task);
init_timer(&wdog_dd->pet_timer);
wdog_dd->pet_timer.data = (unsigned long)wdog_dd;
@@ -576,9 +678,9 @@ static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
__raw_writel(1, wdog_dd->base + WDT0_RST);
wdog_dd->last_pet = sched_clock();
wdog_dd->enabled = true;
- error = device_create_file(wdog_dd->dev, &dev_attr_disable);
- if (error)
- dev_err(wdog_dd->dev, "cannot create sysfs attribute\n");
+
+ init_watchdog_sysfs(wdog_dd);
+
if (wdog_dd->irq_ppi)
enable_percpu_irq(wdog_dd->bark_irq, 0);
if (ipi_opt_en)
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 4312a8f86e57..9f67de3cc9e8 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -630,7 +630,7 @@ static ssize_t dwc3_store_ep_num(struct file *file, const char __user *ubuf,
struct seq_file *s = file->private_data;
struct dwc3 *dwc = s->private;
char kbuf[10];
- unsigned int num, dir;
+ unsigned int num, dir, temp;
unsigned long flags;
memset(kbuf, 0, 10);
@@ -641,8 +641,16 @@ static ssize_t dwc3_store_ep_num(struct file *file, const char __user *ubuf,
if (sscanf(kbuf, "%u %u", &num, &dir) != 2)
return -EINVAL;
+ if (dir != 0 && dir != 1)
+ return -EINVAL;
+
+ temp = (num << 1) + dir;
+ if (temp >= (dwc->num_in_eps + dwc->num_out_eps) ||
+ temp >= DWC3_ENDPOINTS_NUM)
+ return -EINVAL;
+
spin_lock_irqsave(&dwc->lock, flags);
- ep_num = (num << 1) + dir;
+ ep_num = temp;
spin_unlock_irqrestore(&dwc->lock, flags);
return count;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f5554d7a8e00..506a1a500f63 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -186,9 +186,7 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
if (!(cdev && cdev->config) || !dwc->needs_fifo_resize)
return 0;
- /* gadget.num_eps never be greater than dwc->num_in_eps */
- num_eps = min_t(int, dwc->num_in_eps,
- cdev->config->num_ineps_used + 1);
+ num_eps = dwc->num_in_eps;
ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
@@ -206,7 +204,7 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
int tmp;
if (!(dep->flags & DWC3_EP_ENABLED)) {
- dev_warn(dwc->dev, "ep%dIn not enabled", num);
+ dev_dbg(dwc->dev, "ep%dIn not enabled", num);
tmp = max_packet + mdwidth;
goto resize_fifo;
}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index e0a8ad44a008..39b014b90e28 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -567,10 +567,13 @@ int mdss_dsi_wait_for_lane_idle(struct mdss_dsi_ctrl_pdata *ctrl)
pr_debug("%s: polling for lanes to be in stop state, mask=0x%08x\n",
__func__, stop_state_mask);
- rc = readl_poll_timeout(ctrl->ctrl_base + LANE_STATUS, val,
- (val & stop_state_mask), sleep_us, timeout_us);
+ if (ctrl->shared_data->phy_rev == DSI_PHY_REV_30)
+ rc = mdss_dsi_phy_v3_wait_for_lanes_stop_state(ctrl, &val);
+ else
+ rc = readl_poll_timeout(ctrl->ctrl_base + LANE_STATUS, val,
+ (val & stop_state_mask), sleep_us, timeout_us);
if (rc) {
- pr_err("%s: lanes not in stop state, LANE_STATUS=0x%08x\n",
+ pr_debug("%s: lanes not in stop state, LANE_STATUS=0x%08x\n",
__func__, val);
goto error;
}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy.h b/drivers/video/fbdev/msm/mdss_dsi_phy.h
index 4cb78e378548..5fff3123b63f 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_phy.h
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy.h
@@ -92,4 +92,27 @@ int mdss_dsi_phy_v3_regulator_disable(struct mdss_dsi_ctrl_pdata *ctrl);
*/
void mdss_dsi_phy_v3_toggle_resync_fifo(struct mdss_dsi_ctrl_pdata *ctrl);
+/**
+ * mdss_dsi_phy_v3_wait_for_lanes_stop_state() - Wait for DSI lanes to be in
+ * stop state
+ * @ctrl: pointer to DSI controller structure
+ * @lane_status: value of lane status register at the end of the poll
+ *
+ * This function waits for all the active DSI lanes to be in stop state by
+ * polling the lane status register. This function assumes that the bus clocks
+ * required to access the registers are already turned on.
+ */
+int mdss_dsi_phy_v3_wait_for_lanes_stop_state(struct mdss_dsi_ctrl_pdata *ctrl,
+ u32 *lane_status);
+
+/**
+ * mdss_dsi_phy_v3_ulps_config() - Program DSI lanes to enter/exit ULPS mode
+ * @ctrl: pointer to DSI controller structure
+ * @enable: true to enter ULPS, false to exit ULPS
+ *
+ * This function executes the necessary hardware programming sequence to
+ * enter/exit DSI Ultra-Low Power State (ULPS) for DSI PHY v3. This function
+ * assumes that the link and core clocks are already on.
+ */
+int mdss_dsi_phy_v3_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl, bool enable);
#endif /* MDSS_DSI_PHY_H */
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c b/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c
index 0e10eb5d0cc9..0c4dcf9db216 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c
@@ -45,6 +45,8 @@
#define CMN_TIMING_CTRL_10 0x0D4
#define CMN_TIMING_CTRL_11 0x0D8
#define CMN_PHY_STATUS 0x0EC
+#define CMN_LANE_STATUS0 0x0F4
+#define CMN_LANE_STATUS1 0x0F8
#define LNX_CFG0(n) ((0x200 + (0x80 * (n))) + 0x00)
#define LNX_CFG1(n) ((0x200 + (0x80 * (n))) + 0x04)
@@ -62,6 +64,29 @@
#define DSI_PHY_W32(b, off, val) MIPI_OUTP((b) + (off), (val))
#define DSI_PHY_R32(b, off) MIPI_INP((b) + (off))
+static u32 __get_active_lanes_mask(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ struct mipi_panel_info *mipi;
+ u32 mask = 0;
+
+ mipi = &ctrl->panel_data.panel_info.mipi;
+
+ /* clock lane will always be programmed for ulps */
+ mask = BIT(4);
+
+ /* Mark all active data lanes */
+ if (mipi->data_lane0)
+ mask |= BIT(0);
+ if (mipi->data_lane1)
+ mask |= BIT(1);
+ if (mipi->data_lane2)
+ mask |= BIT(2);
+ if (mipi->data_lane3)
+ mask |= BIT(3);
+
+ return mask;
+}
+
static bool mdss_dsi_phy_v3_is_pll_on(struct mdss_dsi_ctrl_pdata *ctrl)
{
u32 data;
@@ -179,6 +204,92 @@ void mdss_dsi_phy_v3_toggle_resync_fifo(struct mdss_dsi_ctrl_pdata *ctrl)
wmb();
}
+int mdss_dsi_phy_v3_wait_for_lanes_stop_state(struct mdss_dsi_ctrl_pdata *ctrl,
+ u32 *lane_status)
+{
+ u32 stop_state_mask = 0;
+ u32 const sleep_us = 10;
+ u32 const timeout_us = 100;
+
+ if (!ctrl || !lane_status) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ stop_state_mask = __get_active_lanes_mask(ctrl);
+
+ return readl_poll_timeout(ctrl->phy_io.base + CMN_LANE_STATUS1,
+ *lane_status, (*lane_status == stop_state_mask), sleep_us,
+ timeout_us);
+}
+
+int mdss_dsi_phy_v3_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl, bool enable)
+{
+ int rc = 0;
+ u32 active_lanes = 0;
+ u32 lane_status = 0;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ active_lanes = __get_active_lanes_mask(ctrl);
+
+ pr_debug("configuring ulps (%s) for ctrl%d, active lanes=0x%08x\n",
+ (enable ? "on" : "off"), ctrl->ndx, active_lanes);
+
+ if (enable) {
+ /*
+ * ULPS Entry Request.
+ * Wait for a short duration to ensure that the lanes
+ * enter ULP state.
+ */
+ DSI_PHY_W32(ctrl->phy_io.base, CMN_DSI_LANE_CTRL1,
+ active_lanes);
+ usleep_range(100, 110);
+
+ /* Check to make sure that all active data lanes are in ULPS */
+ lane_status = DSI_PHY_R32(ctrl->phy_io.base, CMN_LANE_STATUS0);
+ if (lane_status & active_lanes) {
+ pr_err("ULPS entry req failed for ctrl%d. Lane status=0x%08x\n",
+ ctrl->ndx, lane_status);
+ rc = -EINVAL;
+ goto error;
+ }
+ } else {
+ /*
+ * ULPS Exit Request
+ * Hardware requirement is to wait for at least 1ms
+ */
+ DSI_PHY_W32(ctrl->phy_io.base, CMN_DSI_LANE_CTRL2,
+ active_lanes);
+ usleep_range(1000, 1010);
+
+ /*
+ * Sometimes when exiting ULPS, it is possible that some DSI
+ * lanes are not in the stop state which could lead to DSI
+ * commands not going through. To avoid this, force the lanes
+ * to be in stop state.
+ */
+ DSI_PHY_W32(ctrl->phy_io.base, CMN_DSI_LANE_CTRL3,
+ active_lanes);
+
+ DSI_PHY_W32(ctrl->phy_io.base, CMN_DSI_LANE_CTRL3, 0);
+ DSI_PHY_W32(ctrl->phy_io.base, CMN_DSI_LANE_CTRL2, 0);
+ DSI_PHY_W32(ctrl->phy_io.base, CMN_DSI_LANE_CTRL1, 0);
+
+ lane_status = DSI_PHY_R32(ctrl->phy_io.base, CMN_LANE_STATUS0);
+ }
+
+ pr_debug("DSI lane status = 0x%08x. Ulps %s\n", lane_status,
+ enable ? "enabled" : "disabled");
+
+error:
+ return rc;
+}
+
+
int mdss_dsi_phy_v3_shutdown(struct mdss_dsi_ctrl_pdata *ctrl)
{
/* ensure that the PLL is already off */
diff --git a/drivers/video/fbdev/msm/mdss_mdp_formats.h b/drivers/video/fbdev/msm/mdss_mdp_formats.h
index a3a3434c9044..f27836f329a9 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_formats.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_formats.h
@@ -388,7 +388,7 @@ static struct mdss_mdp_format_params mdss_mdp_format_map[] = {
.unpack_count = 3,
.bpp = 3,
.fetch_mode = MDSS_MDP_FETCH_LINEAR,
- .element = { C2_R_Cr, C1_B_Cb, C0_G_Y },
+ .element = { C0_G_Y, C1_B_Cb, C2_R_Cr },
},
{
FMT_YUV_COMMON(MDP_YCRCB_H1V1),
@@ -397,7 +397,7 @@ static struct mdss_mdp_format_params mdss_mdp_format_map[] = {
.unpack_count = 3,
.bpp = 3,
.fetch_mode = MDSS_MDP_FETCH_LINEAR,
- .element = { C1_B_Cb, C2_R_Cr, C0_G_Y },
+ .element = { C0_G_Y, C2_R_Cr, C1_B_Cb },
},
{
FMT_YUV_COMMON(MDP_YCRYCB_H2V1),
@@ -406,7 +406,7 @@ static struct mdss_mdp_format_params mdss_mdp_format_map[] = {
.unpack_count = 4,
.bpp = 2,
.fetch_mode = MDSS_MDP_FETCH_LINEAR,
- .element = { C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y },
+ .element = { C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb },
},
{
FMT_YUV_COMMON(MDP_YCBYCR_H2V1),
@@ -416,7 +416,7 @@ static struct mdss_mdp_format_params mdss_mdp_format_map[] = {
.unpack_count = 4,
.bpp = 2,
.fetch_mode = MDSS_MDP_FETCH_LINEAR,
- .element = { C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y },
+ .element = { C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr },
},
{
FMT_YUV_COMMON(MDP_CRYCBY_H2V1),
@@ -425,7 +425,16 @@ static struct mdss_mdp_format_params mdss_mdp_format_map[] = {
.unpack_count = 4,
.bpp = 2,
.fetch_mode = MDSS_MDP_FETCH_LINEAR,
- .element = { C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr },
+ .element = { C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y },
+ },
+ {
+ FMT_YUV_COMMON(MDP_CBYCRY_H2V1),
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+ .chroma_sample = MDSS_MDP_CHROMA_H2V1,
+ .unpack_count = 4,
+ .bpp = 2,
+ .fetch_mode = MDSS_MDP_FETCH_LINEAR,
+ .element = { C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y},
},
FMT_RGB_1555(MDP_RGBA_5551, 1, VALID_ROT_WB_FORMAT |
VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 68ab530f3583..715f4428e81a 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -4298,6 +4298,10 @@ static int mdss_mdp_panel_default_dither_config(struct msm_fb_data_type *mfd,
dither_data.g_y_depth = 8;
dither_data.r_cr_depth = 8;
dither_data.b_cb_depth = 8;
+ /*
+ * Use default dither table by setting len to 0
+ */
+ dither_data.len = 0;
dither.cfg_payload = &dither_data;
break;
case mdp_pp_legacy:
@@ -4316,6 +4320,10 @@ static int mdss_mdp_panel_default_dither_config(struct msm_fb_data_type *mfd,
dither_data.g_y_depth = 6;
dither_data.r_cr_depth = 6;
dither_data.b_cb_depth = 6;
+ /*
+ * Use default dither table by setting len to 0
+ */
+ dither_data.len = 0;
dither.cfg_payload = &dither_data;
break;
case mdp_pp_legacy:
diff --git a/drivers/video/fbdev/msm/mdss_wb.c b/drivers/video/fbdev/msm/mdss_wb.c
index 68ccfebf4e29..4550dd270c69 100644
--- a/drivers/video/fbdev/msm/mdss_wb.c
+++ b/drivers/video/fbdev/msm/mdss_wb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,7 @@
#include "mdss_panel.h"
#include "mdss_wb.h"
+#include "mdss.h"
/**
* mdss_wb_check_params - check new panel info params
@@ -128,11 +129,23 @@ static int mdss_wb_probe(struct platform_device *pdev)
{
struct mdss_panel_data *pdata = NULL;
struct mdss_wb_ctrl *wb_ctrl = NULL;
+ struct mdss_util_intf *util;
int rc = 0;
if (!pdev->dev.of_node)
return -ENODEV;
+ util = mdss_get_util_intf();
+ if (util == NULL) {
+ pr_err("%s: Failed to get mdss utility functions\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!util->mdp_probe_done) {
+ pr_err("%s: MDP not probed yet!\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
wb_ctrl = devm_kzalloc(&pdev->dev, sizeof(*wb_ctrl), GFP_KERNEL);
if (!wb_ctrl)
return -ENOMEM;
diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
index 55d3a6f46a81..909b3b5ccf0c 100644
--- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c
+++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
@@ -1739,18 +1739,19 @@ static bool mdss_dsi_is_ulps_req_valid(struct mdss_dsi_ctrl_pdata *ctrl,
}
/**
- * mdss_dsi_ulps_config() - Program DSI lanes to enter/exit ULPS mode
+ * mdss_dsi_ulps_config_default() - Program DSI lanes to enter/exit ULPS mode
* @ctrl: pointer to DSI controller structure
- * @enable: 1 to enter ULPS, 0 to exit ULPS
+ * @enable: true to enter ULPS, false to exit ULPS
*
- * This function executes the necessary programming sequence to enter/exit
- * DSI Ultra-Low Power State (ULPS). This function assumes that the link and
- * core clocks are already on.
+ * Executes the default hardware programming sequence to enter/exit DSI
+ * Ultra-Low Power State (ULPS). This function would be called whenever there
+ * are no hardware version specific functions for configuring ULPS mode. This
+ * function assumes that the link and core clocks are already on.
*/
-static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
- int enable)
+static int mdss_dsi_ulps_config_default(struct mdss_dsi_ctrl_pdata *ctrl,
+ bool enable)
{
- int ret = 0;
+ int rc = 0;
struct mdss_panel_data *pdata = NULL;
struct mdss_panel_info *pinfo;
struct mipi_panel_info *mipi;
@@ -1770,12 +1771,6 @@ static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
pinfo = &pdata->panel_info;
mipi = &pinfo->mipi;
- if (!mdss_dsi_is_ulps_req_valid(ctrl, enable)) {
- pr_debug("%s: skiping ULPS config for ctrl%d, enable=%d\n",
- __func__, ctrl->ndx, enable);
- return 0;
- }
-
/* clock lane will always be programmed for ulps */
active_lanes = BIT(4);
/*
@@ -1791,9 +1786,83 @@ static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
if (mipi->data_lane3)
active_lanes |= BIT(3);
- pr_debug("%s: configuring ulps (%s) for ctrl%d, active lanes=0x%08x,clamps=%s\n",
+ pr_debug("%s: configuring ulps (%s) for ctrl%d, active lanes=0x%08x\n",
+ __func__, (enable ? "on" : "off"), ctrl->ndx, active_lanes);
+
+ if (enable) {
+ /*
+ * ULPS Entry Request.
+ * Wait for a short duration to ensure that the lanes
+ * enter ULP state.
+ */
+ MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes);
+ usleep_range(100, 110);
+
+ /* Check to make sure that all active data lanes are in ULPS */
+ lane_status = MIPI_INP(ctrl->ctrl_base + 0xA8);
+ if (lane_status & (active_lanes << 8)) {
+ pr_err("%s: ULPS entry req failed for ctrl%d. Lane status=0x%08x\n",
+ __func__, ctrl->ndx, lane_status);
+ rc = -EINVAL;
+ goto error;
+ }
+ } else {
+ /*
+ * ULPS Exit Request
+ * Hardware requirement is to wait for at least 1ms
+ */
+ MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes << 8);
+ usleep_range(1000, 1010);
+
+ /*
+ * Sometimes when exiting ULPS, it is possible that some DSI
+ * lanes are not in the stop state which could lead to DSI
+ * commands not going through. To avoid this, force the lanes
+ * to be in stop state.
+ */
+ MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes << 16);
+
+ MIPI_OUTP(ctrl->ctrl_base + 0x0AC, 0x0);
+
+ lane_status = MIPI_INP(ctrl->ctrl_base + 0xA8);
+ }
+
+ pr_debug("%s: DSI lane status = 0x%08x. Ulps %s\n", __func__,
+ lane_status, enable ? "enabled" : "disabled");
+
+error:
+ return rc;
+}
+
+/**
+ * mdss_dsi_ulps_config() - Program DSI lanes to enter/exit ULPS mode
+ * @ctrl: pointer to DSI controller structure
+ * @enable: 1 to enter ULPS, 0 to exit ULPS
+ *
+ * Execute the necessary programming sequence to enter/exit DSI Ultra-Low Power
+ * State (ULPS). This function the validity of the ULPS config request and
+ * executes and pre/post steps before/after the necessary hardware programming.
+ * This function assumes that the link and core clocks are already on.
+ */
+static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
+ int enable)
+{
+ int ret = 0;
+
+ if (!ctrl) {
+ pr_err("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!mdss_dsi_is_ulps_req_valid(ctrl, enable)) {
+ pr_debug("%s: skiping ULPS config for ctrl%d, enable=%d\n",
+ __func__, ctrl->ndx, enable);
+ return 0;
+ }
+
+ pr_debug("%s: configuring ulps (%s) for ctrl%d, clamps=%s\n",
__func__, (enable ? "on" : "off"), ctrl->ndx,
- active_lanes, ctrl->mmss_clamp ? "enabled" : "disabled");
+ ctrl->mmss_clamp ? "enabled" : "disabled");
if (enable && !ctrl->ulps) {
/*
@@ -1809,29 +1878,19 @@ static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
if (!ctrl->mmss_clamp) {
ret = mdss_dsi_wait_for_lane_idle(ctrl);
if (ret) {
- pr_warn("%s: lanes not idle, skip ulps\n",
+ pr_warn_ratelimited("%s: lanes not idle, skip ulps\n",
__func__);
ret = 0;
goto error;
}
}
- /*
- * ULPS Entry Request.
- * Wait for a short duration to ensure that the lanes
- * enter ULP state.
- */
- MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes);
- usleep_range(100, 100);
-
- /* Check to make sure that all active data lanes are in ULPS */
- lane_status = MIPI_INP(ctrl->ctrl_base + 0xA8);
- if (lane_status & (active_lanes << 8)) {
- pr_err("%s: ULPS entry req failed for ctrl%d. Lane status=0x%08x\n",
- __func__, ctrl->ndx, lane_status);
- ret = -EINVAL;
+ if (ctrl->shared_data->phy_rev == DSI_PHY_REV_30)
+ ret = mdss_dsi_phy_v3_ulps_config(ctrl, true);
+ else
+ ret = mdss_dsi_ulps_config_default(ctrl, true);
+ if (ret)
goto error;
- }
ctrl->ulps = true;
} else if (!enable && ctrl->ulps) {
@@ -1842,22 +1901,12 @@ static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
*/
mdss_dsi_dln0_phy_err(ctrl, false);
- /*
- * ULPS Exit Request
- * Hardware requirement is to wait for at least 1ms
- */
- MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes << 8);
- usleep_range(1000, 1000);
-
- /*
- * Sometimes when exiting ULPS, it is possible that some DSI
- * lanes are not in the stop state which could lead to DSI
- * commands not going through. To avoid this, force the lanes
- * to be in stop state.
- */
- MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes << 16);
-
- MIPI_OUTP(ctrl->ctrl_base + 0x0AC, 0x0);
+ if (ctrl->shared_data->phy_rev == DSI_PHY_REV_30)
+ ret = mdss_dsi_phy_v3_ulps_config(ctrl, false);
+ else
+ ret = mdss_dsi_ulps_config_default(ctrl, false);
+ if (ret)
+ goto error;
/*
* Wait for a short duration before enabling
@@ -1865,7 +1914,6 @@ static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
*/
usleep_range(100, 100);
- lane_status = MIPI_INP(ctrl->ctrl_base + 0xA8);
ctrl->ulps = false;
} else {
pr_debug("%s: No change requested: %s -> %s\n", __func__,
@@ -1873,9 +1921,6 @@ static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
enable ? "enabled" : "disabled");
}
- pr_debug("%s: DSI lane status = 0x%08x. Ulps %s\n", __func__,
- lane_status, enable ? "enabled" : "disabled");
-
error:
return ret;
}
@@ -2204,7 +2249,7 @@ int mdss_dsi_post_clkon_cb(void *priv,
if (mmss_clamp)
mdss_dsi_ctrl_setup(ctrl);
- if (ctrl->ulps) {
+ if (ctrl->ulps && mmss_clamp) {
/*
* ULPS Entry Request. This is needed if the lanes were
* in ULPS prior to power collapse, since after
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index faf9ae79ca3c..e1360198955a 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -58,6 +58,9 @@
/* connector orientation 0 - CC1, 1 - CC2 */
#define EXTCON_USB_CC 28
+/* connector speed 0 - High Speed, 1 - super speed */
+#define EXTCON_USB_SPEED 29
+
/* Display external connector */
#define EXTCON_DISP_HDMI 40 /* High-Definition Multimedia Interface */
#define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 0dd2f0bf9c23..a0dd21d215d2 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -1056,92 +1056,6 @@ struct ipa_wdi_buffer_info {
};
/**
- * enum ipa_mhi_event_type - event type for mhi callback
- *
- * @IPA_MHI_EVENT_READY: IPA MHI is ready and IPA uC is loaded. After getting
- * this event MHI client is expected to call to ipa_mhi_start() API
- * @IPA_MHI_EVENT_DATA_AVAILABLE: downlink data available on MHI channel
- */
-enum ipa_mhi_event_type {
- IPA_MHI_EVENT_READY,
- IPA_MHI_EVENT_DATA_AVAILABLE,
- IPA_MHI_EVENT_MAX,
-};
-
-typedef void (*mhi_client_cb)(void *priv, enum ipa_mhi_event_type event,
- unsigned long data);
-
-/**
- * struct ipa_mhi_msi_info - parameters for MSI (Message Signaled Interrupts)
- * @addr_low: MSI lower base physical address
- * @addr_hi: MSI higher base physical address
- * @data: Data Pattern to use when generating the MSI
- * @mask: Mask indicating number of messages assigned by the host to device
- *
- * msi value is written according to this formula:
- * ((data & ~mask) | (mmio.msiVec & mask))
- */
-struct ipa_mhi_msi_info {
- u32 addr_low;
- u32 addr_hi;
- u32 data;
- u32 mask;
-};
-
-/**
- * struct ipa_mhi_init_params - parameters for IPA MHI initialization API
- *
- * @msi: MSI (Message Signaled Interrupts) parameters
- * @mmio_addr: MHI MMIO physical address
- * @first_ch_idx: First channel ID for hardware accelerated channels.
- * @first_er_idx: First event ring ID for hardware accelerated channels.
- * @assert_bit40: should assert bit 40 in order to access hots space.
- * if PCIe iATU is configured then not need to assert bit40
- * @notify: client callback
- * @priv: client private data to be provided in client callback
- * @test_mode: flag to indicate if IPA MHI is in unit test mode
- */
-struct ipa_mhi_init_params {
- struct ipa_mhi_msi_info msi;
- u32 mmio_addr;
- u32 first_ch_idx;
- u32 first_er_idx;
- bool assert_bit40;
- mhi_client_cb notify;
- void *priv;
- bool test_mode;
-};
-
-/**
- * struct ipa_mhi_start_params - parameters for IPA MHI start API
- *
- * @host_ctrl_addr: Base address of MHI control data structures
- * @host_data_addr: Base address of MHI data buffers
- * @channel_context_addr: channel context array address in host address space
- * @event_context_addr: event context array address in host address space
- */
-struct ipa_mhi_start_params {
- u32 host_ctrl_addr;
- u32 host_data_addr;
- u64 channel_context_array_addr;
- u64 event_context_array_addr;
-};
-
-/**
- * struct ipa_mhi_connect_params - parameters for IPA MHI channel connect API
- *
- * @sys: IPA EP configuration info
- * @channel_id: MHI channel id
- */
-struct ipa_mhi_connect_params {
- struct ipa_sys_connect_params sys;
- u8 channel_id;
-};
-
-/* bit #40 in address should be asserted for MHI transfers over pcie */
-#define IPA_MHI_HOST_ADDR(addr) ((addr) | BIT_ULL(40))
-
-/**
* struct ipa_gsi_ep_config - IPA GSI endpoint configurations
*
* @ipa_ep_num: IPA EP pipe number
@@ -1436,23 +1350,6 @@ int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
void ipa_dma_destroy(void);
/*
- * MHI
- */
-int ipa_mhi_init(struct ipa_mhi_init_params *params);
-
-int ipa_mhi_start(struct ipa_mhi_start_params *params);
-
-int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl);
-
-int ipa_mhi_disconnect_pipe(u32 clnt_hdl);
-
-int ipa_mhi_suspend(bool force);
-
-int ipa_mhi_resume(void);
-
-void ipa_mhi_destroy(void);
-
-/*
* mux id
*/
int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in);
@@ -2112,45 +2009,6 @@ static inline void ipa_dma_destroy(void)
}
/*
- * MHI
- */
-static inline int ipa_mhi_init(struct ipa_mhi_init_params *params)
-{
- return -EPERM;
-}
-
-static inline int ipa_mhi_start(struct ipa_mhi_start_params *params)
-{
- return -EPERM;
-}
-
-static inline int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in,
- u32 *clnt_hdl)
-{
- return -EPERM;
-}
-
-static inline int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
-{
- return -EPERM;
-}
-
-static inline int ipa_mhi_suspend(bool force)
-{
- return -EPERM;
-}
-
-static inline int ipa_mhi_resume(void)
-{
- return -EPERM;
-}
-
-static inline void ipa_mhi_destroy(void)
-{
- return;
-}
-
-/*
* mux id
*/
static inline int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
diff --git a/include/linux/ipa_mhi.h b/include/linux/ipa_mhi.h
new file mode 100644
index 000000000000..4d3b9747a876
--- /dev/null
+++ b/include/linux/ipa_mhi.h
@@ -0,0 +1,161 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IPA_MHI_H_
+#define IPA_MHI_H_
+
+#include <linux/ipa.h>
+#include <linux/types.h>
+
+/**
+ * enum ipa_mhi_event_type - event type for mhi callback
+ *
+ * @IPA_MHI_EVENT_READY: IPA MHI is ready and IPA uC is loaded. After getting
+ * this event MHI client is expected to call to ipa_mhi_start() API
+ * @IPA_MHI_EVENT_DATA_AVAILABLE: downlink data available on MHI channel
+ */
+enum ipa_mhi_event_type {
+ IPA_MHI_EVENT_READY,
+ IPA_MHI_EVENT_DATA_AVAILABLE,
+ IPA_MHI_EVENT_MAX,
+};
+
+typedef void (*mhi_client_cb)(void *priv, enum ipa_mhi_event_type event,
+ unsigned long data);
+
+/**
+ * struct ipa_mhi_msi_info - parameters for MSI (Message Signaled Interrupts)
+ * @addr_low: MSI lower base physical address
+ * @addr_hi: MSI higher base physical address
+ * @data: Data Pattern to use when generating the MSI
+ * @mask: Mask indicating number of messages assigned by the host to device
+ *
+ * msi value is written according to this formula:
+ * ((data & ~mask) | (mmio.msiVec & mask))
+ */
+struct ipa_mhi_msi_info {
+ u32 addr_low;
+ u32 addr_hi;
+ u32 data;
+ u32 mask;
+};
+
+/**
+ * struct ipa_mhi_init_params - parameters for IPA MHI initialization API
+ *
+ * @msi: MSI (Message Signaled Interrupts) parameters
+ * @mmio_addr: MHI MMIO physical address
+ * @first_ch_idx: First channel ID for hardware accelerated channels.
+ * @first_er_idx: First event ring ID for hardware accelerated channels.
+ * @assert_bit40: should assert bit 40 in order to access host space.
+ * if PCIe iATU is configured then not need to assert bit40
+ * @notify: client callback
+ * @priv: client private data to be provided in client callback
+ * @test_mode: flag to indicate if IPA MHI is in unit test mode
+ */
+struct ipa_mhi_init_params {
+ struct ipa_mhi_msi_info msi;
+ u32 mmio_addr;
+ u32 first_ch_idx;
+ u32 first_er_idx;
+ bool assert_bit40;
+ mhi_client_cb notify;
+ void *priv;
+ bool test_mode;
+};
+
+/**
+ * struct ipa_mhi_start_params - parameters for IPA MHI start API
+ *
+ * @host_ctrl_addr: Base address of MHI control data structures
+ * @host_data_addr: Base address of MHI data buffers
+ * @channel_context_addr: channel context array address in host address space
+ * @event_context_addr: event context array address in host address space
+ */
+struct ipa_mhi_start_params {
+ u32 host_ctrl_addr;
+ u32 host_data_addr;
+ u64 channel_context_array_addr;
+ u64 event_context_array_addr;
+};
+
+/**
+ * struct ipa_mhi_connect_params - parameters for IPA MHI channel connect API
+ *
+ * @sys: IPA EP configuration info
+ * @channel_id: MHI channel id
+ */
+struct ipa_mhi_connect_params {
+ struct ipa_sys_connect_params sys;
+ u8 channel_id;
+};
+
+/* bit #40 in address should be asserted for MHI transfers over pcie */
+#define IPA_MHI_HOST_ADDR(addr) ((addr) | BIT_ULL(40))
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+int ipa_mhi_init(struct ipa_mhi_init_params *params);
+
+int ipa_mhi_start(struct ipa_mhi_start_params *params);
+
+int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl);
+
+int ipa_mhi_disconnect_pipe(u32 clnt_hdl);
+
+int ipa_mhi_suspend(bool force);
+
+int ipa_mhi_resume(void);
+
+void ipa_mhi_destroy(void);
+
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+
+static inline int ipa_mhi_init(struct ipa_mhi_init_params *params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_start(struct ipa_mhi_start_params *params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in,
+ u32 *clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_suspend(bool force)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_resume(void)
+{
+ return -EPERM;
+}
+
+static inline void ipa_mhi_destroy(void)
+{
+
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* IPA_MHI_H_ */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 429fdfc3baf5..f01c2ff9845b 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -8,6 +8,7 @@
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>
+#include <linux/coresight-stm.h>
struct trace_array;
struct trace_buffer;
@@ -231,7 +232,8 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
struct trace_event_file *trace_file,
unsigned long len);
-void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer,
+ unsigned long len);
enum {
TRACE_EVENT_FL_FILTERED_BIT,
@@ -500,6 +502,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
* @entry: The event itself
* @irq_flags: The state of the interrupts at the start of the event
* @pc: The state of the preempt count at the start of the event.
+ * @len: The length of the payload data required for stm logging.
*
* This is a helper function to handle triggers that require data
* from the event itself. It also tests the event against filters and
@@ -509,12 +512,16 @@ static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
- void *entry, unsigned long irq_flags, int pc)
+ void *entry, unsigned long irq_flags, int pc,
+ unsigned long len)
{
enum event_trigger_type tt = ETT_NONE;
- if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
+ if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) {
+ if (len)
+ stm_log(OST_ENTITY_FTRACE_EVENTS, entry, len);
trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
+ }
if (tt)
event_triggers_post_call(file, tt);
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 27ae1332947c..f688b56c5798 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -84,10 +84,6 @@ struct icnss_soc_info {
extern int icnss_register_driver(struct icnss_driver_ops *driver);
extern int icnss_unregister_driver(struct icnss_driver_ops *driver);
-extern int icnss_register_ce_irq(unsigned int ce_id,
- irqreturn_t (*handler)(int, void *),
- unsigned long flags, const char *name);
-extern int icnss_unregister_ce_irq(unsigned int ce_id);
extern int icnss_wlan_enable(struct icnss_wlan_enable_cfg *config,
enum icnss_driver_mode mode,
const char *host_version);
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index de996cf61053..af0cb7907922 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -682,7 +682,8 @@ trace_event_raw_event_##call(void *__data, proto) \
\
{ assign; } \
\
- trace_event_buffer_commit(&fbuffer); \
+ trace_event_buffer_commit(&fbuffer, \
+ sizeof(*entry) + __data_size); \
}
/*
* The ftrace_test_probe is compiled out, it is only here as a build time check
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 4d7f6f0d676a..696e80ee4571 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -800,6 +800,12 @@ enum v4l2_mpeg_vidc_extradata {
#define V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP \
V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP
V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP = 25,
+#define V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI \
+ V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI
+ V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI = 26,
+#define V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI \
+ V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI
+ V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI = 27,
};
#define V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL (V4L2_CID_MPEG_MSM_VIDC_BASE + 26)
diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h
index eaf2f5e02d0e..40cd867a9b7b 100644
--- a/include/uapi/media/msm_vidc.h
+++ b/include/uapi/media/msm_vidc.h
@@ -162,6 +162,20 @@ struct msm_vidc_roi_qp_payload {
unsigned int data[1];
};
+struct msm_vidc_mastering_display_colour_sei_payload {
+ unsigned int nDisplayPrimariesX[3];
+ unsigned int nDisplayPrimariesY[3];
+ unsigned int nWhitePointX;
+ unsigned int nWhitePointY;
+ unsigned int nMaxDisplayMasteringLuminance;
+ unsigned int nMinDisplayMasteringLuminance;
+};
+
+struct msm_vidc_content_light_level_sei_payload {
+ unsigned int nMaxContentLight;
+ unsigned int nMaxPicAverageLight;
+};
+
enum msm_vidc_extradata_type {
MSM_VIDC_EXTRADATA_NONE = 0x00000000,
MSM_VIDC_EXTRADATA_MB_QUANTIZATION = 0x00000001,
@@ -179,6 +193,12 @@ enum msm_vidc_extradata_type {
MSM_VIDC_EXTRADATA_FRAME_BITS_INFO = 0x00000010,
MSM_VIDC_EXTRADATA_VQZIP_SEI = 0x00000011,
MSM_VIDC_EXTRADATA_ROI_QP = 0x00000013,
+#define MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI \
+ MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI
+ MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI = 0x00000015,
+#define MSM_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI \
+ MSM_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI
+ MSM_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI = 0x00000016,
MSM_VIDC_EXTRADATA_INPUT_CROP = 0x0700000E,
#define MSM_VIDC_EXTRADATA_OUTPUT_CROP \
MSM_VIDC_EXTRADATA_OUTPUT_CROP
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1e488a1be4c3..530bbc44082c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -41,6 +41,7 @@
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>
+#include <linux/coresight-stm.h>
#include "trace.h"
#include "trace_output.h"
@@ -573,8 +574,11 @@ int __trace_puts(unsigned long ip, const char *str, int size)
if (entry->buf[size - 1] != '\n') {
entry->buf[size] = '\n';
entry->buf[size + 1] = '\0';
- } else
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, size + 2);
+ } else {
entry->buf[size] = '\0';
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, size + 1);
+ }
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
@@ -615,6 +619,7 @@ int __trace_bputs(unsigned long ip, const char *str)
entry = ring_buffer_event_data(event);
entry->ip = ip;
entry->str = str;
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->str, strlen(entry->str)+1);
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
@@ -2240,6 +2245,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, len + 1);
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
}
@@ -5265,8 +5271,11 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
if (entry->buf[cnt - 1] != '\n') {
entry->buf[cnt] = '\n';
entry->buf[cnt + 1] = '\0';
- } else
+ stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 2);
+ } else {
entry->buf[cnt] = '\0';
+ stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 1);
+ }
__buffer_unlock_commit(buffer, event);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 4f6ef6912e00..52bb846af3d6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -285,14 +285,15 @@ static void output_printk(struct trace_event_buffer *fbuffer)
spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}
-void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer,
+ unsigned long len)
{
if (tracepoint_printk)
output_printk(fbuffer);
event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
fbuffer->event, fbuffer->entry,
- fbuffer->flags, fbuffer->pc);
+ fbuffer->flags, fbuffer->pc, len);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 0655afbea83f..a01740a98afa 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -336,7 +336,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
event_trigger_unlock_commit(trace_file, buffer, event, entry,
- irq_flags, pc);
+ irq_flags, pc, 0);
}
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
@@ -382,7 +382,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
entry->ret = syscall_get_return_value(current, regs);
event_trigger_unlock_commit(trace_file, buffer, event, entry,
- irq_flags, pc);
+ irq_flags, pc, 0);
}
static int reg_event_syscall_enter(struct trace_event_file *file,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index d2f6d0be3503..23515a716748 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -821,7 +821,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
memcpy(data, ucb->buf, tu->tp.size + dsize);
- event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
+ event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0, 0);
}
/* uprobe handler */
diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c
index ddc428dfdb64..58fca9131b3c 100644
--- a/sound/soc/codecs/wcd_cpe_core.c
+++ b/sound/soc/codecs/wcd_cpe_core.c
@@ -3119,7 +3119,7 @@ static int wcd_cpe_lsm_set_params(struct wcd_cpe_core *core,
if (ret) {
pr_err("%s: fail to sent acdb cal, err = %d",
__func__, ret);
- return ret;
+ goto err_ret;
}
/* Send operation mode */
@@ -3129,22 +3129,11 @@ static int wcd_cpe_lsm_set_params(struct wcd_cpe_core *core,
det_mode.detect_failure = detect_failure;
ret = wcd_cpe_send_param_opmode(core, session,
&det_mode, &ids);
- if (ret) {
+ if (ret)
dev_err(core->dev,
"%s: Failed to set opmode, err=%d\n",
__func__, ret);
- goto err_ret;
- }
- /* Send connect to port */
- ids.module_id = CPE_LSM_MODULE_ID_VOICE_WAKEUP;
- ids.param_id = CPE_LSM_PARAM_ID_CONNECT_TO_PORT;
- ret = wcd_cpe_send_param_connectport(core, session,
- NULL, &ids, CPE_AFE_PORT_1_TX);
- if (ret)
- dev_err(core->dev,
- "%s: Failed to set connectPort, err=%d\n",
- __func__, ret);
err_ret:
return ret;
}
@@ -3354,7 +3343,19 @@ static int wcd_cpe_cmd_lsm_start(void *core_handle,
if (ret)
return ret;
- /* Send connect to port */
+ /* Send connect to port (input) */
+ ids.module_id = CPE_LSM_MODULE_ID_VOICE_WAKEUP;
+ ids.param_id = CPE_LSM_PARAM_ID_CONNECT_TO_PORT;
+ ret = wcd_cpe_send_param_connectport(core, session,
+ NULL, &ids, CPE_AFE_PORT_1_TX);
+ if (ret) {
+ dev_err(core->dev,
+ "%s: Failed to set connectPort, err=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Send connect to port (output) */
ids.module_id = CPE_LSM_MODULE_FRAMEWORK;
ids.param_id = CPE_LSM_PARAM_ID_CONNECT_TO_PORT;
ret = wcd_cpe_send_param_connectport(core, session,
diff --git a/sound/soc/msm/qdsp6v2/audio_cal_utils.c b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
index ed25415698ad..ea58987455bf 100644
--- a/sound/soc/msm/qdsp6v2/audio_cal_utils.c
+++ b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
@@ -635,7 +635,7 @@ static struct cal_block_data *create_cal_block(struct cal_type_data *cal_type,
goto err;
}
cal_block->buffer_number = basic_cal->cal_hdr.buffer_number;
- pr_debug("%s: created block for cal type %d, buf num %d, map handle %d, map size %zd paddr 0x%pa!\n",
+ pr_debug("%s: created block for cal type %d, buf num %d, map handle %d, map size %zd paddr 0x%pK!\n",
__func__, cal_type->info.reg.cal_type,
cal_block->buffer_number,
cal_block->map_data.ion_map_handle,
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
index 6c0d7bb7b259..f577637ee2b2 100644
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -187,7 +187,7 @@ static void compr_event_handler(uint32_t opcode,
pr_debug("%s:writing %d bytes of buffer[%d] to dsp 2\n",
__func__, prtd->pcm_count, prtd->out_head);
temp = buf[0].phys + (prtd->out_head * prtd->pcm_count);
- pr_debug("%s:writing buffer[%d] from 0x%pa\n",
+ pr_debug("%s:writing buffer[%d] from 0x%pK\n",
__func__, prtd->out_head, &temp);
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
@@ -238,7 +238,7 @@ static void compr_event_handler(uint32_t opcode,
break;
case ASM_DATA_EVENT_READ_DONE_V2: {
pr_debug("ASM_DATA_EVENT_READ_DONE\n");
- pr_debug("buf = %p, data = 0x%X, *data = %p,\n"
+ pr_debug("buf = %pK, data = 0x%X, *data = %pK,\n"
"prtd->pcm_irq_pos = %d\n",
prtd->audio_client->port[OUT].buf,
*(uint32_t *)prtd->audio_client->port[OUT].buf->data,
@@ -248,7 +248,7 @@ static void compr_event_handler(uint32_t opcode,
memcpy(prtd->audio_client->port[OUT].buf->data +
prtd->pcm_irq_pos, (ptrmem + READDONE_IDX_SIZE),
COMPRE_CAPTURE_HEADER_SIZE);
- pr_debug("buf = %p, updated data = 0x%X, *data = %p\n",
+ pr_debug("buf = %pK, updated data = 0x%X, *data = %pK\n",
prtd->audio_client->port[OUT].buf,
*(uint32_t *)(prtd->audio_client->port[OUT].buf->data +
prtd->pcm_irq_pos),
@@ -264,7 +264,7 @@ static void compr_event_handler(uint32_t opcode,
}
buf = prtd->audio_client->port[OUT].buf;
- pr_debug("pcm_irq_pos=%d, buf[0].phys = 0x%pa\n",
+ pr_debug("pcm_irq_pos=%d, buf[0].phys = 0x%pK\n",
prtd->pcm_irq_pos, &buf[0].phys);
read_param.len = prtd->pcm_count - COMPRE_CAPTURE_HEADER_SIZE;
read_param.paddr = buf[0].phys +
@@ -290,7 +290,7 @@ static void compr_event_handler(uint32_t opcode,
pr_debug("%s: writing %d bytes of buffer[%d] to dsp\n",
__func__, prtd->pcm_count, prtd->out_head);
buf = prtd->audio_client->port[IN].buf;
- pr_debug("%s: writing buffer[%d] from 0x%pa head %d count %d\n",
+ pr_debug("%s: writing buffer[%d] from 0x%pK head %d count %d\n",
__func__, prtd->out_head, &buf[0].phys,
prtd->pcm_count, prtd->out_head);
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
@@ -605,7 +605,7 @@ static int msm_compr_capture_prepare(struct snd_pcm_substream *substream)
- COMPRE_CAPTURE_HEADER_SIZE;
read_param.paddr = buf[i].phys
+ COMPRE_CAPTURE_HEADER_SIZE;
- pr_debug("Push buffer [%d] to DSP, paddr: %pa, vaddr: %p\n",
+ pr_debug("Push buffer [%d] to DSP, paddr: %pK, vaddr: %pK\n",
i, &read_param.paddr,
buf[i].data);
q6asm_async_read(prtd->audio_client, &read_param);
@@ -951,7 +951,7 @@ static int msm_compr_hw_params(struct snd_pcm_substream *substream,
dma_buf->addr = buf[0].phys;
dma_buf->bytes = runtime->hw.buffer_bytes_max;
- pr_debug("%s: buf[%p]dma_buf->area[%p]dma_buf->addr[%pa]\n"
+ pr_debug("%s: buf[%pK]dma_buf->area[%pK]dma_buf->addr[%pK]\n"
"dma_buf->bytes[%zd]\n", __func__,
(void *)buf, (void *)dma_buf->area,
&dma_buf->addr, dma_buf->bytes);
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index dbacc51b19ae..a7ed5381e690 100755
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -2096,7 +2096,7 @@ static int msm_compr_get_caps(struct snd_compr_stream *cstream,
memcpy(arg, &prtd->compr_cap, sizeof(struct snd_compr_caps));
} else {
ret = -EINVAL;
- pr_err("%s: arg (0x%p), prtd (0x%p)\n", __func__, arg, prtd);
+ pr_err("%s: arg (0x%pK), prtd (0x%pK)\n", __func__, arg, prtd);
}
return ret;
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 2ca6f590c532..f773061921fd 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -2369,7 +2369,7 @@ static int msm_auxpcm_dev_probe(struct platform_device *pdev)
goto fail_pdata_nomem;
}
- dev_dbg(&pdev->dev, "%s: dev %p, dai_data %p, auxpcm_pdata %p\n",
+ dev_dbg(&pdev->dev, "%s: dev %pK, dai_data %pK, auxpcm_pdata %pK\n",
__func__, &pdev->dev, dai_data, auxpcm_pdata);
rc = of_property_read_u32_array(pdev->dev.of_node,
diff --git a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
index ace747dd9bde..fea7bb4e7331 100644
--- a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
@@ -1103,7 +1103,7 @@ static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx)
ds2_ap_params_obj = &ds2_dap_params[cache_device];
pr_debug("%s: cache dev %d, dev_map_idx %d\n", __func__,
cache_device, dev_map_idx);
- pr_debug("%s: endp - %p %p\n", __func__,
+ pr_debug("%s: endp - %pK %pK\n", __func__,
&ds2_dap_params[cache_device], ds2_ap_params_obj);
params_value = kzalloc(params_length, GFP_KERNEL);
@@ -1189,7 +1189,7 @@ static int msm_ds2_dap_send_cached_params(int dev_map_idx,
}
ds2_ap_params_obj = &ds2_dap_params[cache_device];
- pr_debug("%s: cached param - %p %p, cache_device %d\n", __func__,
+ pr_debug("%s: cached param - %pK %pK, cache_device %d\n", __func__,
&ds2_dap_params[cache_device], ds2_ap_params_obj,
cache_device);
params_value = kzalloc(params_length, GFP_KERNEL);
diff --git a/sound/soc/msm/qdsp6v2/msm-dts-eagle.c b/sound/soc/msm/qdsp6v2/msm-dts-eagle.c
index 9b1443f376f6..7a23a170be67 100644
--- a/sound/soc/msm/qdsp6v2/msm-dts-eagle.c
+++ b/sound/soc/msm/qdsp6v2/msm-dts-eagle.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -463,7 +463,7 @@ static int _sendcache_pre(struct audio_client *ac)
err = -EINVAL;
if ((_depc_size == 0) || !_depc || (size == 0) ||
cmd == 0 || ((offset + size) > _depc_size) || (err != 0)) {
- eagle_precache_err("%s: primary device %i cache index %i general error - cache size = %u, cache ptr = %p, offset = %u, size = %u, cmd = %i",
+ eagle_precache_err("%s: primary device %i cache index %i general error - cache size = %u, cache ptr = %pK, offset = %u, size = %u, cmd = %i",
__func__, _device_primary, cidx, _depc_size, _depc,
offset, size, cmd);
return -EINVAL;
@@ -547,7 +547,7 @@ NT_MODE_GOTO:
err = -EINVAL;
if ((_depc_size == 0) || !_depc || (err != 0) || (size == 0) ||
(cmd == 0) || (offset + size) > _depc_size) {
- eagle_postcache_err("%s: primary device %i cache index %i port_id 0x%X general error - cache size = %u, cache ptr = %p, offset = %u, size = %u, cmd = %i",
+ eagle_postcache_err("%s: primary device %i cache index %i port_id 0x%X general error - cache size = %u, cache ptr = %pK, offset = %u, size = %u, cmd = %i",
__func__, _device_primary, cidx, port_id,
_depc_size, _depc, offset, size, cmd);
return -EINVAL;
@@ -1012,7 +1012,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
eagle_ioctl_info("%s: called with control 0x%X (allocate param cache)",
__func__, cmd);
if (copy_from_user((void *)&size, (void *)arg, sizeof(size))) {
- eagle_ioctl_err("%s: error copying size (src:%p, tgt:%p, size:%zu)",
+ eagle_ioctl_err("%s: error copying size (src:%pK, tgt:%pK, size:%zu)",
__func__, (void *)arg, &size, sizeof(size));
return -EFAULT;
} else if (size > DEPC_MAX_SIZE) {
@@ -1052,7 +1052,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
eagle_ioctl_info("%s: control 0x%X (get param)",
__func__, cmd);
if (copy_from_user((void *)&depd, (void *)arg, sizeof(depd))) {
- eagle_ioctl_err("%s: error copying dts_eagle_param_desc (src:%p, tgt:%p, size:%zu)",
+ eagle_ioctl_err("%s: error copying dts_eagle_param_desc (src:%pK, tgt:%pK, size:%zu)",
__func__, (void *)arg, &depd, sizeof(depd));
return -EFAULT;
}
@@ -1123,7 +1123,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
eagle_ioctl_info("%s: control 0x%X (set param)",
__func__, cmd);
if (copy_from_user((void *)&depd, (void *)arg, sizeof(depd))) {
- eagle_ioctl_err("%s: error copying dts_eagle_param_desc (src:%p, tgt:%p, size:%zu)",
+ eagle_ioctl_err("%s: error copying dts_eagle_param_desc (src:%pK, tgt:%pK, size:%zu)",
__func__, (void *)arg, &depd, sizeof(depd));
return -EFAULT;
}
@@ -1156,7 +1156,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
if (copy_from_user((void *)&_depc[offset],
(void *)(((char *)arg)+sizeof(depd)),
depd.size)) {
- eagle_ioctl_err("%s: error copying param to cache (src:%p, tgt:%p, size:%u)",
+ eagle_ioctl_err("%s: error copying param to cache (src:%pK, tgt:%pK, size:%u)",
__func__, ((char *)arg)+sizeof(depd),
&_depc[offset], depd.size);
return -EFAULT;
@@ -1175,7 +1175,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
eagle_ioctl_info("%s: with control 0x%X (set param cache block)",
__func__, cmd);
if (copy_from_user((void *)b_, (void *)arg, sizeof(b_))) {
- eagle_ioctl_err("%s: error copying cache block data (src:%p, tgt:%p, size:%zu)",
+ eagle_ioctl_err("%s: error copying cache block data (src:%pK, tgt:%pK, size:%zu)",
__func__, (void *)arg, b_, sizeof(b_));
return -EFAULT;
}
@@ -1206,7 +1206,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
eagle_ioctl_dbg("%s: with control 0x%X (set active device)",
__func__, cmd);
if (copy_from_user((void *)data, (void *)arg, sizeof(data))) {
- eagle_ioctl_err("%s: error copying active device data (src:%p, tgt:%p, size:%zu)",
+ eagle_ioctl_err("%s: error copying active device data (src:%pK, tgt:%pK, size:%zu)",
__func__, (void *)arg, data, sizeof(data));
return -EFAULT;
}
@@ -1228,7 +1228,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
__func__, cmd);
if (copy_from_user((void *)&target, (void *)arg,
sizeof(target))) {
- eagle_ioctl_err("%s: error reading license index. (src:%p, tgt:%p, size:%zu)",
+ eagle_ioctl_err("%s: error reading license index. (src:%pK, tgt:%pK, size:%zu)",
__func__, (void *)arg, &target, sizeof(target));
return -EFAULT;
}
@@ -1275,7 +1275,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
cmd);
if (copy_from_user((void *)target, (void *)arg,
sizeof(target))) {
- eagle_ioctl_err("%s: error reading license index (src:%p, tgt:%p, size:%zu)",
+ eagle_ioctl_err("%s: error reading license index (src:%pK, tgt:%pK, size:%zu)",
__func__, (void *)arg, target, sizeof(target));
return -EFAULT;
}
@@ -1318,7 +1318,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
(void *)&(((u32 *)_sec_blob[target[0]])[1]),
(void *)(((char *)arg)+sizeof(target)),
target[1])) {
- eagle_ioctl_err("%s: error copying license to index %u, size %u (src:%p, tgt:%p, size:%u)",
+ eagle_ioctl_err("%s: error copying license to index %u, size %u (src:%pK, tgt:%pK, size:%u)",
__func__, target[0], target[1],
((char *)arg)+sizeof(target),
&(((u32 *)_sec_blob[target[0]])[1]),
@@ -1335,7 +1335,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
cmd);
if (copy_from_user((void *)&target, (void *)arg,
sizeof(target))) {
- eagle_ioctl_err("%s: error reading license index (src:%p, tgt:%p, size:%zu)",
+ eagle_ioctl_err("%s: error reading license index (src:%pK, tgt:%pK, size:%zu)",
__func__, (void *)arg, &target, sizeof(target));
return -EFAULT;
}
@@ -1365,7 +1365,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
__func__, cmd);
if (copy_from_user((void *)&spec, (void *)arg,
sizeof(spec))) {
- eagle_ioctl_err("%s: error reading volume command specifier (src:%p, tgt:%p, size:%zu)",
+ eagle_ioctl_err("%s: error reading volume command specifier (src:%pK, tgt:%pK, size:%zu)",
__func__, (void *)arg, &spec, sizeof(spec));
return -EFAULT;
}
@@ -1387,7 +1387,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
if (copy_from_user((void *)&_vol_cmds_d[idx],
(void *)(((char *)arg) + sizeof(int)),
sizeof(struct vol_cmds_d))) {
- eagle_ioctl_err("%s: error reading volume command descriptor (src:%p, tgt:%p, size:%zu)",
+ eagle_ioctl_err("%s: error reading volume command descriptor (src:%pK, tgt:%pK, size:%zu)",
__func__, ((char *)arg) + sizeof(int),
&_vol_cmds_d[idx],
sizeof(struct vol_cmds_d));
@@ -1400,7 +1400,7 @@ int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
if (copy_from_user((void *)_vol_cmds[idx],
(void *)(((char *)arg) + (sizeof(int) +
sizeof(struct vol_cmds_d))), size)) {
- eagle_ioctl_err("%s: error reading volume command string (src:%p, tgt:%p, size:%i)",
+ eagle_ioctl_err("%s: error reading volume command string (src:%pK, tgt:%pK, size:%i)",
__func__, ((char *)arg) + (sizeof(int) +
sizeof(struct vol_cmds_d)),
_vol_cmds[idx], size);
diff --git a/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.c b/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.c
index ddfbcecd0c40..7c35d19bb610 100644
--- a/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -292,7 +292,7 @@ static int reg_ion_mem(void)
&po.kvaddr);
if (rc != 0)
pr_err("%s: failed to allocate memory.\n", __func__);
- pr_debug("%s: exited ion_client = %p, ion_handle = %p, phys_addr = %lu, length = %d, vaddr = %p, rc = 0x%x\n",
+ pr_debug("%s: exited ion_client = %pK, ion_handle = %pK, phys_addr = %lu, length = %d, vaddr = %pK, rc = 0x%x\n",
__func__, ion_client, ion_handle, (long)po.paddr,
(unsigned int)po.size, po.kvaddr, rc);
return rc;
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index fab1dd4311f5..52830c9cbecb 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -95,7 +95,7 @@ static int msm_lsm_queue_lab_buffer(struct lsm_priv *prtd, int i)
struct snd_soc_pcm_runtime *rtd;
if (!prtd || !prtd->lsm_client) {
- pr_err("%s: Invalid params prtd %p lsm client %p\n",
+ pr_err("%s: Invalid params prtd %pK lsm client %pK\n",
__func__, prtd, ((!prtd) ? NULL : prtd->lsm_client));
return -EINVAL;
}
@@ -109,7 +109,7 @@ static int msm_lsm_queue_lab_buffer(struct lsm_priv *prtd, int i)
if (!prtd->lsm_client->lab_buffer ||
i >= prtd->lsm_client->hw_params.period_count) {
dev_err(rtd->dev,
- "%s: Lab buffer not setup %p incorrect index %d period count %d\n",
+ "%s: Lab buffer not setup %pK incorrect index %d period count %d\n",
__func__, prtd->lsm_client->lab_buffer, i,
prtd->lsm_client->hw_params.period_count);
return -EINVAL;
@@ -137,7 +137,7 @@ static int lsm_lab_buffer_sanity(struct lsm_priv *prtd,
struct snd_soc_pcm_runtime *rtd;
if (!prtd || !read_done || !index) {
- pr_err("%s: Invalid params prtd %p read_done %p index %p\n",
+ pr_err("%s: Invalid params prtd %pK read_done %pK index %pK\n",
__func__, prtd, read_done, index);
return -EINVAL;
}
@@ -151,7 +151,7 @@ static int lsm_lab_buffer_sanity(struct lsm_priv *prtd,
if (!prtd->lsm_client->lab_enable || !prtd->lsm_client->lab_buffer) {
dev_err(rtd->dev,
- "%s: Lab not enabled %d invalid lab buffer %p\n",
+ "%s: Lab not enabled %d invalid lab buffer %pK\n",
__func__, prtd->lsm_client->lab_enable,
prtd->lsm_client->lab_buffer);
return -EINVAL;
@@ -165,7 +165,7 @@ static int lsm_lab_buffer_sanity(struct lsm_priv *prtd,
(prtd->lsm_client->lab_buffer[i].mem_map_handle ==
read_done->mem_map_handle)) {
dev_dbg(rtd->dev,
- "%s: Buffer found %pa memmap handle %d\n",
+ "%s: Buffer found %pK memmap handle %d\n",
__func__, &prtd->lsm_client->lab_buffer[i].phys,
prtd->lsm_client->lab_buffer[i].mem_map_handle);
if (read_done->total_size >
@@ -212,7 +212,7 @@ static void lsm_event_handler(uint32_t opcode, uint32_t token,
if (prtd->lsm_client->session != token ||
!read_done) {
dev_err(rtd->dev,
- "%s: EVENT_READ_DONE invalid callback, session %d callback %d payload %p",
+ "%s: EVENT_READ_DONE invalid callback, session %d callback %d payload %pK",
__func__, prtd->lsm_client->session,
token, read_done);
return;
@@ -310,7 +310,7 @@ static int msm_lsm_lab_buffer_alloc(struct lsm_priv *lsm, int alloc)
int ret = 0;
struct snd_dma_buffer *dma_buf = NULL;
if (!lsm) {
- pr_err("%s: Invalid param lsm %p\n", __func__, lsm);
+ pr_err("%s: Invalid param lsm %pK\n", __func__, lsm);
return -EINVAL;
}
if (alloc) {
@@ -781,7 +781,7 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
snd_model_v2.data, snd_model_v2.data_size)) {
dev_err(rtd->dev,
"%s: copy from user data failed\n"
- "data %p size %d\n", __func__,
+ "data %pK size %d\n", __func__,
snd_model_v2.data, snd_model_v2.data_size);
q6lsm_snd_model_buf_free(prtd->lsm_client);
rc = -EFAULT;
@@ -1795,7 +1795,7 @@ static int msm_lsm_hw_params(struct snd_pcm_substream *substream,
if (!prtd || !params) {
dev_err(rtd->dev,
- "%s: invalid params prtd %p params %p",
+ "%s: invalid params prtd %pK params %pK",
__func__, prtd, params);
return -EINVAL;
}
@@ -1837,7 +1837,7 @@ static snd_pcm_uframes_t msm_lsm_pcm_pointer(
if (!prtd) {
dev_err(rtd->dev,
- "%s: Invalid param %p\n", __func__, prtd);
+ "%s: Invalid param %pK\n", __func__, prtd);
return 0;
}
@@ -1865,7 +1865,7 @@ static int msm_lsm_pcm_copy(struct snd_pcm_substream *substream, int ch,
if (!prtd) {
dev_err(rtd->dev,
- "%s: Invalid param %p\n", __func__, prtd);
+ "%s: Invalid param %pK\n", __func__, prtd);
return -EINVAL;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
index 6144619ca39a..d65108e04e0b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
@@ -430,7 +430,7 @@ static int msm_afe_open(struct snd_pcm_substream *substream)
pr_err("Failed to allocate memory for msm_audio\n");
return -ENOMEM;
} else
- pr_debug("prtd %p\n", prtd);
+ pr_debug("prtd %pK\n", prtd);
mutex_init(&prtd->lock);
spin_lock_init(&prtd->dsp_lock);
@@ -500,7 +500,7 @@ static int msm_afe_playback_copy(struct snd_pcm_substream *substream,
char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, hwoff);
u32 mem_map_handle = 0;
- pr_debug("%s : appl_ptr 0x%lx hw_ptr 0x%lx dest_to_copy 0x%p\n",
+ pr_debug("%s : appl_ptr 0x%lx hw_ptr 0x%lx dest_to_copy 0x%pK\n",
__func__,
runtime->control->appl_ptr, runtime->status->hw_ptr, hwbuf);
@@ -588,7 +588,7 @@ static int msm_afe_capture_copy(struct snd_pcm_substream *substream,
}
atomic_set(&prtd->rec_bytes_avail, 0);
}
- pr_debug("%s:appl_ptr 0x%lx hw_ptr 0x%lx src_to_copy 0x%p\n",
+ pr_debug("%s:appl_ptr 0x%lx hw_ptr 0x%lx src_to_copy 0x%pK\n",
__func__, runtime->control->appl_ptr,
runtime->status->hw_ptr, hwbuf);
@@ -792,7 +792,7 @@ static int msm_afe_hw_params(struct snd_pcm_substream *substream,
return -ENOMEM;
}
- pr_debug("%s:buf = %p\n", __func__, buf);
+ pr_debug("%s:buf = %pK\n", __func__, buf);
dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
dma_buf->dev.dev = substream->pcm->card->dev;
dma_buf->private_data = NULL;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c
index c1909778e082..48f4a2456c84 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -504,7 +504,7 @@ static int hpcm_allocate_shared_memory(struct hpcm_drv *prtd)
sess->tp_mem_table.size = sizeof(struct vss_imemory_table_t);
- pr_debug("%s: data %p phys %pa\n", __func__,
+ pr_debug("%s: data %pK phys %pK\n", __func__,
sess->tp_mem_table.data, &sess->tp_mem_table.phys);
/* Split 4096 block into four 1024 byte blocks for each dai */
@@ -682,7 +682,7 @@ void hpcm_notify_evt_processing(uint8_t *data, char *session,
}
if (tp == NULL || tmd == NULL) {
- pr_err("%s: tp = %p or tmd = %p is null\n", __func__,
+ pr_err("%s: tp = %pK or tmd = %pK is null\n", __func__,
tp, tmd);
return;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index dea1d8aa5819..c456134b87fa 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -764,7 +764,7 @@ static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
pr_debug("%s: pcm stopped in_count 0\n", __func__);
return 0;
}
- pr_debug("Checking if valid buffer is available...%p\n",
+ pr_debug("Checking if valid buffer is available...%pK\n",
data);
data = q6asm_is_cpu_buf_avail(OUT, prtd->audio_client, &size, &idx);
bufptr = data;
@@ -921,7 +921,7 @@ static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
if (buf == NULL || buf[0].data == NULL)
return -ENOMEM;
- pr_debug("%s:buf = %p\n", __func__, buf);
+ pr_debug("%s:buf = %pK\n", __func__, buf);
dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
dma_buf->dev.dev = substream->pcm->card->dev;
dma_buf->private_data = NULL;
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index baa759c72c6e..6877985ca05e 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -377,7 +377,7 @@ int adm_dts_eagle_get(int port_id, int copp_idx, int param_id,
}
if ((size == 0) || !data) {
- pr_err("DTS_EAGLE_ADM: %s - invalid size %u or pointer %p.\n",
+ pr_err("DTS_EAGLE_ADM: %s - invalid size %u or pointer %pK.\n",
__func__, size, data);
return -EINVAL;
}
@@ -1240,7 +1240,7 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv)
payload = data->payload;
if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: Reset event is received: %d %d apr[%p]\n",
+ pr_debug("%s: Reset event is received: %d %d apr[%pK]\n",
__func__,
data->reset_event, data->reset_proc, this_adm.apr);
if (this_adm.apr) {
@@ -1719,7 +1719,7 @@ static int remap_cal_data(struct cal_block_data *cal_block, int cal_index)
pr_err("%s: ADM mmap did not work! size = %zd ret %d\n",
__func__,
cal_block->map_data.map_size, ret);
- pr_debug("%s: ADM mmap did not work! addr = 0x%pa, size = %zd ret %d\n",
+ pr_debug("%s: ADM mmap did not work! addr = 0x%pK, size = %zd ret %d\n",
__func__,
&cal_block->cal_data.paddr,
cal_block->map_data.map_size, ret);
@@ -1786,7 +1786,7 @@ static void send_adm_custom_topology(void)
adm_top.payload_size = cal_block->cal_data.size;
atomic_set(&this_adm.adm_stat, -1);
- pr_debug("%s: Sending ADM_CMD_ADD_TOPOLOGIES payload = 0x%pa, size = %d\n",
+ pr_debug("%s: Sending ADM_CMD_ADD_TOPOLOGIES payload = 0x%pK, size = %d\n",
__func__, &cal_block->cal_data.paddr,
adm_top.payload_size);
result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_top);
@@ -1875,14 +1875,14 @@ static int send_adm_cal_block(int port_id, int copp_idx,
adm_params.payload_size = cal_block->cal_data.size;
atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- pr_debug("%s: Sending SET_PARAMS payload = 0x%pa, size = %d\n",
+ pr_debug("%s: Sending SET_PARAMS payload = 0x%pK, size = %d\n",
__func__, &cal_block->cal_data.paddr,
adm_params.payload_size);
result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
if (result < 0) {
pr_err("%s: Set params failed port 0x%x result %d\n",
__func__, port_id, result);
- pr_debug("%s: Set params failed port = 0x%x payload = 0x%pa result %d\n",
+ pr_debug("%s: Set params failed port = 0x%x payload = 0x%pK result %d\n",
__func__, port_id, &cal_block->cal_data.paddr, result);
result = -EINVAL;
goto done;
@@ -1894,7 +1894,7 @@ static int send_adm_cal_block(int port_id, int copp_idx,
if (!result) {
pr_err("%s: Set params timed out port = 0x%x\n",
__func__, port_id);
- pr_debug("%s: Set params timed out port = 0x%x, payload = 0x%pa\n",
+ pr_debug("%s: Set params timed out port = 0x%x, payload = 0x%pK\n",
__func__, port_id, &cal_block->cal_data.paddr);
result = -EINVAL;
goto done;
@@ -2360,7 +2360,7 @@ int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
res = adm_memory_map_regions(&this_adm.outband_memmap.paddr, 0,
(uint32_t *)&this_adm.outband_memmap.size, 1);
if (res < 0) {
- pr_err("%s: SRS adm_memory_map_regions failed ! addr = 0x%p, size = %d\n",
+ pr_err("%s: SRS adm_memory_map_regions failed ! addr = 0x%pK, size = %d\n",
__func__, (void *)this_adm.outband_memmap.paddr,
(uint32_t)this_adm.outband_memmap.size);
}
@@ -2968,7 +2968,7 @@ int adm_map_rtac_block(struct rtac_cal_block_data *cal_block)
pr_err("%s: RTAC mmap did not work! size = %d result %d\n",
__func__,
cal_block->map_data.map_size, result);
- pr_debug("%s: RTAC mmap did not work! addr = 0x%pa, size = %d\n",
+ pr_debug("%s: RTAC mmap did not work! addr = 0x%pK, size = %d\n",
__func__,
&cal_block->cal_data.paddr,
cal_block->map_data.map_size);
@@ -4149,7 +4149,7 @@ static int adm_source_tracking_alloc_map_memory(void)
(uint32_t *)&this_adm.sourceTrackingData.memmap.size,
1);
if (ret < 0) {
- pr_err("%s: failed to map memory, paddr = 0x%p, size = %d\n",
+ pr_err("%s: failed to map memory, paddr = 0x%pK, size = %d\n",
__func__,
(void *)this_adm.sourceTrackingData.memmap.paddr,
(uint32_t)this_adm.sourceTrackingData.memmap.size);
@@ -4169,7 +4169,7 @@ static int adm_source_tracking_alloc_map_memory(void)
goto done;
}
ret = 0;
- pr_debug("%s: paddr = 0x%p, size = %d, mem_map_handle = 0x%x\n",
+ pr_debug("%s: paddr = 0x%pK, size = %d, mem_map_handle = 0x%x\n",
__func__, (void *)this_adm.sourceTrackingData.memmap.paddr,
(uint32_t)this_adm.sourceTrackingData.memmap.size,
atomic_read(&this_adm.mem_map_handles
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 89d3fb7d6156..820ef080326d 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -260,7 +260,7 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
return -EINVAL;
}
if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: reset event = %d %d apr[%p]\n",
+ pr_debug("%s: reset event = %d %d apr[%pK]\n",
__func__,
data->reset_event, data->reset_proc, this_afe.apr);
@@ -309,7 +309,7 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
return 0;
if (!payload || (data->token >= AFE_MAX_PORTS)) {
- pr_err("%s: Error: size %d payload %p token %d\n",
+ pr_err("%s: Error: size %d payload %pK token %d\n",
__func__, data->payload_size,
payload, data->token);
return -EINVAL;
@@ -719,7 +719,7 @@ static int afe_send_cal_block(u16 port_id, struct cal_block_data *cal_block)
msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
afe_cal.param.mem_map_handle = cal_block->map_data.q6map_handle;
- pr_debug("%s: AFE cal sent for device port = 0x%x, cal size = %zd, cal addr = 0x%pa\n",
+ pr_debug("%s: AFE cal sent for device port = 0x%x, cal size = %zd, cal addr = 0x%pK\n",
__func__, port_id,
cal_block->cal_data.size, &cal_block->cal_data.paddr);
@@ -764,7 +764,7 @@ static int afe_send_custom_topology_block(struct cal_block_data *cal_block)
msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
afe_cal.mem_map_handle = cal_block->map_data.q6map_handle;
- pr_debug("%s:cmd_id:0x%x calsize:%zd memmap_hdl:0x%x caladdr:0x%pa",
+ pr_debug("%s:cmd_id:0x%x calsize:%zd memmap_hdl:0x%x caladdr:0x%pK",
__func__, AFE_CMD_ADD_TOPOLOGIES, cal_block->cal_data.size,
afe_cal.mem_map_handle, &cal_block->cal_data.paddr);
@@ -1363,7 +1363,7 @@ static int remap_cal_data(struct cal_block_data *cal_block, int cal_index)
pr_err("%s: mmap did not work! size = %zd ret %d\n",
__func__,
cal_block->map_data.map_size, ret);
- pr_debug("%s: mmap did not work! addr = 0x%pa, size = %zd\n",
+ pr_debug("%s: mmap did not work! addr = 0x%pK, size = %zd\n",
__func__,
&cal_block->cal_data.paddr,
cal_block->map_data.map_size);
@@ -3727,7 +3727,7 @@ int q6afe_audio_client_buf_alloc_contiguous(unsigned int dir,
size_t len;
if (!(ac) || ((dir != IN) && (dir != OUT))) {
- pr_err("%s: ac %p dir %d\n", __func__, ac, dir);
+ pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
return -EINVAL;
}
@@ -3779,7 +3779,7 @@ int q6afe_audio_client_buf_alloc_contiguous(unsigned int dir,
buf[cnt].used = dir ^ 1;
buf[cnt].size = bufsz;
buf[cnt].actual_size = bufsz;
- pr_debug("%s: data[%p]phys[%pa][%p]\n", __func__,
+ pr_debug("%s: data[%pK]phys[%pK][%pK]\n", __func__,
buf[cnt].data,
&buf[cnt].phys,
&buf[cnt].phys);
@@ -3873,7 +3873,7 @@ int afe_cmd_memory_map(phys_addr_t dma_addr_p, u32 dma_buf_sz)
mregion_pl->shm_addr_msw = msm_audio_populate_upper_32_bits(dma_addr_p);
mregion_pl->mem_size_bytes = dma_buf_sz;
- pr_debug("%s: dma_addr_p 0x%pa , size %d\n", __func__,
+ pr_debug("%s: dma_addr_p 0x%pK , size %d\n", __func__,
&dma_addr_p, dma_buf_sz);
atomic_set(&this_afe.state, 1);
atomic_set(&this_afe.status, 0);
@@ -3999,7 +3999,7 @@ int q6afe_audio_client_buf_free_contiguous(unsigned int dir,
cnt = port->max_buf_cnt - 1;
if (port->buf[0].data) {
- pr_debug("%s: data[%p]phys[%pa][%p] , client[%p] handle[%p]\n",
+ pr_debug("%s: data[%pK]phys[%pK][%pK] , client[%pK] handle[%pK]\n",
__func__,
port->buf[0].data,
&port->buf[0].phys,
@@ -4249,7 +4249,7 @@ int afe_rt_proxy_port_write(phys_addr_t buf_addr_p,
ret = -ENODEV;
return ret;
}
- pr_debug("%s: buf_addr_p = 0x%pa bytes = %d\n", __func__,
+ pr_debug("%s: buf_addr_p = 0x%pK bytes = %d\n", __func__,
&buf_addr_p, bytes);
afecmd_wr.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
@@ -4286,7 +4286,7 @@ int afe_rt_proxy_port_read(phys_addr_t buf_addr_p,
ret = -ENODEV;
return ret;
}
- pr_debug("%s: buf_addr_p = 0x%pa bytes = %d\n", __func__,
+ pr_debug("%s: buf_addr_p = 0x%pK bytes = %d\n", __func__,
&buf_addr_p, bytes);
afecmd_rd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
@@ -6127,7 +6127,7 @@ static int afe_map_cal_data(int32_t cal_type,
pr_err("%s: mmap did not work! size = %zd ret %d\n",
__func__,
cal_block->map_data.map_size, ret);
- pr_debug("%s: mmap did not work! addr = 0x%pa, size = %zd\n",
+ pr_debug("%s: mmap did not work! addr = 0x%pK, size = %zd\n",
__func__,
&cal_block->cal_data.paddr,
cal_block->map_data.map_size);
@@ -6284,7 +6284,7 @@ int afe_map_rtac_block(struct rtac_cal_block_data *cal_block)
result = afe_cmd_memory_map(cal_block->cal_data.paddr,
cal_block->map_data.map_size);
if (result < 0) {
- pr_err("%s: afe_cmd_memory_map failed for addr = 0x%pa, size = %d, err %d\n",
+ pr_err("%s: afe_cmd_memory_map failed for addr = 0x%pK, size = %d, err %d\n",
__func__, &cal_block->cal_data.paddr,
cal_block->map_data.map_size, result);
return result;
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 23261271bec8..116151aafc37 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -573,7 +573,7 @@ static int q6asm_map_cal_memory(int32_t cal_type,
pr_err("%s: mmap did not work! size = %zd result %d\n",
__func__,
cal_block->map_data.map_size, result);
- pr_debug("%s: mmap did not work! addr = 0x%pa, size = %zd\n",
+ pr_debug("%s: mmap did not work! addr = 0x%pK, size = %zd\n",
__func__,
&cal_block->cal_data.paddr,
cal_block->map_data.map_size);
@@ -720,7 +720,7 @@ int send_asm_custom_topology(struct audio_client *ac)
asm_top.mem_map_handle = cal_block->map_data.q6map_handle;
asm_top.payload_size = cal_block->cal_data.size;
- pr_debug("%s: Sending ASM_CMD_ADD_TOPOLOGIES payload = %pa, size = %d, map handle = 0x%x\n",
+ pr_debug("%s: Sending ASM_CMD_ADD_TOPOLOGIES payload = %pK, size = %d, map handle = 0x%x\n",
__func__, &cal_block->cal_data.paddr,
asm_top.payload_size, asm_top.mem_map_handle);
@@ -728,7 +728,7 @@ int send_asm_custom_topology(struct audio_client *ac)
if (result < 0) {
pr_err("%s: Set topologies failed result %d\n",
__func__, result);
- pr_debug("%s: Set topologies failed payload = 0x%pa\n",
+ pr_debug("%s: Set topologies failed payload = 0x%pK\n",
__func__, &cal_block->cal_data.paddr);
goto unmap;
@@ -738,7 +738,7 @@ int send_asm_custom_topology(struct audio_client *ac)
(atomic_read(&ac->mem_state) >= 0), 5*HZ);
if (!result) {
pr_err("%s: Set topologies failed timeout\n", __func__);
- pr_debug("%s: Set topologies failed after timedout payload = 0x%pa\n",
+ pr_debug("%s: Set topologies failed after timedout payload = 0x%pK\n",
__func__, &cal_block->cal_data.paddr);
result = -ETIMEDOUT;
goto unmap;
@@ -819,7 +819,7 @@ int q6asm_map_rtac_block(struct rtac_cal_block_data *cal_block)
pr_err("%s: mmap did not work! size = %d result %d\n",
__func__,
cal_block->map_data.map_size, result);
- pr_debug("%s: mmap did not work! addr = 0x%pa, size = %d\n",
+ pr_debug("%s: mmap did not work! addr = 0x%pK, size = %d\n",
__func__,
&cal_block->cal_data.paddr,
cal_block->map_data.map_size);
@@ -956,7 +956,7 @@ int q6asm_audio_client_buf_free_contiguous(unsigned int dir,
}
if (port->buf[0].data) {
- pr_debug("%s: data[%p]phys[%pa][%p] , client[%p] handle[%p]\n",
+ pr_debug("%s: data[%pK]phys[%pK][%pK] , client[%pK] handle[%pK]\n",
__func__,
port->buf[0].data,
&port->buf[0].phys,
@@ -988,7 +988,7 @@ void q6asm_audio_client_free(struct audio_client *ac)
struct audio_port_data *port;
if (!ac) {
- pr_err("%s: ac %p\n", __func__, ac);
+ pr_err("%s: ac %pK\n", __func__, ac);
return;
}
if (!ac->session) {
@@ -1202,7 +1202,7 @@ int q6asm_audio_client_buf_alloc(unsigned int dir,
size_t len;
if (!(ac) || ((dir != IN) && (dir != OUT))) {
- pr_err("%s: ac %p dir %d\n", __func__, ac, dir);
+ pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
return -EINVAL;
}
@@ -1255,7 +1255,7 @@ int q6asm_audio_client_buf_alloc(unsigned int dir,
buf[cnt].used = 1;
buf[cnt].size = bufsz;
buf[cnt].actual_size = bufsz;
- pr_debug("%s: data[%p]phys[%pa][%p]\n",
+ pr_debug("%s: data[%pK]phys[%pK][%pK]\n",
__func__,
buf[cnt].data,
&buf[cnt].phys,
@@ -1292,7 +1292,7 @@ int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir,
int bytes_to_alloc;
if (!(ac) || ((dir != IN) && (dir != OUT))) {
- pr_err("%s: ac %p dir %d\n", __func__, ac, dir);
+ pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
return -EINVAL;
}
@@ -1361,7 +1361,7 @@ int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir,
buf[cnt].used = dir ^ 1;
buf[cnt].size = bufsz;
buf[cnt].actual_size = bufsz;
- pr_debug("%s: data[%p]phys[%pa][%p]\n",
+ pr_debug("%s: data[%pK]phys[%pK][%pK]\n",
__func__,
buf[cnt].data,
&buf[cnt].phys,
@@ -1404,7 +1404,7 @@ static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv)
payload = data->payload;
if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: Reset event is received: %d %d apr[%p]\n",
+ pr_debug("%s: Reset event is received: %d %d apr[%pK]\n",
__func__,
data->reset_event,
data->reset_proc,
@@ -1603,7 +1603,7 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
return -EINVAL;
}
if (!q6asm_is_valid_audio_client(ac)) {
- pr_err("%s: audio client pointer is invalid, ac = %p\n",
+ pr_err("%s: audio client pointer is invalid, ac = %pK\n",
__func__, ac);
return -EINVAL;
}
@@ -1627,7 +1627,7 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
atomic_set(&ac->reset, 1);
if (ac->apr == NULL)
ac->apr = ac->apr2;
- pr_debug("%s: Reset event is received: %d %d apr[%p]\n",
+ pr_debug("%s: Reset event is received: %d %d apr[%pK]\n",
__func__,
data->reset_event, data->reset_proc, ac->apr);
if (ac->cb)
@@ -1770,7 +1770,7 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
payload[0] ||
msm_audio_populate_upper_32_bits(
port->buf[buf_index].phys) != payload[1]) {
- pr_debug("%s: Expected addr %pa\n",
+ pr_debug("%s: Expected addr %pK\n",
__func__, &port->buf[buf_index].phys);
pr_err("%s: rxedl[0x%x] rxedu [0x%x]\n",
__func__, payload[0], payload[1]);
@@ -1856,7 +1856,7 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
msm_audio_populate_upper_32_bits(
port->buf[buf_index].phys) !=
payload[READDONE_IDX_BUFADD_MSW]) {
- dev_vdbg(ac->dev, "%s: Expected addr %pa\n",
+ dev_vdbg(ac->dev, "%s: Expected addr %pK\n",
__func__, &port->buf[buf_index].phys);
pr_err("%s: rxedl[0x%x] rxedu[0x%x]\n",
__func__,
@@ -1944,7 +1944,7 @@ void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, uint32_t *size,
struct audio_port_data *port;
if (!ac || ((dir != IN) && (dir != OUT))) {
- pr_err("%s: ac %p dir %d\n", __func__, ac, dir);
+ pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
return NULL;
}
@@ -1971,7 +1971,7 @@ void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, uint32_t *size,
*size = port->buf[idx].actual_size;
*index = port->cpu_buf;
data = port->buf[idx].data;
- dev_vdbg(ac->dev, "%s: session[%d]index[%d] data[%p]size[%d]\n",
+ dev_vdbg(ac->dev, "%s: session[%d]index[%d] data[%pK]size[%d]\n",
__func__,
ac->session,
port->cpu_buf,
@@ -1996,7 +1996,7 @@ void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac,
struct audio_port_data *port;
if (!ac || ((dir != IN) && (dir != OUT))) {
- pr_err("%s: ac %p dir %d\n", __func__, ac, dir);
+ pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
return NULL;
}
@@ -2023,7 +2023,7 @@ void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac,
*size = port->buf[idx].actual_size;
*index = port->cpu_buf;
data = port->buf[idx].data;
- dev_vdbg(ac->dev, "%s: session[%d]index[%d] data[%p]size[%d]\n",
+ dev_vdbg(ac->dev, "%s: session[%d]index[%d] data[%pK]size[%d]\n",
__func__, ac->session, port->cpu_buf,
data, *size);
/*
@@ -2044,7 +2044,7 @@ int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac)
uint32_t idx;
if (!ac || (dir != OUT)) {
- pr_err("%s: ac %p dir %d\n", __func__, ac, dir);
+ pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
return ret;
}
@@ -2340,13 +2340,13 @@ int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format,
struct asm_stream_cmd_open_write_compressed open;
if (ac == NULL) {
- pr_err("%s: ac[%p] NULL\n", __func__, ac);
+ pr_err("%s: ac[%pK] NULL\n", __func__, ac);
rc = -EINVAL;
goto fail_cmd;
}
if (ac->apr == NULL) {
- pr_err("%s: APR handle[%p] NULL\n", __func__, ac->apr);
+ pr_err("%s: APR handle[%pK] NULL\n", __func__, ac->apr);
rc = -EINVAL;
goto fail_cmd;
}
@@ -4375,7 +4375,7 @@ int q6asm_memory_map(struct audio_client *ac, phys_addr_t buf_add, int dir,
ac->port[dir].tmp_hdl = 0;
port = &ac->port[dir];
- pr_debug("%s: buf_add 0x%pa, bufsz: %d\n", __func__,
+ pr_debug("%s: buf_add 0x%pK, bufsz: %d\n", __func__,
&buf_add, bufsz);
mregions->shm_addr_lsw = lower_32_bits(buf_add);
mregions->shm_addr_msw = msm_audio_populate_upper_32_bits(buf_add);
@@ -4564,7 +4564,7 @@ static int q6asm_memory_map_regions(struct audio_client *ac, int dir,
mmap_region_cmd;
q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size, dir);
atomic_set(&ac->mem_state, -1);
- pr_debug("%s: mmap_region=0x%p token=0x%x\n", __func__,
+ pr_debug("%s: mmap_region=0x%pK token=0x%x\n", __func__,
mmap_regions, ((ac->session << 8) | dir));
mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
@@ -4623,7 +4623,7 @@ static int q6asm_memory_map_regions(struct audio_client *ac, int dir,
buffer_node[i].mmap_hdl = ac->port[dir].tmp_hdl;
list_add_tail(&buffer_node[i].list,
&ac->port[dir].mem_map_handle);
- pr_debug("%s: i=%d, bufadd[i] = 0x%pa, maphdl[i] = 0x%x\n",
+ pr_debug("%s: i=%d, bufadd[i] = 0x%pK, maphdl[i] = 0x%x\n",
__func__, i, &buffer_node[i].buf_phys_addr,
buffer_node[i].mmap_hdl);
}
@@ -4963,7 +4963,7 @@ int q6asm_dts_eagle_set(struct audio_client *ac, int param_id, uint32_t size,
struct asm_dts_eagle_param *ad;
if (!ac || ac->apr == NULL || (size == 0) || !data) {
- pr_err("DTS_EAGLE_ASM - %s: APR handle NULL, invalid size %u or pointer %p.\n",
+ pr_err("DTS_EAGLE_ASM - %s: APR handle NULL, invalid size %u or pointer %pK.\n",
__func__, size, data);
return -EINVAL;
}
@@ -4974,7 +4974,7 @@ int q6asm_dts_eagle_set(struct audio_client *ac, int param_id, uint32_t size,
__func__, sz);
return -ENOMEM;
}
- pr_debug("DTS_EAGLE_ASM - %s: ac %p param_id 0x%x size %u data %p m_id 0x%x\n",
+ pr_debug("DTS_EAGLE_ASM - %s: ac %pK param_id 0x%x size %u data %pK m_id 0x%x\n",
__func__, ac, param_id, size, data, m_id);
q6asm_add_hdr_async(ac, &ad->hdr, sz, 1);
ad->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -4993,7 +4993,7 @@ int q6asm_dts_eagle_set(struct audio_client *ac, int param_id, uint32_t size,
if (po) {
struct list_head *ptr, *next;
struct asm_buffer_node *node;
- pr_debug("DTS_EAGLE_ASM - %s: using out of band memory (virtual %p, physical %lu)\n",
+ pr_debug("DTS_EAGLE_ASM - %s: using out of band memory (virtual %pK, physical %lu)\n",
__func__, po->kvaddr, (long)po->paddr);
ad->param.data_payload_addr_lsw = lower_32_bits(po->paddr);
ad->param.data_payload_addr_msw =
@@ -5070,7 +5070,7 @@ int q6asm_dts_eagle_get(struct audio_client *ac, int param_id, uint32_t size,
(po ? 0 : size);
if (!ac || ac->apr == NULL || (size == 0) || !data) {
- pr_err("DTS_EAGLE_ASM - %s: APR handle NULL, invalid size %u or pointer %p\n",
+ pr_err("DTS_EAGLE_ASM - %s: APR handle NULL, invalid size %u or pointer %pK\n",
__func__, size, data);
return -EINVAL;
}
@@ -5080,7 +5080,7 @@ int q6asm_dts_eagle_get(struct audio_client *ac, int param_id, uint32_t size,
__func__, sz);
return -ENOMEM;
}
- pr_debug("DTS_EAGLE_ASM - %s: ac %p param_id 0x%x size %u data %p m_id 0x%x\n",
+ pr_debug("DTS_EAGLE_ASM - %s: ac %pK param_id 0x%x size %u data %pK m_id 0x%x\n",
__func__, ac, param_id, size, data, m_id);
q6asm_add_hdr(ac, &ad->hdr, sz, TRUE);
ad->hdr.opcode = ASM_STREAM_CMD_GET_PP_PARAMS_V2;
@@ -5105,7 +5105,7 @@ int q6asm_dts_eagle_get(struct audio_client *ac, int param_id, uint32_t size,
if (po) {
struct list_head *ptr, *next;
struct asm_buffer_node *node;
- pr_debug("DTS_EAGLE_ASM - %s: using out of band memory (virtual %p, physical %lu)\n",
+ pr_debug("DTS_EAGLE_ASM - %s: using out of band memory (virtual %pK, physical %lu)\n",
__func__, po->kvaddr, (long)po->paddr);
ad->param.data_payload_addr_lsw = lower_32_bits(po->paddr);
ad->param.data_payload_addr_msw =
@@ -5554,7 +5554,7 @@ static int __q6asm_read(struct audio_client *ac, bool is_custom_len_reqd,
}
ab = &port->buf[dsp_buf];
- dev_vdbg(ac->dev, "%s: session[%d]dsp-buf[%d][%p]cpu_buf[%d][%pa]\n",
+ dev_vdbg(ac->dev, "%s: session[%d]dsp-buf[%d][%pK]cpu_buf[%d][%pK]\n",
__func__,
ac->session,
dsp_buf,
@@ -5585,7 +5585,7 @@ static int __q6asm_read(struct audio_client *ac, bool is_custom_len_reqd,
port->dsp_buf = q6asm_get_next_buf(ac, port->dsp_buf,
port->max_buf_cnt);
mutex_unlock(&port->lock);
- dev_vdbg(ac->dev, "%s: buf add[%pa] token[0x%x] uid[%d]\n",
+ dev_vdbg(ac->dev, "%s: buf add[%pK] token[0x%x] uid[%d]\n",
__func__, &ab->phys, read.hdr.token,
read.seq_id);
rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
@@ -5637,7 +5637,7 @@ int q6asm_read_nolock(struct audio_client *ac)
dsp_buf = port->dsp_buf;
ab = &port->buf[dsp_buf];
- dev_vdbg(ac->dev, "%s: session[%d]dsp-buf[%d][%p]cpu_buf[%d][%pa]\n",
+ dev_vdbg(ac->dev, "%s: session[%d]dsp-buf[%d][%pK]cpu_buf[%d][%pK]\n",
__func__,
ac->session,
dsp_buf,
@@ -5668,7 +5668,7 @@ int q6asm_read_nolock(struct audio_client *ac)
port->dsp_buf = q6asm_get_next_buf(ac, port->dsp_buf,
port->max_buf_cnt);
- dev_vdbg(ac->dev, "%s: buf add[%pa] token[0x%x] uid[%d]\n",
+ dev_vdbg(ac->dev, "%s: buf add[%pK] token[0x%x] uid[%d]\n",
__func__, &ab->phys, read.hdr.token,
read.seq_id);
rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
@@ -5731,7 +5731,7 @@ int q6asm_async_write(struct audio_client *ac,
else
lbuf_phys_addr = param->paddr;
- dev_vdbg(ac->dev, "%s: token[0x%x], buf_addr[%pa], buf_size[0x%x], ts_msw[0x%x], ts_lsw[0x%x], lbuf_phys_addr: 0x[%pa]\n",
+ dev_vdbg(ac->dev, "%s: token[0x%x], buf_addr[%pK], buf_size[0x%x], ts_msw[0x%x], ts_lsw[0x%x], lbuf_phys_addr: 0x[%pK]\n",
__func__,
write.hdr.token, &param->paddr,
write.buf_size, write.timestamp_msw,
@@ -5884,7 +5884,7 @@ int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
list);
write.mem_map_handle = buf_node->mmap_hdl;
- dev_vdbg(ac->dev, "%s: ab->phys[%pa]bufadd[0x%x] token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
+ dev_vdbg(ac->dev, "%s: ab->phys[%pK]bufadd[0x%x] token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
, __func__,
&ab->phys,
write.buf_addr_lsw,
@@ -5964,7 +5964,7 @@ int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
port->dsp_buf = q6asm_get_next_buf(ac, port->dsp_buf,
port->max_buf_cnt);
- dev_vdbg(ac->dev, "%s: ab->phys[%pa]bufadd[0x%x]token[0x%x] buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
+ dev_vdbg(ac->dev, "%s: ab->phys[%pK]bufadd[0x%x]token[0x%x] buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
, __func__,
&ab->phys,
write.buf_addr_lsw,
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index 1d3318165fac..cc26af528aba 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -188,7 +188,7 @@ void ocm_core_open(void)
if (q6core_lcl.core_handle_q == NULL)
q6core_lcl.core_handle_q = apr_register("ADSP", "CORE",
aprv2_core_fn_q, 0xFFFFFFFF, NULL);
- pr_debug("%s: Open_q %p\n", __func__, q6core_lcl.core_handle_q);
+ pr_debug("%s: Open_q %pK\n", __func__, q6core_lcl.core_handle_q);
if (q6core_lcl.core_handle_q == NULL)
pr_err("%s: Unable to register CORE\n", __func__);
}
@@ -351,7 +351,7 @@ int core_dts_eagle_set(int size, char *data)
pr_debug("DTS_EAGLE_CORE - %s\n", __func__);
if (size <= 0 || !data) {
- pr_err("DTS_EAGLE_CORE - %s: invalid size %i or pointer %p.\n",
+ pr_err("DTS_EAGLE_CORE - %s: invalid size %i or pointer %pK.\n",
__func__, size, data);
return -EINVAL;
}
@@ -397,7 +397,7 @@ int core_dts_eagle_get(int id, int size, char *data)
pr_debug("DTS_EAGLE_CORE - %s\n", __func__);
if (size <= 0 || !data) {
- pr_err("DTS_EAGLE_CORE - %s: invalid size %i or pointer %p.\n",
+ pr_err("DTS_EAGLE_CORE - %s: invalid size %i or pointer %pK.\n",
__func__, size, data);
return -EINVAL;
}
@@ -563,7 +563,7 @@ static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
++mregions;
}
- pr_debug("%s: sending memory map, addr %pa, size %d, bufcnt = %d\n",
+ pr_debug("%s: sending memory map, addr %pK, size %d, bufcnt = %d\n",
__func__, buf_add, bufsz[0], mmap_regions->num_regions);
*map_handle = 0;
@@ -745,7 +745,7 @@ static int q6core_send_custom_topologies(void)
q6core_lcl.adsp_status = 0;
q6core_lcl.bus_bw_resp_received = 0;
- pr_debug("%s: Register topologies addr %pa, size %zd, map handle %d\n",
+ pr_debug("%s: Register topologies addr %pK, size %zd, map handle %d\n",
__func__, &cal_block->cal_data.paddr, cal_block->cal_data.size,
cal_block->map_data.q6map_handle);
diff --git a/sound/soc/msm/qdsp6v2/q6lsm.c b/sound/soc/msm/qdsp6v2/q6lsm.c
index de397588d538..d0250535c69b 100644
--- a/sound/soc/msm/qdsp6v2/q6lsm.c
+++ b/sound/soc/msm/qdsp6v2/q6lsm.c
@@ -135,7 +135,7 @@ static int q6lsm_callback(struct apr_client_data *data, void *priv)
uint32_t *payload;
if (!client || !data) {
- pr_err("%s: client %p data %p\n",
+ pr_err("%s: client %pK data %pK\n",
__func__, client, data);
WARN_ON(1);
return -EINVAL;
@@ -862,7 +862,7 @@ int q6lsm_register_sound_model(struct lsm_client *client,
rmb();
cmd.mem_map_handle = client->sound_model.mem_map_handle;
- pr_debug("%s: addr %pa, size %d, handle 0x%x\n", __func__,
+ pr_debug("%s: addr %pK, size %d, handle 0x%x\n", __func__,
&client->sound_model.phys, cmd.model_size, cmd.mem_map_handle);
rc = q6lsm_apr_send_pkt(client, client->apr, &cmd, true, NULL);
if (rc)
@@ -936,7 +936,7 @@ static int q6lsm_memory_map_regions(struct lsm_client *client,
int rc;
int cmd_size = 0;
- pr_debug("%s: dma_addr_p 0x%pa, dma_buf_sz %d, mmap_p 0x%p, session %d\n",
+ pr_debug("%s: dma_addr_p 0x%pK, dma_buf_sz %d, mmap_p 0x%pK, session %d\n",
__func__, &dma_addr_p, dma_buf_sz, mmap_p,
client->session);
if (CHECK_SESSION(client->session)) {
@@ -1213,7 +1213,7 @@ int q6lsm_snd_model_buf_alloc(struct lsm_client *client, size_t len,
if (cal_block == NULL)
goto fail;
- pr_debug("%s:Snd Model len = %zd cal size %zd phys addr %pa", __func__,
+ pr_debug("%s:Snd Model len = %zd cal size %zd phys addr %pK", __func__,
len, cal_block->cal_data.size,
&cal_block->cal_data.paddr);
if (!cal_block->cal_data.paddr) {
@@ -1268,8 +1268,8 @@ int q6lsm_snd_model_buf_alloc(struct lsm_client *client, size_t len,
memcpy((client->sound_model.data + pad_zero +
client->sound_model.size),
(uint32_t *)cal_block->cal_data.kvaddr, client->lsm_cal_size);
- pr_debug("%s: Copy cal start virt_addr %p phy_addr %pa\n"
- "Offset cal virtual Addr %p\n", __func__,
+ pr_debug("%s: Copy cal start virt_addr %pK phy_addr %pK\n"
+ "Offset cal virtual Addr %pK\n", __func__,
client->sound_model.data, &client->sound_model.phys,
(pad_zero + client->sound_model.data +
client->sound_model.size));
@@ -1588,7 +1588,7 @@ int q6lsm_lab_control(struct lsm_client *client, u32 enable)
u32 param_size;
if (!client) {
- pr_err("%s: invalid param client %p\n", __func__, client);
+ pr_err("%s: invalid param client %pK\n", __func__, client);
return -EINVAL;
}
/* enable/disable lab on dsp */
@@ -1645,7 +1645,7 @@ int q6lsm_stop_lab(struct lsm_client *client)
{
int rc = 0;
if (!client) {
- pr_err("%s: invalid param client %p\n", __func__, client);
+ pr_err("%s: invalid param client %pK\n", __func__, client);
return -EINVAL;
}
rc = q6lsm_cmd(client, LSM_SESSION_CMD_EOB, true);
@@ -1658,7 +1658,7 @@ int q6lsm_read(struct lsm_client *client, struct lsm_cmd_read *read)
{
int rc = 0;
if (!client || !read) {
- pr_err("%s: Invalid params client %p read %p\n", __func__,
+ pr_err("%s: Invalid params client %pK read %pK\n", __func__,
client, read);
return -EINVAL;
}
@@ -1728,7 +1728,7 @@ int q6lsm_lab_buffer_alloc(struct lsm_client *client, bool alloc)
kfree(client->lab_buffer);
client->lab_buffer = NULL;
} else {
- pr_debug("%s: Memory map handle %x phys %pa size %d\n",
+ pr_debug("%s: Memory map handle %x phys %pK size %d\n",
__func__,
client->lab_buffer[0].mem_map_handle,
&client->lab_buffer[0].phys,
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index a008c696d256..084f7df9d243 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -359,7 +359,7 @@ static struct voice_data *voice_get_session(u32 session_id)
break;
}
- pr_debug("%s:session_id 0x%x session handle %p\n",
+ pr_debug("%s:session_id 0x%x session handle %pK\n",
__func__, session_id, v);
return v;
@@ -3502,7 +3502,7 @@ static int voice_map_cal_memory(struct cal_block_data *cal_block,
cal_block->map_data.map_size,
VOC_CAL_MEM_MAP_TOKEN);
if (result < 0) {
- pr_err("%s: Mmap did not work! addr = 0x%pa, size = %zd\n",
+ pr_err("%s: Mmap did not work! addr = 0x%pK, size = %zd\n",
__func__,
&cal_block->cal_data.paddr,
cal_block->map_data.map_size);
@@ -3542,7 +3542,7 @@ static int remap_cal_data(struct cal_block_data *cal_block,
goto done;
}
} else {
- pr_debug("%s: Cal block 0x%pa, size %zd already mapped. Q6 map handle = %d\n",
+ pr_debug("%s: Cal block 0x%pK, size %zd already mapped. Q6 map handle = %d\n",
__func__, &cal_block->cal_data.paddr,
cal_block->map_data.map_size,
cal_block->map_data.q6map_handle);
@@ -3740,7 +3740,7 @@ int voc_map_rtac_block(struct rtac_cal_block_data *cal_block)
if (!is_rtac_memory_allocated()) {
result = voice_alloc_rtac_mem_map_table();
if (result < 0) {
- pr_err("%s: RTAC alloc mem map table did not work! addr = 0x%pa, size = %d\n",
+ pr_err("%s: RTAC alloc mem map table did not work! addr = 0x%pK, size = %d\n",
__func__,
&cal_block->cal_data.paddr,
cal_block->map_data.map_size);
@@ -3755,7 +3755,7 @@ int voc_map_rtac_block(struct rtac_cal_block_data *cal_block)
cal_block->map_data.map_size,
VOC_RTAC_MEM_MAP_TOKEN);
if (result < 0) {
- pr_err("%s: RTAC mmap did not work! addr = 0x%pa, size = %d\n",
+ pr_err("%s: RTAC mmap did not work! addr = 0x%pK, size = %d\n",
__func__,
&cal_block->cal_data.paddr,
cal_block->map_data.map_size);
@@ -5105,7 +5105,7 @@ int voc_start_record(uint32_t port_id, uint32_t set, uint32_t session_id)
break;
}
- pr_debug("%s: port_id: %d, set: %d, v: %p\n",
+ pr_debug("%s: port_id: %d, set: %d, v: %pK\n",
__func__, port_id, set, v);
mutex_lock(&v->lock);
@@ -7025,12 +7025,12 @@ static int voice_alloc_oob_shared_mem(void)
cnt++;
}
- pr_debug("%s buf[0].data:[%p], buf[0].phys:[%pa], &buf[0].phys:[%p],\n",
+ pr_debug("%s buf[0].data:[%pK], buf[0].phys:[%pK], &buf[0].phys:[%pK],\n",
__func__,
(void *)v->shmem_info.sh_buf.buf[0].data,
&v->shmem_info.sh_buf.buf[0].phys,
(void *)&v->shmem_info.sh_buf.buf[0].phys);
- pr_debug("%s: buf[1].data:[%p], buf[1].phys[%pa], &buf[1].phys[%p]\n",
+ pr_debug("%s: buf[1].data:[%pK], buf[1].phys[%pK], &buf[1].phys[%pK]\n",
__func__,
(void *)v->shmem_info.sh_buf.buf[1].data,
&v->shmem_info.sh_buf.buf[1].phys,
@@ -7072,7 +7072,7 @@ static int voice_alloc_oob_mem_table(void)
}
v->shmem_info.memtbl.size = sizeof(struct vss_imemory_table_t);
- pr_debug("%s data[%p]phys[%pa][%p]\n", __func__,
+ pr_debug("%s data[%pK]phys[%pK][%pK]\n", __func__,
(void *)v->shmem_info.memtbl.data,
&v->shmem_info.memtbl.phys,
(void *)&v->shmem_info.memtbl.phys);
@@ -7390,7 +7390,7 @@ static int voice_alloc_cal_mem_map_table(void)
}
common.cal_mem_map_table.size = sizeof(struct vss_imemory_table_t);
- pr_debug("%s: data %p phys %pa\n", __func__,
+ pr_debug("%s: data %pK phys %pK\n", __func__,
common.cal_mem_map_table.data,
&common.cal_mem_map_table.phys);
@@ -7417,7 +7417,7 @@ static int voice_alloc_rtac_mem_map_table(void)
}
common.rtac_mem_map_table.size = sizeof(struct vss_imemory_table_t);
- pr_debug("%s: data %p phys %pa\n", __func__,
+ pr_debug("%s: data %pK phys %pK\n", __func__,
common.rtac_mem_map_table.data,
&common.rtac_mem_map_table.phys);
@@ -8038,7 +8038,7 @@ static int voice_alloc_source_tracking_shared_memory(void)
memset((void *)(common.source_tracking_sh_mem.sh_mem_block.data), 0,
common.source_tracking_sh_mem.sh_mem_block.size);
- pr_debug("%s: sh_mem_block: phys:[%pa], data:[0x%p], size:[%zd]\n",
+ pr_debug("%s: sh_mem_block: phys:[%pK], data:[0x%pK], size:[%zd]\n",
__func__,
&(common.source_tracking_sh_mem.sh_mem_block.phys),
(void *)(common.source_tracking_sh_mem.sh_mem_block.data),
@@ -8069,7 +8069,7 @@ static int voice_alloc_source_tracking_shared_memory(void)
memset((void *)(common.source_tracking_sh_mem.sh_mem_table.data), 0,
common.source_tracking_sh_mem.sh_mem_table.size);
- pr_debug("%s sh_mem_table: phys:[%pa], data:[0x%p], size:[%zd],\n",
+ pr_debug("%s sh_mem_table: phys:[%pK], data:[0x%pK], size:[%zd],\n",
__func__,
&(common.source_tracking_sh_mem.sh_mem_table.phys),
(void *)(common.source_tracking_sh_mem.sh_mem_table.data),
diff --git a/sound/soc/msm/qdsp6v2/rtac.c b/sound/soc/msm/qdsp6v2/rtac.c
index 39c02287815e..188f0d3e8c5d 100644
--- a/sound/soc/msm/qdsp6v2/rtac.c
+++ b/sound/soc/msm/qdsp6v2/rtac.c
@@ -157,7 +157,7 @@ int rtac_allocate_cal_buffer(uint32_t cal_type)
}
if (rtac_cal[cal_type].cal_data.paddr != 0) {
- pr_err("%s: memory already allocated! cal_type %d, paddr 0x%pa\n",
+ pr_err("%s: memory already allocated! cal_type %d, paddr 0x%pK\n",
__func__, cal_type, &rtac_cal[cal_type].cal_data.paddr);
result = -EPERM;
goto done;
@@ -176,7 +176,7 @@ int rtac_allocate_cal_buffer(uint32_t cal_type)
goto done;
}
- pr_debug("%s: cal_type %d, paddr 0x%pa, kvaddr 0x%p, map_size 0x%x\n",
+ pr_debug("%s: cal_type %d, paddr 0x%pK, kvaddr 0x%pK, map_size 0x%x\n",
__func__, cal_type,
&rtac_cal[cal_type].cal_data.paddr,
rtac_cal[cal_type].cal_data.kvaddr,
@@ -206,7 +206,7 @@ int rtac_free_cal_buffer(uint32_t cal_type)
result = msm_audio_ion_free(rtac_cal[cal_type].map_data.ion_client,
rtac_cal[cal_type].map_data.ion_handle);
if (result < 0) {
- pr_err("%s: ION free for RTAC failed! cal_type %d, paddr 0x%pa\n",
+ pr_err("%s: ION free for RTAC failed! cal_type %d, paddr 0x%pK\n",
__func__, cal_type, &rtac_cal[cal_type].cal_data.paddr);
goto done;
}
@@ -690,7 +690,7 @@ static int get_voice_index(u32 mode, u32 handle)
/* ADM APR */
void rtac_set_adm_handle(void *handle)
{
- pr_debug("%s: handle = %p\n", __func__, handle);
+ pr_debug("%s: handle = %pK\n", __func__, handle);
mutex_lock(&rtac_adm_apr_mutex);
rtac_adm_apr_data.apr_handle = handle;
@@ -748,7 +748,7 @@ int send_adm_apr(void *buf, u32 opcode)
if (copy_from_user(&user_buf_size, (void *)buf,
sizeof(user_buf_size))) {
- pr_err("%s: Copy from user failed! buf = 0x%p\n",
+ pr_err("%s: Copy from user failed! buf = 0x%pK\n",
__func__, buf);
goto done;
}
@@ -849,7 +849,7 @@ int send_adm_apr(void *buf, u32 opcode)
memcpy(rtac_adm_buffer, &adm_params, sizeof(adm_params));
atomic_set(&rtac_adm_apr_data.cmd_state, 1);
- pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pa\n",
+ pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pK\n",
__func__, opcode,
&rtac_cal[ADM_RTAC_CAL].cal_data.paddr);
@@ -968,7 +968,7 @@ int send_rtac_asm_apr(void *buf, u32 opcode)
if (copy_from_user(&user_buf_size, (void *)buf,
sizeof(user_buf_size))) {
- pr_err("%s: Copy from user failed! buf = 0x%p\n",
+ pr_err("%s: Copy from user failed! buf = 0x%pK\n",
__func__, buf);
goto done;
}
@@ -1069,7 +1069,7 @@ int send_rtac_asm_apr(void *buf, u32 opcode)
memcpy(rtac_asm_buffer, &asm_params, sizeof(asm_params));
atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 1);
- pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pa\n",
+ pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pK\n",
__func__, opcode,
&rtac_cal[ASM_RTAC_CAL].cal_data.paddr);
@@ -1209,7 +1209,7 @@ static int send_rtac_afe_apr(void *buf, uint32_t opcode)
if (copy_from_user(&user_afe_buf, (void *)buf,
sizeof(struct rtac_afe_user_data))) {
- pr_err("%s: Copy from user failed! buf = 0x%p\n",
+ pr_err("%s: Copy from user failed! buf = 0x%pK\n",
__func__, buf);
goto done;
}
@@ -1325,7 +1325,7 @@ static int send_rtac_afe_apr(void *buf, uint32_t opcode)
atomic_set(&rtac_afe_apr_data.cmd_state, 1);
- pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pa\n",
+ pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pK\n",
__func__, opcode,
&rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
@@ -1449,7 +1449,7 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode)
if (copy_from_user(&user_buf_size, (void *)buf,
sizeof(user_buf_size))) {
- pr_err("%s: Copy from user failed! buf = 0x%p\n",
+ pr_err("%s: Copy from user failed! buf = 0x%pK\n",
__func__, buf);
goto done;
}
@@ -1551,7 +1551,7 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode)
memcpy(rtac_voice_buffer, &voice_params, sizeof(voice_params));
atomic_set(&rtac_voice_apr_data[mode].cmd_state, 1);
- pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pa\n",
+ pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pK\n",
__func__, opcode,
&rtac_cal[VOICE_RTAC_CAL].cal_data.paddr);