-rw-r--r-- Documentation/devicetree/bindings/cnss/icnss.txt | 14
-rw-r--r-- Documentation/devicetree/bindings/gpu/adreno.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/ft5x06-ts.txt | 28
-rw-r--r-- Documentation/devicetree/bindings/platform/msm/ipa.txt | 3
-rw-r--r-- Documentation/misc-devices/qcom_invoke_driver.txt | 56
-rw-r--r-- arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi | 30
-rw-r--r-- arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi | 87
-rw-r--r-- arch/arm/boot/dts/qcom/msmcobalt.dtsi | 85
-rw-r--r-- arch/arm64/configs/msm-perf_defconfig | 1
-rw-r--r-- arch/arm64/configs/msm_defconfig | 1
-rw-r--r-- arch/arm64/configs/msmcortex-perf_defconfig | 1
-rw-r--r-- arch/arm64/configs/msmcortex_defconfig | 1
-rw-r--r-- arch/arm64/kernel/traps.c | 4
-rw-r--r-- arch/arm64/mm/fault.c | 4
-rw-r--r-- drivers/char/adsprpc.c | 30
-rw-r--r-- drivers/char/diag/Makefile | 2
-rw-r--r-- drivers/char/diag/diag_debugfs.c | 115
-rw-r--r-- drivers/char/diag/diagchar.h | 8
-rw-r--r-- drivers/char/diag/diagchar_core.c | 4
-rw-r--r-- drivers/char/diag/diagfwd.c | 9
-rw-r--r-- drivers/char/diag/diagfwd_glink.c | 702
-rw-r--r-- drivers/char/diag/diagfwd_glink.h | 53
-rw-r--r-- drivers/char/diag/diagfwd_peripheral.c | 143
-rw-r--r-- drivers/char/diag/diagfwd_peripheral.h | 15
-rw-r--r-- drivers/char/diag/diagfwd_smd.c | 25
-rw-r--r-- drivers/char/diag/diagfwd_socket.c | 30
-rw-r--r-- drivers/clk/msm/clock-mmss-cobalt.c | 6
-rw-r--r-- drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c | 378
-rw-r--r-- drivers/clk/msm/mdss/mdss-dp-pll-cobalt.c | 153
-rw-r--r-- drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h | 12
-rw-r--r-- drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c | 26
-rw-r--r-- drivers/gpu/msm/a5xx_reg.h | 1
-rw-r--r-- drivers/gpu/msm/adreno.c | 3
-rw-r--r-- drivers/gpu/msm/adreno_a5xx.c | 15
-rw-r--r-- drivers/gpu/msm/adreno_a5xx.h | 3
-rw-r--r-- drivers/gpu/msm/kgsl_device.h | 2
-rw-r--r-- drivers/gpu/msm/kgsl_pwrctrl.c | 250
-rw-r--r-- drivers/gpu/msm/kgsl_pwrctrl.h | 11
-rw-r--r-- drivers/gpu/msm/kgsl_trace.h | 5
-rw-r--r-- drivers/hwtracing/coresight/Kconfig | 8
-rw-r--r-- drivers/hwtracing/coresight/Makefile | 1
-rw-r--r-- drivers/hwtracing/coresight/coresight-event.c | 169
-rw-r--r-- drivers/hwtracing/coresight/coresight-tmc.c | 35
-rw-r--r-- drivers/hwtracing/coresight/coresight-tpiu.c | 12
-rw-r--r-- drivers/hwtracing/coresight/coresight.c | 19
-rw-r--r-- drivers/input/touchscreen/Kconfig | 9
-rw-r--r-- drivers/input/touchscreen/ft5x06_ts.c | 791
-rw-r--r-- drivers/misc/hdcp.c | 124
-rw-r--r-- drivers/misc/qcom/qdsp6v2/audio_wma.c | 4
-rw-r--r-- drivers/misc/qseecom.c | 366
-rw-r--r-- drivers/phy/phy-qcom-ufs-qmp-v3.h | 12
-rw-r--r-- drivers/platform/msm/ipa/ipa_api.c | 8
-rw-r--r-- drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c | 32
-rw-r--r-- drivers/platform/msm/ipa/ipa_common_i.h | 23
-rw-r--r-- drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c | 9
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipa.c | 60
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 4
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 4
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 1
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c | 8
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 25
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 9
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c | 56
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 30
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c | 179
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h | 21
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h | 27
-rw-r--r-- drivers/platform/msm/ipa/test/Makefile | 2
-rw-r--r-- drivers/platform/msm/ipa/test/ipa_test_mhi.c | 3306
-rw-r--r-- drivers/platform/msm/ipa/test/ipa_ut_framework.c | 96
-rw-r--r-- drivers/platform/msm/ipa/test/ipa_ut_framework.h | 40
-rw-r--r-- drivers/platform/msm/ipa/test/ipa_ut_i.h | 4
-rw-r--r-- drivers/platform/msm/ipa/test/ipa_ut_suite_list.h | 2
-rw-r--r-- drivers/power/qcom-charger/qpnp-smb2.c | 4
-rw-r--r-- drivers/power/qcom-charger/smb-lib.c | 38
-rw-r--r-- drivers/power/qcom-charger/smb-lib.h | 9
-rw-r--r-- drivers/power/qcom-charger/smb138x-charger.c | 7
-rw-r--r-- drivers/regulator/cpr3-mmss-regulator.c | 8
-rw-r--r-- drivers/regulator/cpr3-regulator.c | 5
-rw-r--r-- drivers/regulator/cpr3-regulator.h | 2
-rw-r--r-- drivers/regulator/cpr3-util.c | 6
-rw-r--r-- drivers/regulator/cprh-kbss-regulator.c | 77
-rw-r--r-- drivers/scsi/ufs/ufs-debugfs.c | 10
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 135
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 1
-rw-r--r-- drivers/soc/qcom/Kconfig | 6
-rw-r--r-- drivers/soc/qcom/Makefile | 1
-rw-r--r-- drivers/soc/qcom/glink.c | 35
-rw-r--r-- drivers/soc/qcom/icnss.c | 1659
-rw-r--r-- drivers/soc/qcom/rpm-smd.c | 58
-rw-r--r-- drivers/soc/qcom/smcinvoke.c | 500
-rw-r--r-- drivers/soc/qcom/smcinvoke_object.h | 51
-rw-r--r-- drivers/soc/qcom/subsys-pil-tz.c | 5
-rw-r--r-- drivers/usb/core/hcd.c | 1
-rw-r--r-- drivers/usb/core/notify.c | 31
-rw-r--r-- drivers/usb/core/usb.h | 1
-rw-r--r-- drivers/usb/gadget/function/f_gsi.c | 30
-rw-r--r-- drivers/video/fbdev/msm/mdp3.c | 18
-rw-r--r-- drivers/video/fbdev/msm/mdp3_dma.c | 2
-rw-r--r-- drivers/video/fbdev/msm/mdp3_ppp_hwio.c | 6
-rw-r--r-- drivers/video/fbdev/msm/mdss_compat_utils.c | 18
-rw-r--r-- drivers/video/fbdev/msm/mdss_debug.c | 49
-rw-r--r-- drivers/video/fbdev/msm/mdss_debug_xlog.c | 12
-rw-r--r-- drivers/video/fbdev/msm/mdss_dp.c | 206
-rw-r--r-- drivers/video/fbdev/msm/mdss_dp.h | 4
-rw-r--r-- drivers/video/fbdev/msm/mdss_dp_aux.c | 20
-rw-r--r-- drivers/video/fbdev/msm/mdss_dp_util.c | 173
-rw-r--r-- drivers/video/fbdev/msm/mdss_dp_util.h | 73
-rw-r--r-- drivers/video/fbdev/msm/mdss_dsi.c | 28
-rw-r--r-- drivers/video/fbdev/msm/mdss_dsi_clk.c | 6
-rw-r--r-- drivers/video/fbdev/msm/mdss_dsi_host.c | 2
-rw-r--r-- drivers/video/fbdev/msm/mdss_dsi_panel.c | 12
-rw-r--r-- drivers/video/fbdev/msm/mdss_dsi_phy_v3.c | 19
-rw-r--r-- drivers/video/fbdev/msm/mdss_fb.c | 12
-rw-r--r-- drivers/video/fbdev/msm/mdss_hdmi_tx.c | 9
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp.c | 12
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_ctl.c | 12
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c | 46
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_intf_video.c | 2
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_layer.c | 26
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_overlay.c | 14
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_pipe.c | 2
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_pp.c | 56
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c | 84
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_pp_common.c | 4
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_pp_stub.c | 6
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c | 82
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_pp_v3.c | 40
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_util.c | 9
-rw-r--r-- drivers/video/fbdev/msm/mdss_panel.h | 9
-rw-r--r-- drivers/video/fbdev/msm/mdss_util.c | 2
-rw-r--r-- drivers/video/fbdev/msm/mhl3/mhl_linux_tx.c | 2
-rw-r--r-- drivers/video/fbdev/msm/mhl3/mhl_supp.c | 12
-rw-r--r-- drivers/video/fbdev/msm/mhl3/platform.c | 6
-rw-r--r-- drivers/video/fbdev/msm/mhl3/si_8620_drv.c | 4
-rw-r--r-- drivers/video/fbdev/msm/mhl3/si_emsc_hid.c | 4
-rw-r--r-- drivers/video/fbdev/msm/mhl3/si_mdt_inputdev.c | 25
-rw-r--r-- drivers/video/fbdev/msm/mhl3/si_mhl2_edid_3d.c | 21
-rw-r--r-- drivers/video/fbdev/msm/msm_dba/adv7533.c | 6
-rw-r--r-- drivers/video/fbdev/msm/msm_ext_display.c | 129
-rw-r--r-- include/dt-bindings/clock/msm-clocks-cobalt.h | 9
-rw-r--r-- include/linux/coresight.h | 4
-rw-r--r-- include/linux/hdcp_qseecom.h | 15
-rw-r--r-- include/linux/input/ft5x06_ts.h | 10
-rw-r--r-- include/linux/ipa.h | 3
-rw-r--r-- include/linux/msm_ext_display.h | 1
-rw-r--r-- include/linux/usb.h | 3
-rw-r--r-- include/soc/qcom/qseecomi.h | 47
-rw-r--r-- include/sound/apr_audio-v2.h | 25
-rw-r--r-- include/sound/q6asm-v2.h | 4
-rw-r--r-- include/trace/events/exception.h | 124
-rw-r--r-- include/uapi/linux/Kbuild | 1
-rw-r--r-- include/uapi/linux/msm_ipa.h | 8
-rw-r--r-- include/uapi/linux/smcinvoke.h | 45
-rw-r--r-- include/uapi/sound/compress_params.h | 3
-rw-r--r-- kernel/cpuset.c | 38
-rw-r--r-- kernel/panic.c | 8
-rw-r--r-- sound/soc/msm/msmcobalt.c | 68
-rw-r--r-- [-rwxr-xr-x] sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c | 37
-rw-r--r-- sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c | 11
-rw-r--r-- sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c | 85
-rw-r--r-- sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h | 2
-rw-r--r-- sound/soc/msm/qdsp6v2/q6asm.c | 72
165 files changed, 10669 insertions, 1744 deletions
diff --git a/Documentation/devicetree/bindings/cnss/icnss.txt b/Documentation/devicetree/bindings/cnss/icnss.txt
index f088074fa3ef..da5159006a98 100644
--- a/Documentation/devicetree/bindings/cnss/icnss.txt
+++ b/Documentation/devicetree/bindings/cnss/icnss.txt
@@ -18,12 +18,13 @@ Required properties:
- qcom,wlan-smmu-iova-address: I/O virtual address range as <start length>
format to be used for allocations associated between WLAN and SMMU
- <supply-name>-supply: phandle to the regulator device tree node
- Required "supply-name" is "vdd-io".
- - qcom,<supply>-voltage-level - specifies voltage levels for supply. Should be
- specified in pairs (min, max), units uV.
+ Required "supply-name" is "vdd-0.8-cx-mx".
+ - qcom,<supply>-config - specifies voltage levels for the supply. Should be
specified in pairs (min, max), in units of uV. An optional
load in uA and an optional regulator settle delay in uS
may follow.
Optional properties:
- - qcom,skip-qmi: Boolean property to decide whether to use QMI or not
Example:
@@ -50,7 +51,6 @@ Example:
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
qcom,wlan-msa-memory = <0x200000>;
- qcom,skip-qmi;
- vdd-io-supply = <&pmcobalt_l5>;
- qcom,vdd-io-voltage-level = <800000 800000>;
+ vdd-0.8-cx-mx-supply = <&pmcobalt_l5>;
+ qcom,vdd-0.8-cx-mx-config = <800000 800000 2400 1000>;
};
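
The qcom,<supply>-config property above packs two mandatory cells (min uV, max uV) followed by an optional load in uA and an optional settle delay in uS. Below is a minimal sketch of how a consumer driver might read such a variable-length array; the helper and struct names are illustrative placeholders, not the actual icnss parsing code.

    #include <linux/of.h>
    #include <linux/types.h>

    /*
     * Hypothetical helper: reads a qcom,<supply>-config property of two to
     * four u32 cells (min_uV, max_uV, optional load_uA, optional delay_uS).
     * Not the actual icnss implementation; shown only to illustrate the
     * property layout documented above.
     */
    struct vreg_cfg {
    	u32 min_uv;
    	u32 max_uv;
    	u32 load_ua;	/* 0 when the optional cell is absent */
    	u32 delay_us;	/* 0 when the optional cell is absent */
    };

    static int read_vreg_cfg(struct device_node *np, const char *prop,
    			 struct vreg_cfg *cfg)
    {
    	u32 vals[4] = { 0 };
    	int len = of_property_count_u32_elems(np, prop);

    	if (len < 2 || len > 4)
    		return -EINVAL;
    	if (of_property_read_u32_array(np, prop, vals, len))
    		return -EINVAL;

    	cfg->min_uv = vals[0];
    	cfg->max_uv = vals[1];
    	cfg->load_ua = vals[2];		/* stays 0 if not specified */
    	cfg->delay_us = vals[3];	/* stays 0 if not specified */
    	return 0;
    }
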
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 92411011ed7a..fffb8cc39d0f 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -89,14 +89,10 @@ Optional Properties:
- qcom,gpubw-dev: a phandle to a device representing bus bandwidth requirements
(see devdw.txt)
- qcom,idle-timeout: This property represents the time in milliseconds for idle timeout.
-- qcom,deep-nap-timeout: This property represents the time in milliseconds for entering deeper
- power state.
- qcom,no-nap: If it exists software clockgating will be disabled at boot time.
- qcom,chipid: If it exists this property is used to replace
the chip identification read from the GPU hardware.
This is used to override faulty hardware readings.
-- qcom,strtstp-sleepwake: Boolean. Enables use of GPU SLUMBER instead of SLEEP for power savings
-- qcom,gx-retention: Boolean. Enables use of GX rail RETENTION voltage
- qcom,disable-busy-time-burst:
Boolean. Disables the busy time burst to avoid switching
of power level for large frames based on the busy time limit.
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ft5x06-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/ft5x06-ts.txt
index 1d04a7e365e1..c852394254ff 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ft5x06-ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ft5x06-ts.txt
@@ -27,6 +27,24 @@ Required properties:
- focaltech,group-id : group id of this device
- focaltech,hard-reset-delay-ms : hard reset delay in ms
- focaltech,soft-reset-delay-ms : soft reset delay in ms
+ - focaltech,fw-delay-aa-ms : specify the delay in ms after programming 0xaa
+ register for firmware upgrade
+ - focaltech,fw-delay-55-ms : specify the delay in ms after programming 0x55
+ register for firmware upgrade
+ - focaltech,fw-upgrade-id1 : specify the upgrade id1 for firmware upgrade
+ - focaltech,fw-upgrade-id2 : specify the upgrade id2 for firmware upgrade
+ - focaltech,fw-delay-readid-ms : specify the read id delay in ms for firmware upgrade
+ - focaltech,fw-delay-era-flsh-ms : specify the erase flash delay in ms for firmware upgrade
+ - pinctrl-names : This should be defined if a target uses the pinctrl framework.
+ See "pinctrl" in Documentation/devicetree/bindings/pinctrl/msm-pinctrl.txt.
+ Specify the names of the configs that pinctrl can install in the driver.
+ Following are the pinctrl configs that can be installed:
+ "pmx_ts_active" : Active configuration of pins, this should specify active
+ config defined in pin groups of interrupt and reset gpio.
+ "pmx_ts_suspend" : Disabled configuration of pins, this should specify sleep
+ config defined in pin groups of interrupt and reset gpio.
+ "pmx_ts_release" : Release configuration of pins, this should specify
+ release config defined in pin groups of interrupt and reset gpio.
Optional properties:
@@ -47,6 +65,11 @@ Optional properties:
- focaltech,fw-auto-cal : specify whether calibration is needed after firmware upgrade
- focaltech,fw-vkey-support : specify if virtual keys are supported through firmware
- focaltech,ignore-id-check : specify ignore family-id check
+ - focaltech,panel-coords : panel coordinates for the chip in pixels.
+ It is a four tuple consisting of min x,
+ min y, max x and max y values
+ - focaltech,fw-name : specify the firmware file name
+ - focaltech,psensor-support : specify whether the proximity sensor is supported
Example:
i2c@f9923000{
@@ -57,6 +80,10 @@ Example:
interrupts = <1 0x2>;
vdd-supply = <&pm8110_l19>;
vcc_i2c-supply = <&pm8110_l14>;
+ pinctrl-names = "pmx_ts_active","pmx_ts_suspend","pmx_ts_release";
+ pinctrl-0 = <&ts_int_active &ts_reset_active>;
+ pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+ pinctrl-2 = <&ts_release>;
focaltech,name = "ft6x06";
focaltech,family-id = <0x06>;
focaltech,reset-gpio = <&msmgpio 0 0x00>;
@@ -78,5 +105,6 @@ Example:
focaltech,fw-delay-readid-ms = <10>;
focaltech,fw-delay-era-flsh-ms = <2000>;
focaltech,fw-auto-cal;
+ focaltech,psensor-support;
};
};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 11f36d8d9ebd..f3166d33f9e4 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -212,6 +212,9 @@ qcom,ipa@fd4c0000 {
ipa_smmu_wlan: ipa_smmu_wlan {
compatible = "qcom,ipa-smmu-wlan-cb";
iommus = <&anoc2_smmu 0x31>;
+ qcom,additional-mapping =
+ /* ipa-uc ram */
+ <0x1E60000 0x1E60000 0x80000>;
};
ipa_smmu_uc: ipa_smmu_uc {
diff --git a/Documentation/misc-devices/qcom_invoke_driver.txt b/Documentation/misc-devices/qcom_invoke_driver.txt
new file mode 100644
index 000000000000..5ba6b27558ea
--- /dev/null
+++ b/Documentation/misc-devices/qcom_invoke_driver.txt
@@ -0,0 +1,56 @@
+Introduction:
+=============
+The invoke driver is a misc driver that enables communication between the
+non-secure and secure worlds. It communicates with the secure side using the
+SCM driver. To use the invoke driver, open must be called on the invoke
+device, i.e. /dev/invoke. The driver exposes a single IOCTL, invoke, which
+passes userspace requests to TZ.
+
+SW Architecture
+===============
+The following is the SW stack for the invoke driver.
+
++++++++++++++++++++++++++++++++++++++++++
++ Applications +
++++++++++++++++++++++++++++++++++++++++++
++ System Layer +
++++++++++++++++++++++++++++++++++++++++++
++ Kernel +
++ +++++++++++++++++++ +
++ + Invoke driver + +
++ +++++++++++++++++++ +
++ + SCM Driver + +
++++++++++++++++++++++++++++++++++++++++++
+ ||
+ ||
+ \/
++++++++++++++++++++++++++++++++++++++++++
++ Trust Zone +
++ +++++++++++ +++++++++++ +
++ + TZ App1 + + TZ App2 + +
++++++++++++++++++++++++++++++++++++++++++
+
+
+Interfaces
+==========
+The invoke driver exposes the INVOKE_IOCTL_INVOKE_REQ IOCTL for userspace to
+communicate with the driver. More details of the IOCTL are available in the
+corresponding header file.
+
+
+Driver Parameters
+=================
+This driver is built and statically linked into the kernel; therefore,
+there are no module parameters supported by this driver.
+
+There are no kernel command line parameters supported by this driver.
+
+Power Management
+================
+TBD
+
+Dependencies
+============
+The invoke driver depends on the SCM driver to communicate with TZ.
+
+
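
To make the interface described in the document above concrete, here is a minimal userspace sketch of opening /dev/invoke and issuing the invoke IOCTL. The request structure and ioctl number below are placeholders; the real definitions live in include/uapi/linux/smcinvoke.h (added elsewhere in this patch) and will differ.

    /*
     * Hypothetical userspace sketch only. The struct layout and ioctl
     * request number are placeholders, not the actual smcinvoke UAPI.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /* Placeholder for the real request defined in <linux/smcinvoke.h> */
    struct invoke_req_stub {
    	unsigned int op;	/* operation id on the target object */
    	void *args;		/* argument buffer */
    	unsigned int counts;	/* argument counts */
    };

    #define INVOKE_IOCTL_INVOKE_REQ_STUB _IOWR('I', 1, struct invoke_req_stub)

    int main(void)
    {
    	struct invoke_req_stub req;
    	int fd = open("/dev/invoke", O_RDWR);

    	if (fd < 0) {
    		perror("open /dev/invoke");
    		return 1;
    	}

    	memset(&req, 0, sizeof(req));
    	/* Fill req with the target object, operation and arguments,
    	 * then pass the request to the secure side. */
    	if (ioctl(fd, INVOKE_IOCTL_INVOKE_REQ_STUB, &req) < 0)
    		perror("INVOKE_IOCTL_INVOKE_REQ");

    	close(fd);
    	return 0;
    }
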
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi
index d273d757dba5..617e7ddd9730 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi
@@ -68,12 +68,6 @@
qcom,idle-timeout = <80>; //<HZ/12>
qcom,no-nap;
- /*
- * Timeout to enter deeper power saving state
- * from NAP.
- */
- qcom,deep-nap-timeout = <200>;
- qcom,strtstp-sleepwake;
qcom,highest-bank-bit = <15>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
index 6025d9b54351..b0e3751792dd 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
@@ -21,6 +21,9 @@
interrupt-controller;
#interrupt-cells = <1>;
vdd-supply = <&gdsc_mdss>;
+ vdd-cx-supply = <&pmcobalt_s1_level>;
+ vdd-cx-min-uV = <RPM_SMD_REGULATOR_LEVEL_LOW_SVS>;
+ vdd-cx-max-uV = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
/* Bus Scale Settings */
qcom,msm-bus,name = "mdss_mdp";
@@ -387,11 +390,11 @@
"byte_clk_rcg", "pixel_clk_rcg",
"byte_intf_clk";
- qcom,platform-strength-ctrl = [ff 03
- ff 03
- ff 03
- ff 03
- ff 00];
+ qcom,platform-strength-ctrl = [55 03
+ 55 03
+ 55 03
+ 55 03
+ 55 00];
qcom,platform-lane-config = [00 00 00 00
00 00 00 00
00 00 00 00
@@ -425,11 +428,11 @@
"byte_clk_rcg", "pixel_clk_rcg",
"byte_intf_clk";
- qcom,platform-strength-ctrl = [ff 03
- ff 03
- ff 03
- ff 03
- ff 00];
+ qcom,platform-strength-ctrl = [55 03
+ 55 03
+ 55 03
+ 55 03
+ 55 00];
qcom,platform-lane-config = [00 00 00 00
00 00 00 00
00 00 00 00
@@ -456,8 +459,11 @@
reg = <0xc990000 0xa84>,
<0xc011000 0x910>,
- <0x1fcb200 0x050>;
- reg-names = "dp_ctrl", "dp_phy", "tcsr_regs";
+ <0x1fcb200 0x050>,
+ <0x780000 0x621c>,
+ <0xc9e1000 0x02c>;
+ reg-names = "dp_ctrl", "dp_phy", "tcsr_regs",
+ "qfprom_physical","hdcp_physical";
clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_mdss_ahb_clk>,
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
index c3d5833461b1..256c404bb972 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
@@ -415,7 +415,7 @@
rpm-regulator-ldoa25 {
status = "okay";
pmcobalt_l25: regulator-l25 {
- regulator-min-microvolt = <3312000>;
+ regulator-min-microvolt = <3104000>;
regulator-max-microvolt = <3312000>;
status = "okay";
};
@@ -424,7 +424,7 @@
compatible = "qcom,rpm-smd-regulator";
regulator-name = "pmcobalt_l25_pin_ctrl";
qcom,set = <3>;
- regulator-min-microvolt = <3312000>;
+ regulator-min-microvolt = <3104000>;
regulator-max-microvolt = <3312000>;
/* Force NPM follows HW_EN2 */
qcom,init-pin-ctrl-mode = <4>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
index 47b1ba078b7c..f4d5e106e403 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
@@ -402,7 +402,7 @@
< 0 0 0 0 0 0 0 0
0 0 1468 0 1429 1256 0 0>,
< 0 0 0 0 0 0 0 0
- 0 0 0 0 0 1353 0 0>;
+ 0 0 1627 0 1578 1353 0 0>;
qcom,cpr-ro-scaling-factor =
< 0 0 0 0 3005 3111 0 0
@@ -485,3 +485,88 @@
<444000000 6000000>,
<533000000 6000000>;
};
+
+/* GPU overrides */
+&msm_gpu {
+ /* Updated chip ID */
+ qcom,chipid = <0x05040001>;
+ qcom,initial-pwrlevel = <6>;
+
+ qcom,gpu-pwrlevels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,gpu-pwrlevels";
+
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <710000000>;
+ qcom,bus-freq = <12>;
+ qcom,bus-min = <12>;
+ qcom,bus-max = <12>;
+ };
+
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <670000000>;
+ qcom,bus-freq = <12>;
+ qcom,bus-min = <11>;
+ qcom,bus-max = <12>;
+ };
+
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <596000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <12>;
+ };
+
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <515000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <12>;
+ };
+
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <414000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <342000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <9>;
+ };
+
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <257000000>;
+ qcom,bus-freq = <5>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <8>;
+ };
+
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <180000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <1>;
+ qcom,bus-max = <5>;
+ };
+ qcom,gpu-pwrlevel@8 {
+ reg = <8>;
+ qcom,gpu-freq = <27000000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index 007966a1e52f..b7230cdf0d71 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -755,8 +755,8 @@
<&mdss_dsi1_pll clk_dsi1pll_pclk_mux>,
<&mdss_dsi0_pll clk_dsi0pll_byteclk_mux>,
<&mdss_dsi1_pll clk_dsi1pll_byteclk_mux>,
- <&mdss_dp_pll clk_dp_link_2x_clk_mux>,
- <&mdss_dp_pll clk_vco_divided_clk_src>,
+ <&mdss_dp_pll clk_dp_link_2x_clk_divsel_five>,
+ <&mdss_dp_pll clk_vco_divided_clk_src_mux>,
<&mdss_hdmi_pll clk_hdmi_vco_clk>;
#clock-cells = <1>;
};
@@ -984,32 +984,39 @@
qcom,do-not-use-ch-gsi-20;
qcom,ipa-wdi2;
qcom,use-64-bit-dma-mask;
- clocks = <&clock_gcc clk_ipa_clk>,
- <&clock_gcc clk_aggre2_noc_clk>;
- clock-names = "core_clk", "smmu_clk";
+ clocks = <&clock_gcc clk_ipa_clk>;
+ clock-names = "core_clk";
qcom,arm-smmu;
qcom,smmu-disable-htw;
qcom,smmu-s1-bypass;
qcom,msm-bus,name = "ipa";
qcom,msm-bus,num-cases = <4>;
- qcom,msm-bus,num-paths = <3>;
+ qcom,msm-bus,num-paths = <4>;
qcom,msm-bus,vectors-KBps =
/* No vote */
<90 512 0 0>,
<90 585 0 0>,
<1 676 0 0>,
+ /* SMMU smmu_aggre2_noc_clk */
+ <81 10065 0 0>,
/* SVS */
<90 512 80000 640000>,
<90 585 80000 640000>,
<1 676 80000 80000>,
+ /* SMMU smmu_aggre2_noc_clk */
+ <81 10065 0 16000>,
/* NOMINAL */
<90 512 206000 960000>,
<90 585 206000 960000>,
<1 676 206000 160000>,
+ /* SMMU smmu_aggre2_noc_clk */
+ <81 10065 0 16000>,
/* TURBO */
<90 512 206000 3600000>,
<90 585 206000 3600000>,
- <1 676 206000 300000>;
+ <1 676 206000 300000>,
+ /* SMMU smmu_aggre2_noc_clk */
+ <81 10065 0 16000>;
qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
/* IPA RAM mmap */
@@ -1895,14 +1902,14 @@
0x0d0 0x80 0x00
0x184 0x01 0x00
0x010 0x01 0x00
- 0x01c 0x40 0x00
+ 0x01c 0x31 0x00
0x020 0x01 0x00
- 0x014 0x02 0x00
+ 0x014 0x00 0x00
0x018 0x00 0x00
- 0x024 0x7e 0x00
- 0x028 0x15 0x00
+ 0x024 0x85 0x00
+ 0x028 0x07 0x00
0x430 0x0b 0x00
- 0x4d4 0xef 0x00
+ 0x4d4 0x0f 0x00
0x4d8 0x4e 0x00
0x4dc 0x18 0x00
0x4f8 0x07 0x00
@@ -1917,9 +1924,9 @@
0x414 0x06 0x00
0x500 0x00 0x00
0x4c0 0x03 0x00
- 0x564 0x07 0x00
+ 0x564 0x05 0x00
0x830 0x0b 0x00
- 0x8d4 0xef 0x00
+ 0x8d4 0x0f 0x00
0x8d8 0x4e 0x00
0x8dc 0x18 0x00
0x8f8 0x07 0x00
@@ -1934,7 +1941,7 @@
0x814 0x06 0x00
0x900 0x00 0x00
0x8c0 0x03 0x00
- 0x964 0x07 0x00
+ 0x964 0x05 0x00
0x260 0x10 0x00
0x2a4 0x12 0x00
0x28c 0x16 0x00
@@ -1947,29 +1954,29 @@
0xccc 0x09 0x00
0xcd0 0xa2 0x00
0xcd4 0x40 0x00
- 0xcc4 0x01 0x00
+ 0xcc4 0x02 0x00
0xc80 0xd1 0x00
0xc84 0x1f 0x00
0xc88 0x47 0x00
0xc64 0x1b 0x00
0xc0c 0x9f 0x00
- 0xc10 0x9e 0x00
- 0xc14 0xb0 0x00
- 0xc18 0x57 0x00
- 0xc1c 0x69 0x00
- 0xc20 0x69 0x00
- 0xc24 0x17 0x00
- 0xc28 0x0f 0x00
- 0xc2c 0x16 0x00
- 0xc30 0x0f 0x00
- 0xc34 0x11 0x00
- 0xc38 0x0c 0x00
- 0xc3c 0x19 0x00
- 0xc40 0x11 0x00
- 0xc44 0x10 0x00
- 0xc48 0x0b 0x00
- 0xc4c 0x10 0x00
- 0xc50 0x0b 0x00
+ 0xc10 0x9f 0x00
+ 0xc14 0xb7 0x00
+ 0xc18 0x4e 0x00
+ 0xc1c 0x65 0x00
+ 0xc20 0x6b 0x00
+ 0xc24 0x15 0x00
+ 0xc28 0x0d 0x00
+ 0xc2c 0x15 0x00
+ 0xc30 0x0d 0x00
+ 0xc34 0x15 0x00
+ 0xc38 0x0d 0x00
+ 0xc3c 0x15 0x00
+ 0xc40 0x0d 0x00
+ 0xc44 0x15 0x00
+ 0xc48 0x0d 0x00
+ 0xc4c 0x15 0x00
+ 0xc50 0x0d 0x00
0xc5c 0x02 0x00
0xca0 0x04 0x00
0xc8c 0x44 0x00
@@ -1977,7 +1984,7 @@
0xc74 0x03 0x00
0xc78 0x40 0x00
0xc7c 0x00 0x00
- 0xdd8 0x8c 0x00
+ 0xdd8 0x8a 0x00
0xcb8 0x75 0x00
0xcb0 0x86 0x00
0xcbc 0x13 0x00
@@ -2775,6 +2782,8 @@
<0xb0000000 0x10000>;
reg-names = "membase", "mpm_config",
"smmu_iova_base", "smmu_iova_ipa";
+ clocks = <&clock_gcc clk_rf_clk2_pin>;
+ clock-names = "cxo_ref_clk_pin";
iommus = <&anoc2_smmu 0x1900>,
<&anoc2_smmu 0x1901>;
interrupts = <0 413 0 /* CE0 */ >,
@@ -2790,8 +2799,12 @@
<0 424 0 /* CE10 */ >,
<0 425 0 /* CE11 */ >;
qcom,wlan-msa-memory = <0x100000>;
- vdd-io-supply = <&pmcobalt_l5>;
- qcom,vdd-io-voltage-level = <800000 800000>;
+ vdd-0.8-cx-mx-supply = <&pmcobalt_l5>;
+ vdd-1.8-xo-supply = <&pmcobalt_l7_pin_ctrl>;
+ vdd-1.3-rfa-supply = <&pmcobalt_l17_pin_ctrl>;
+ vdd-3.3-ch0-supply = <&pmcobalt_l25_pin_ctrl>;
+ qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+ qcom,vdd-3.3-ch0-config = <3104000 3312000>;
qcom,msm-bus,name = "msm-icnss";
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig
index d7e78430fe7f..723af5c5113a 100644
--- a/arch/arm64/configs/msm-perf_defconfig
+++ b/arch/arm64/configs/msm-perf_defconfig
@@ -528,6 +528,7 @@ CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QCOM_SMCINVOKE=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig
index 9d4228923595..a1c6992d5918 100644
--- a/arch/arm64/configs/msm_defconfig
+++ b/arch/arm64/configs/msm_defconfig
@@ -532,6 +532,7 @@ CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QCOM_SMCINVOKE=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index f2eafd610cac..79b39a14657c 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -528,6 +528,7 @@ CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 3742fe210dc2..1a513584d699 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -547,6 +547,7 @@ CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index e8b1f7910490..48b75ece4c17 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -44,6 +44,8 @@
#include <asm/esr.h>
#include <asm/edac.h>
+#include <trace/events/exception.h>
+
static const char *handler[]= {
"Synchronous Abort",
"IRQ",
@@ -421,6 +423,8 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0)
return;
+ trace_undef_instr(regs, (void *)pc);
+
if (unhandled_signal(current, SIGILL) && show_unhandled_signals_ratelimited()) {
pr_info("%s[%d]: undefined instruction: pc=%p\n",
current->comm, task_pid_nr(current), pc);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7bb08670fc10..69079e5bfc84 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -40,6 +40,8 @@
#include <asm/tlbflush.h>
#include <asm/edac.h>
+#include <trace/events/exception.h>
+
static const char *fault_name(unsigned int esr);
/*
@@ -118,6 +120,8 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
{
struct siginfo si;
+ trace_user_fault(tsk, addr, esr);
+
if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
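
Both hunks above call tracepoints declared in include/trace/events/exception.h, which this patch adds but which is not reproduced in this excerpt. As a hedged illustration of the usual shape of such a definition (the field list and format string are assumptions, not the header's actual contents; trace_user_fault would be backed by a similar definition taking tsk, addr and esr):

    /*
     * Illustrative sketch only; the real definitions in
     * include/trace/events/exception.h may use different fields and format.
     */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM exception

    #if !defined(_TRACE_EXCEPTION_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_EXCEPTION_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(undef_instr,

    	TP_PROTO(struct pt_regs *regs, void *pc),

    	TP_ARGS(regs, pc),

    	TP_STRUCT__entry(
    		__field(void *, pc)
    	),

    	TP_fast_assign(
    		__entry->pc = pc;
    	),

    	TP_printk("pc=%p", __entry->pc)
    );

    #endif /* _TRACE_EXCEPTION_H */

    /* This part must be outside protection */
    #include <trace/define_trace.h>
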
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index d29192bfb9d0..13116f010e89 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1250,9 +1250,10 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
{
struct smq_msg *msg = &ctx->msg;
struct fastrpc_file *fl = ctx->fl;
+ struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
int err = 0, len;
- VERIFY(err, 0 != fl->apps->channel[fl->cid].chan);
+ VERIFY(err, 0 != channel_ctx->chan);
if (err)
goto bail;
msg->pid = current->tgid;
@@ -1266,13 +1267,21 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
msg->invoke.page.size = buf_page_size(ctx->used);
if (fl->apps->glink) {
- err = glink_tx(fl->apps->channel[fl->cid].chan,
+ if (fl->ssrcount != channel_ctx->ssrcount) {
+ err = -ECONNRESET;
+ goto bail;
+ }
+ VERIFY(err, channel_ctx->link.port_state ==
+ FASTRPC_LINK_CONNECTED);
+ if (err)
+ goto bail;
+ err = glink_tx(channel_ctx->chan,
(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
GLINK_TX_REQ_INTENT);
} else {
spin_lock(&fl->apps->hlock);
len = smd_write((smd_channel_t *)
- fl->apps->channel[fl->cid].chan,
+ channel_ctx->chan,
msg, sizeof(*msg));
spin_unlock(&fl->apps->hlock);
VERIFY(err, len == sizeof(*msg));
@@ -1823,12 +1832,14 @@ void fastrpc_glink_notify_state(void *handle, const void *priv, unsigned event)
break;
case GLINK_LOCAL_DISCONNECTED:
link->port_state = FASTRPC_LINK_DISCONNECTED;
- fastrpc_notify_drivers(me, cid);
- if (link->link_state == FASTRPC_LINK_STATE_UP)
- fastrpc_glink_open(cid);
break;
case GLINK_REMOTE_DISCONNECTED:
- fastrpc_glink_close(me->channel[cid].chan, cid);
+ if (me->channel[cid].chan &&
+ link->link_state == FASTRPC_LINK_STATE_UP) {
+ fastrpc_glink_close(me->channel[cid].chan, cid);
+ me->channel[cid].chan = 0;
+ link->port_state = FASTRPC_LINK_DISCONNECTED;
+ }
break;
default:
break;
@@ -1962,7 +1973,9 @@ static void fastrpc_glink_close(void *chan, int cid)
if (err)
return;
link = &gfa.channel[cid].link;
- if (link->port_state == FASTRPC_LINK_CONNECTED) {
+
+ if (link->port_state == FASTRPC_LINK_CONNECTED ||
+ link->port_state == FASTRPC_LINK_CONNECTING) {
link->port_state = FASTRPC_LINK_DISCONNECTING;
glink_close(chan);
}
@@ -1993,6 +2006,7 @@ static int fastrpc_glink_open(int cid)
link->port_state = FASTRPC_LINK_CONNECTING;
cfg->priv = (void *)(uintptr_t)cid;
cfg->edge = gcinfo[cid].link.link_info.edge;
+ cfg->transport = gcinfo[cid].link.link_info.transport;
cfg->name = FASTRPC_GLINK_GUID;
cfg->notify_rx = fastrpc_glink_notify_rx;
cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile
index 3de7aba12a2f..c5ec4f081c55 100644
--- a/drivers/char/diag/Makefile
+++ b/drivers/char/diag/Makefile
@@ -3,4 +3,4 @@ obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_bridge.o
obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_hsic.o
obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_smux.o
obj-$(CONFIG_MSM_MHI) += diagfwd_mhi.o
-diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagfwd_peripheral.o diagfwd_smd.o diagfwd_socket.o diag_mux.o diag_memorydevice.o diag_usb.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
+diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagfwd_glink.o diagfwd_peripheral.o diagfwd_smd.o diagfwd_socket.o diag_mux.o diag_memorydevice.o diag_usb.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index f5e4eba1e96b..b861d5f32d03 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -34,6 +34,7 @@
#include "diagfwd_peripheral.h"
#include "diagfwd_smd.h"
#include "diagfwd_socket.h"
+#include "diagfwd_glink.h"
#include "diag_debugfs.h"
#include "diag_ipc_logging.h"
@@ -44,6 +45,7 @@ static int diag_dbgfs_mempool_index;
static int diag_dbgfs_usbinfo_index;
static int diag_dbgfs_smdinfo_index;
static int diag_dbgfs_socketinfo_index;
+static int diag_dbgfs_glinkinfo_index;
static int diag_dbgfs_hsicinfo_index;
static int diag_dbgfs_mhiinfo_index;
static int diag_dbgfs_bridgeinfo_index;
@@ -684,6 +686,110 @@ static ssize_t diag_dbgfs_read_socketinfo(struct file *file, char __user *ubuf,
return ret;
}
+static ssize_t diag_dbgfs_read_glinkinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_glink_info *info = NULL;
+ struct diagfwd_info *fwd_ctxt = NULL;
+
+ if (diag_dbgfs_glinkinfo_index >= NUM_PERIPHERALS) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_glinkinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = 0; i < NUM_TYPES; i++) {
+ for (j = 0; j < NUM_PERIPHERALS; j++) {
+ switch (i) {
+ case TYPE_DATA:
+ info = &glink_data[j];
+ break;
+ case TYPE_CNTL:
+ info = &glink_cntl[j];
+ break;
+ case TYPE_DCI:
+ info = &glink_dci[j];
+ break;
+ case TYPE_CMD:
+ info = &glink_cmd[j];
+ break;
+ case TYPE_DCI_CMD:
+ info = &glink_dci_cmd[j];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fwd_ctxt = (struct diagfwd_info *)(info->fwd_ctxt);
+
+ bytes_written = scnprintf(buf+bytes_in_buffer,
+ bytes_remaining,
+ "name\t\t:\t%s\n"
+ "hdl\t\t:\t%pK\n"
+ "inited\t\t:\t%d\n"
+ "opened\t\t:\t%d\n"
+ "diag_state\t:\t%d\n"
+ "buf_1 busy\t:\t%d\n"
+ "buf_2 busy\t:\t%d\n"
+ "tx_intent_ready\t:\t%d\n"
+ "open pending\t:\t%d\n"
+ "close pending\t:\t%d\n"
+ "read pending\t:\t%d\n"
+ "bytes read\t:\t%lu\n"
+ "bytes written\t:\t%lu\n"
+ "fwd inited\t:\t%d\n"
+ "fwd opened\t:\t%d\n"
+ "fwd ch_open\t:\t%d\n\n",
+ info->name,
+ info->hdl,
+ info->inited,
+ atomic_read(&info->opened),
+ atomic_read(&info->diag_state),
+ (fwd_ctxt && fwd_ctxt->buf_1) ?
+ atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+ (fwd_ctxt && fwd_ctxt->buf_2) ?
+ atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+ atomic_read(&info->tx_intent_ready),
+ work_pending(&info->open_work),
+ work_pending(&info->close_work),
+ work_pending(&info->read_work),
+ (fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->inited : -1,
+ (fwd_ctxt) ?
+ atomic_read(&fwd_ctxt->opened) : -1,
+ (fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ }
+ diag_dbgfs_glinkinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
static ssize_t diag_dbgfs_write_debug(struct file *fp, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -947,6 +1053,10 @@ const struct file_operations diag_dbgfs_socketinfo_ops = {
.read = diag_dbgfs_read_socketinfo,
};
+const struct file_operations diag_dbgfs_glinkinfo_ops = {
+ .read = diag_dbgfs_read_glinkinfo,
+};
+
const struct file_operations diag_dbgfs_table_ops = {
.read = diag_dbgfs_read_table,
};
@@ -994,6 +1104,11 @@ int diag_debugfs_init(void)
if (!entry)
goto err;
+ entry = debugfs_create_file("glinkinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_glinkinfo_ops);
+ if (!entry)
+ goto err;
+
entry = debugfs_create_file("table", 0444, diag_dbgfs_dent, 0,
&diag_dbgfs_table_ops);
if (!entry)
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 5d7b1e7fe757..dccaa6a0d9c4 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -64,16 +64,19 @@
#define DIAG_CON_LPASS (0x0004) /* Bit mask for LPASS */
#define DIAG_CON_WCNSS (0x0008) /* Bit mask for WCNSS */
#define DIAG_CON_SENSORS (0x0010) /* Bit mask for Sensors */
+#define DIAG_CON_WDSP (0x0020) /* Bit mask for WDSP */
+
#define DIAG_CON_NONE (0x0000) /* Bit mask for No SS*/
#define DIAG_CON_ALL (DIAG_CON_APSS | DIAG_CON_MPSS \
| DIAG_CON_LPASS | DIAG_CON_WCNSS \
- | DIAG_CON_SENSORS)
+ | DIAG_CON_SENSORS | DIAG_CON_WDSP)
#define DIAG_STM_MODEM 0x01
#define DIAG_STM_LPASS 0x02
#define DIAG_STM_WCNSS 0x04
#define DIAG_STM_APPS 0x08
#define DIAG_STM_SENSORS 0x10
+#define DIAG_STM_WDSP 0x20
#define INVALID_PID -1
#define DIAG_CMD_FOUND 1
@@ -198,7 +201,8 @@
#define PERIPHERAL_LPASS 1
#define PERIPHERAL_WCNSS 2
#define PERIPHERAL_SENSORS 3
-#define NUM_PERIPHERALS 4
+#define PERIPHERAL_WDSP 4
+#define NUM_PERIPHERALS 5
#define APPS_DATA (NUM_PERIPHERALS)
/* Number of sessions possible in Memory Device Mode. +1 for Apps data */
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index ecdbf9f9480e..a39e4929d999 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -383,6 +383,8 @@ static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
ret |= DIAG_CON_WCNSS;
if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
ret |= DIAG_CON_SENSORS;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
+ ret |= DIAG_CON_WDSP;
return ret;
}
@@ -1489,6 +1491,8 @@ static uint32_t diag_translate_mask(uint32_t peripheral_mask)
ret |= (1 << PERIPHERAL_WCNSS);
if (peripheral_mask & DIAG_CON_SENSORS)
ret |= (1 << PERIPHERAL_SENSORS);
+ if (peripheral_mask & DIAG_CON_WDSP)
+ ret |= (1 << PERIPHERAL_WDSP);
return ret;
}
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index aec4f965b13e..8205e5b05d85 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -561,6 +561,9 @@ int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
if (mask & DIAG_STM_SENSORS)
diag_process_stm_mask(cmd, DIAG_STM_SENSORS,
PERIPHERAL_SENSORS);
+ if (mask & DIAG_STM_WDSP)
+ diag_process_stm_mask(cmd, DIAG_STM_WDSP,
+ PERIPHERAL_WDSP);
if (mask & DIAG_STM_APPS)
diag_process_stm_mask(cmd, DIAG_STM_APPS, APPS_DATA);
@@ -582,6 +585,9 @@ int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
if (driver->feature[PERIPHERAL_SENSORS].stm_support)
rsp_supported |= DIAG_STM_SENSORS;
+ if (driver->feature[PERIPHERAL_WDSP].stm_support)
+ rsp_supported |= DIAG_STM_WDSP;
+
rsp_supported |= DIAG_STM_APPS;
/* Set mask denoting STM state/status for each peripheral/APSS */
@@ -597,6 +603,9 @@ int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
if (driver->stm_state[PERIPHERAL_SENSORS])
rsp_status |= DIAG_STM_SENSORS;
+ if (driver->stm_state[PERIPHERAL_WDSP])
+ rsp_status |= DIAG_STM_WDSP;
+
if (driver->stm_state[APPS_DATA])
rsp_status |= DIAG_STM_APPS;
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
new file mode 100644
index 000000000000..0a6d8bb7b21f
--- /dev/null
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -0,0 +1,702 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <soc/qcom/glink.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_glink.h"
+#include "diag_ipc_logging.h"
+
+struct diag_glink_info glink_data[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DATA,
+ .edge = "mpss",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DATA,
+ .edge = "lpass",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DATA,
+ .edge = "wcnss",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DATA,
+ .edge = "dsps",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DATA,
+ .edge = "wdsp",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_cntl[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CNTL,
+ .edge = "mpss",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CNTL,
+ .edge = "lpass",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CNTL,
+ .edge = "wcnss",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CNTL,
+ .edge = "dsps",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CNTL,
+ .edge = "wdsp",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_dci[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI,
+ .edge = "mpss",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI,
+ .edge = "lpass",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI,
+ .edge = "wcnss",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI,
+ .edge = "dsps",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI,
+ .edge = "wdsp",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CMD,
+ .edge = "mpss",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CMD,
+ .edge = "lpass",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CMD,
+ .edge = "wcnss",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CMD,
+ .edge = "dsps",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CMD,
+ .edge = "wdsp",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_dci_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI_CMD,
+ .edge = "mpss",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI_CMD,
+ .edge = "lpass",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI_CMD,
+ .edge = "wcnss",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI_CMD,
+ .edge = "dsps",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI_CMD,
+ .edge = "wdsp",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ }
+};
+
+static void diag_state_open_glink(void *ctxt);
+static void diag_state_close_glink(void *ctxt);
+static int diag_glink_write(void *ctxt, unsigned char *buf, int len);
+static int diag_glink_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_glink_queue_read(void *ctxt);
+
+static struct diag_peripheral_ops glink_ops = {
+ .open = diag_state_open_glink,
+ .close = diag_state_close_glink,
+ .write = diag_glink_write,
+ .read = diag_glink_read,
+ .queue_read = diag_glink_queue_read
+};
+
+static void diag_state_open_glink(void *ctxt)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ glink_info = (struct diag_glink_info *)(ctxt);
+ atomic_set(&glink_info->diag_state, 1);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 1", glink_info->name);
+}
+
+static void diag_glink_queue_read(void *ctxt)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ glink_info = (struct diag_glink_info *)ctxt;
+ if (glink_info->hdl && glink_info->wq &&
+ atomic_read(&glink_info->opened))
+ queue_work(glink_info->wq, &(glink_info->read_work));
+}
+
+static void diag_state_close_glink(void *ctxt)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ glink_info = (struct diag_glink_info *)(ctxt);
+ atomic_set(&glink_info->diag_state, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 0", glink_info->name);
+ wake_up_interruptible(&glink_info->read_wait_q);
+ flush_workqueue(glink_info->wq);
+}
+
+int diag_glink_check_state(void *ctxt)
+{
+ struct diag_glink_info *info = NULL;
+
+ if (!ctxt)
+ return 0;
+
+ info = (struct diag_glink_info *)ctxt;
+ return (int)(atomic_read(&info->diag_state));
+}
+
+static int diag_glink_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+ struct diag_glink_info *glink_info = NULL;
+ int ret_val = 0;
+
+ if (!ctxt || !buf || buf_len <= 0)
+ return -EIO;
+
+ glink_info = (struct diag_glink_info *)ctxt;
+ if (!glink_info || !atomic_read(&glink_info->opened) ||
+ !glink_info->hdl || !glink_info->inited) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:Glink channel not opened");
+ return -EIO;
+ }
+
+ ret_val = glink_queue_rx_intent(glink_info->hdl, buf, buf_len);
+ if (ret_val == 0)
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: queued an rx intent ch:%s perip:%d buf:%pK of len:%d\n",
+ glink_info->name, glink_info->peripheral, buf, buf_len);
+
+ return ret_val;
+}
+
+static void diag_glink_read_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ read_work);
+
+ if (!glink_info || !atomic_read(&glink_info->opened))
+ return;
+
+ if (!glink_info->inited) {
+ diag_ws_release();
+ return;
+ }
+
+ diagfwd_channel_read(glink_info->fwd_ctxt);
+}
+
+static void diag_glink_notify_rx(void *hdl, const void *priv,
+ const void *pkt_priv, const void *ptr,
+ size_t size)
+{
+ struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+ int err = 0;
+
+ if (!glink_info || !glink_info->hdl || !ptr || !pkt_priv || !hdl)
+ return;
+
+ if (size <= 0)
+ return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: received a packet %pK of len:%d from periph:%d ch:%d\n",
+ ptr, (int)size, glink_info->peripheral, glink_info->type);
+
+ memcpy((void *)pkt_priv, ptr, size);
+ err = diagfwd_channel_read_done(glink_info->fwd_ctxt,
+ (unsigned char *)pkt_priv, size);
+ glink_rx_done(glink_info->hdl, ptr, false);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Rx done for packet %pK of len:%d periph:%d ch:%d\n",
+ ptr, (int)size, glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_notify_remote_rx_intent(void *hdl, const void *priv,
+ size_t size)
+{
+ struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+
+ if (!glink_info)
+ return;
+
+ atomic_inc(&glink_info->tx_intent_ready);
+ wake_up_interruptible(&glink_info->wait_q);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:received remote rx intent for %d type %d\n",
+ glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_notify_tx_done(void *hdl, const void *priv,
+ const void *pkt_priv,
+ const void *ptr)
+{
+ struct diag_glink_info *glink_info = NULL;
+ struct diagfwd_info *fwd_info = NULL;
+ int found = 0;
+
+ glink_info = (struct diag_glink_info *)priv;
+ if (!glink_info)
+ return;
+
+ fwd_info = glink_info->fwd_ctxt;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Received glink tx done notify for ptr%pK pkt_priv %pK\n",
+ ptr, pkt_priv);
+ found = diagfwd_write_buffer_done(fwd_info, ptr);
+ if (!found)
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Received Tx done on invalid buffer ptr %pK\n", ptr);
+}
+
+static int diag_glink_write(void *ctxt, unsigned char *buf, int len)
+{
+ struct diag_glink_info *glink_info = NULL;
+ int err = 0;
+ uint32_t tx_flags = GLINK_TX_REQ_INTENT;
+
+ if (!ctxt || !buf)
+ return -EIO;
+
+ glink_info = (struct diag_glink_info *)ctxt;
+ if (!glink_info || len <= 0) {
+ pr_err_ratelimited("diag: In %s, invalid params, glink_info: %pK, buf: %pK, len: %d\n",
+ __func__, glink_info, buf, len);
+ return -EINVAL;
+ }
+
+ if (!glink_info->inited || !glink_info->hdl ||
+ !atomic_read(&glink_info->opened)) {
+ pr_err_ratelimited("diag: In %s, glink not inited, glink_info: %pK, buf: %pK, len: %d\n",
+ __func__, glink_info, buf, len);
+ return -ENODEV;
+ }
+
+ err = wait_event_interruptible(glink_info->wait_q,
+ atomic_read(&glink_info->tx_intent_ready));
+ if (err) {
+ diagfwd_write_buffer_done(glink_info->fwd_ctxt, buf);
+ return -ERESTARTSYS;
+ }
+
+ atomic_dec(&glink_info->tx_intent_ready);
+ err = glink_tx(glink_info->hdl, glink_info, buf, len, tx_flags);
+ if (!err) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s wrote to glink, len: %d\n",
+ glink_info->name, len);
+ }
+
+ return err;
+
+}
+static void diag_glink_transport_notify_state(void *handle, const void *priv,
+ unsigned event)
+{
+ struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+
+ if (!glink_info)
+ return;
+
+ switch (event) {
+ case GLINK_CONNECTED:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received channel connect for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ atomic_set(&glink_info->opened, 1);
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+ diagfwd_late_open(glink_info->fwd_ctxt);
+ break;
+ case GLINK_LOCAL_DISCONNECTED:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received channel disconnect for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+
+ break;
+ case GLINK_REMOTE_DISCONNECTED:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received channel remote disconnect for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ atomic_set(&glink_info->opened, 0);
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received invalid notification\n",
+ glink_info->name);
+ break;
+ }
+
+}
+static void diag_glink_open_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ open_work);
+ struct glink_open_config open_cfg;
+ void *handle = NULL;
+
+ if (!glink_info)
+ return;
+
+ memset(&open_cfg, 0, sizeof(struct glink_open_config));
+ open_cfg.priv = glink_info;
+ open_cfg.edge = glink_info->edge;
+ open_cfg.name = glink_info->name;
+ open_cfg.notify_rx = diag_glink_notify_rx;
+ open_cfg.notify_tx_done = diag_glink_notify_tx_done;
+ open_cfg.notify_state = diag_glink_transport_notify_state;
+ open_cfg.notify_remote_rx_intent = diag_glink_notify_remote_rx_intent;
+ handle = glink_open(&open_cfg);
+ if (IS_ERR_OR_NULL(handle)) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "error opening channel %s",
+ glink_info->name);
+ } else
+ glink_info->hdl = handle;
+}
+
+static void diag_glink_close_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ close_work);
+ if (!glink_info->inited)
+ return;
+
+ glink_close(glink_info->hdl);
+ atomic_set(&glink_info->opened, 0);
+ diagfwd_channel_close(glink_info->fwd_ctxt);
+}
+
+static void diag_glink_notify_cb(struct glink_link_state_cb_info *cb_info,
+ void *priv)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ glink_info = (struct diag_glink_info *)priv;
+ if (!glink_info)
+ return;
+ if (!cb_info)
+ return;
+
+ switch (cb_info->link_state) {
+ case GLINK_LINK_STATE_UP:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s channel opened for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ queue_work(glink_info->wq, &glink_info->open_work);
+ break;
+ case GLINK_LINK_STATE_DOWN:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s channel closed for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ queue_work(glink_info->wq, &glink_info->close_work);
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Invalid link state notification for ch:%s\n",
+ glink_info->name);
+ break;
+
+ }
+}
+
+static void glink_late_init(struct diag_glink_info *glink_info)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (!glink_info)
+ return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+ glink_info->name);
+
+ diagfwd_register(TRANSPORT_GLINK, glink_info->peripheral,
+ glink_info->type, (void *)glink_info,
+ &glink_ops, &glink_info->fwd_ctxt);
+ fwd_info = glink_info->fwd_ctxt;
+ if (!fwd_info)
+ return;
+
+ glink_info->inited = 1;
+
+ if (atomic_read(&glink_info->opened))
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+ glink_info->name);
+}
+
+int diag_glink_init_peripheral(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n",
+ __func__, peripheral);
+ return -EINVAL;
+ }
+
+ glink_late_init(&glink_data[peripheral]);
+ glink_late_init(&glink_dci[peripheral]);
+ glink_late_init(&glink_cmd[peripheral]);
+ glink_late_init(&glink_dci_cmd[peripheral]);
+
+ return 0;
+}
+
+static void __diag_glink_init(struct diag_glink_info *glink_info)
+{
+ char wq_name[DIAG_GLINK_NAME_SZ + 12];
+ struct glink_link_info link_info;
+
+ if (!glink_info)
+ return;
+
+ init_waitqueue_head(&glink_info->wait_q);
+ init_waitqueue_head(&glink_info->read_wait_q);
+ mutex_init(&glink_info->lock);
+ strlcpy(wq_name, "DIAG_GLINK_", 12);
+ strlcat(wq_name, glink_info->name, sizeof(glink_info->name));
+ glink_info->wq = create_singlethread_workqueue(wq_name);
+ if (!glink_info->wq) {
+ pr_err("diag: In %s, unable to create workqueue for glink ch:%s\n",
+ __func__, glink_info->name);
+ return;
+ }
+ INIT_WORK(&(glink_info->open_work), diag_glink_open_work_fn);
+ INIT_WORK(&(glink_info->close_work), diag_glink_close_work_fn);
+ INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
+ link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
+ link_info.transport = NULL;
+ strlcpy((char *)link_info.edge, glink_info->edge,
+ sizeof(link_info.edge));
+ glink_info->hdl = glink_register_link_state_cb(&link_info,
+ (void *)glink_info);
+ if (IS_ERR_OR_NULL(glink_info->hdl)) {
+ pr_err("diag: In %s, unable to register for glink channel %s\n",
+ __func__, glink_info->name);
+ destroy_workqueue(glink_info->wq);
+ return;
+ }
+ glink_info->fwd_ctxt = NULL;
+ atomic_set(&glink_info->tx_intent_ready, 0);
+ atomic_set(&glink_info->opened, 0);
+ atomic_set(&glink_info->diag_state, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s initialized fwd_ctxt: %pK hdl: %pK\n",
+ glink_info->name, glink_info->fwd_ctxt, glink_info->hdl);
+}
+
+void diag_glink_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+ struct diag_glink_info *info = NULL;
+
+ if (!ctxt || !fwd_ctxt)
+ return;
+
+ info = (struct diag_glink_info *)ctxt;
+ info->fwd_ctxt = fwd_ctxt;
+}
+
+int diag_glink_init(void)
+{
+ uint8_t peripheral;
+ struct diag_glink_info *glink_info = NULL;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ glink_info = &glink_cntl[peripheral];
+ __diag_glink_init(glink_info);
+ diagfwd_cntl_register(TRANSPORT_GLINK, glink_info->peripheral,
+ (void *)glink_info, &glink_ops,
+ &(glink_info->fwd_ctxt));
+ glink_info->inited = 1;
+ __diag_glink_init(&glink_data[peripheral]);
+ __diag_glink_init(&glink_cmd[peripheral]);
+ __diag_glink_init(&glink_dci[peripheral]);
+ __diag_glink_init(&glink_dci_cmd[peripheral]);
+ }
+ return 0;
+}
+
+static void __diag_glink_exit(struct diag_glink_info *glink_info)
+{
+ if (!glink_info)
+ return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+ glink_info->name);
+
+ diagfwd_deregister(glink_info->peripheral, glink_info->type,
+ (void *)glink_info);
+ glink_info->fwd_ctxt = NULL;
+ glink_info->hdl = NULL;
+ if (glink_info->wq)
+ destroy_workqueue(glink_info->wq);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+ glink_info->name);
+}
+
+void diag_glink_early_exit(void)
+{
+ int peripheral = 0;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ __diag_glink_exit(&glink_cntl[peripheral]);
+ glink_unregister_link_state_cb(&glink_cntl[peripheral].hdl);
+ }
+}
+
+void diag_glink_exit(void)
+{
+ int peripheral = 0;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ __diag_glink_exit(&glink_data[peripheral]);
+ __diag_glink_exit(&glink_cmd[peripheral]);
+ __diag_glink_exit(&glink_dci[peripheral]);
+ __diag_glink_exit(&glink_dci_cmd[peripheral]);
+ glink_unregister_link_state_cb(&glink_data[peripheral].hdl);
+ glink_unregister_link_state_cb(&glink_cmd[peripheral].hdl);
+ glink_unregister_link_state_cb(&glink_dci[peripheral].hdl);
+ glink_unregister_link_state_cb(&glink_dci_cmd[peripheral].hdl);
+ }
+}
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
new file mode 100644
index 000000000000..3f00a7ed60a8
--- /dev/null
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_GLINK_H
+#define DIAGFWD_GLINK_H
+
+#define DIAG_GLINK_NAME_SZ 24
+#define GLINK_DRAIN_BUF_SIZE 4096
+
+struct diag_glink_info {
+ uint8_t peripheral;
+ uint8_t type;
+ uint8_t inited;
+ atomic_t opened;
+ atomic_t diag_state;
+ uint32_t fifo_size;
+ atomic_t tx_intent_ready;
+ void *hdl;
+ char edge[DIAG_GLINK_NAME_SZ];
+ char name[DIAG_GLINK_NAME_SZ];
+ struct mutex lock;
+ wait_queue_head_t read_wait_q;
+ wait_queue_head_t wait_q;
+ struct workqueue_struct *wq;
+ struct work_struct open_work;
+ struct work_struct close_work;
+ struct work_struct read_work;
+ struct diagfwd_info *fwd_ctxt;
+};
+
+extern struct diag_glink_info glink_data[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_cntl[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_cmd[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_dci_cmd[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_dci[NUM_PERIPHERALS];
+
+int diag_glink_init_peripheral(uint8_t peripheral);
+void diag_glink_exit(void);
+int diag_glink_init(void);
+void diag_glink_early_exit(void);
+void diag_glink_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_glink_check_state(void *ctxt);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 755c8fab27b1..066890aebf39 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -30,6 +30,7 @@
#include "diagfwd_socket.h"
#include "diag_mux.h"
#include "diag_ipc_logging.h"
+#include "diagfwd_glink.h"
struct data_header {
uint8_t control_char;
@@ -51,7 +52,8 @@ static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
unsigned char *buf, int len);
static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
unsigned char *buf, int len);
-
+static void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info);
+static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info);
struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
static struct diag_channel_ops data_ch_ops = {
@@ -475,6 +477,7 @@ int diagfwd_peripheral_init(void)
diag_smd_init();
if (driver->supports_sockets)
diag_socket_init();
+ diag_glink_init();
return 0;
}
@@ -621,6 +624,7 @@ void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
void (*invalidate_fn)(void *, struct diagfwd_info *) = NULL;
int (*check_channel_state)(void *) = NULL;
uint8_t transport_open = 0;
+ int i = 0;
if (peripheral >= NUM_PERIPHERALS)
return;
@@ -633,10 +637,17 @@ void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
check_channel_state = diag_socket_check_state;
break;
case TRANSPORT_SOCKET:
- transport_open = TRANSPORT_SMD;
- init_fn = diag_smd_init_peripheral;
- invalidate_fn = diag_smd_invalidate;
- check_channel_state = diag_smd_check_state;
+ if (peripheral == PERIPHERAL_WDSP) {
+ transport_open = TRANSPORT_GLINK;
+ init_fn = diag_glink_init_peripheral;
+ invalidate_fn = diag_glink_invalidate;
+ check_channel_state = diag_glink_check_state;
+ } else {
+ transport_open = TRANSPORT_SMD;
+ init_fn = diag_smd_init_peripheral;
+ invalidate_fn = diag_smd_invalidate;
+ check_channel_state = diag_smd_check_state;
+ }
break;
default:
return;
@@ -660,6 +671,8 @@ void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
dest_info->buf_2 = fwd_info->buf_2;
dest_info->transport = fwd_info->transport;
invalidate_fn(dest_info->ctxt, dest_info);
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++)
+ dest_info->buf_ptr[i] = fwd_info->buf_ptr[i];
if (!check_channel_state(dest_info->ctxt))
diagfwd_late_open(dest_info);
diagfwd_cntl_open(dest_info);
@@ -668,13 +681,30 @@ void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]);
}
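Note: the updated diagfwd_close_transport() treats WDSP specially; when its socket channel goes away the peripheral falls back to G-Link rather than SMD, and the matching init/invalidate/check-state helpers are picked up. A standalone sketch of that selection (plain C; the enums are stand-ins for the driver's defines):

#include <stdio.h>

enum transport { TRANSPORT_SMD, TRANSPORT_SOCKET, TRANSPORT_GLINK };
enum peripheral { PERIPHERAL_MODEM, PERIPHERAL_WDSP };

/* Which transport takes over when 'closed' is torn down. */
static enum transport fallback(enum transport closed, enum peripheral p)
{
	if (closed == TRANSPORT_SMD)
		return TRANSPORT_SOCKET;
	/* socket closed: WDSP uses G-Link, the other peripherals use SMD */
	return (p == PERIPHERAL_WDSP) ? TRANSPORT_GLINK : TRANSPORT_SMD;
}

int main(void)
{
	printf("WDSP  falls back to transport %d\n",
	       fallback(TRANSPORT_SOCKET, PERIPHERAL_WDSP));
	printf("MODEM falls back to transport %d\n",
	       fallback(TRANSPORT_SOCKET, PERIPHERAL_MODEM));
	return 0;
}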
+void *diagfwd_request_write_buf(struct diagfwd_info *fwd_info)
+{
+ void *buf = NULL;
+ int index;
+
+ for (index = 0 ; index < NUM_WRITE_BUFFERS; index++) {
+ if (!atomic_read(&(fwd_info->buf_ptr[index]->in_busy))) {
+ buf = fwd_info->buf_ptr[index]->data;
+ if (!buf)
+ return NULL;
+ atomic_set(&(fwd_info->buf_ptr[index]->in_busy), 1);
+ break;
+ }
+ }
+ return buf;
+}
+
int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
{
struct diagfwd_info *fwd_info = NULL;
int err = 0;
uint8_t retry_count = 0;
uint8_t max_retries = 3;
-
+ void *buf_ptr = NULL;
if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
return -EINVAL;
@@ -696,9 +726,21 @@ int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt))
return -EIO;
+ if (fwd_info->transport == TRANSPORT_GLINK) {
+ buf_ptr = diagfwd_request_write_buf(fwd_info);
+ if (buf_ptr)
+ memcpy(buf_ptr, buf, len);
+ else {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: buffer not found for writing\n");
+ return -EIO;
+ }
+ } else
+ buf_ptr = buf;
+
while (retry_count < max_retries) {
err = 0;
- err = fwd_info->p_ops->write(fwd_info->ctxt, buf, len);
+ err = fwd_info->p_ops->write(fwd_info->ctxt, buf_ptr, len);
if (err && err != -ENODEV) {
usleep_range(100000, 101000);
retry_count++;
@@ -715,6 +757,7 @@ int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
static void __diag_fwd_open(struct diagfwd_info *fwd_info)
{
+ int i;
if (!fwd_info)
return;
@@ -729,7 +772,10 @@ static void __diag_fwd_open(struct diagfwd_info *fwd_info)
if (fwd_info->p_ops && fwd_info->p_ops->open)
fwd_info->p_ops->open(fwd_info->ctxt);
-
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+ if (fwd_info->buf_ptr[i])
+ atomic_set(&fwd_info->buf_ptr[i]->in_busy, 0);
+ }
diagfwd_queue_read(fwd_info);
}
@@ -807,6 +853,7 @@ int diagfwd_channel_open(struct diagfwd_info *fwd_info)
fwd_info->ch_open = 1;
diagfwd_buffers_init(fwd_info);
+ diagfwd_write_buffers_init(fwd_info);
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open)
fwd_info->c_ops->open(fwd_info);
diagfwd_queue_read(fwd_info);
@@ -885,6 +932,25 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
diagfwd_queue_read(fwd_info);
}
+int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr)
+{
+ int found = 0;
+ int index = 0;
+
+ if (!fwd_info || !ptr)
+ return found;
+
+ for (index = 0; index < NUM_WRITE_BUFFERS; index++) {
+ if (fwd_info->buf_ptr[index]->data == ptr) {
+ atomic_set(&fwd_info->buf_ptr[index]->in_busy, 0);
+ found = 1;
+ break;
+ }
+ }
+ return found;
+}
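Note: diagfwd_request_write_buf() and diagfwd_write_buffer_done() form a small claim/release pool; a slot's in_busy flag is set before the data is copied and handed to G-Link (which completes the write asynchronously) and cleared when the transport hands the pointer back. The kernel code checks and sets the flag in two steps; the user-space model below uses a compare-and-swap for the same idea (sizes and names illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define NUM_WRITE_BUFFERS 2
#define BUF_SZ 128

struct wbuf {
	char data[BUF_SZ];
	atomic_int in_busy;
};

static struct wbuf pool[NUM_WRITE_BUFFERS];

static void *request_buf(void)
{
	for (int i = 0; i < NUM_WRITE_BUFFERS; i++) {
		int expected = 0;

		/* claim the slot only if it is currently free */
		if (atomic_compare_exchange_strong(&pool[i].in_busy,
						   &expected, 1))
			return pool[i].data;
	}
	return NULL;	/* every slot is busy */
}

static int buffer_done(const void *ptr)
{
	for (int i = 0; i < NUM_WRITE_BUFFERS; i++) {
		if (pool[i].data == ptr) {
			atomic_store(&pool[i].in_busy, 0);
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	void *a = request_buf();
	void *b = request_buf();
	void *c = request_buf();	/* pool exhausted, returns NULL */

	printf("a=%p b=%p c=%p\n", a, b, c);
	buffer_done(a);
	printf("slot reusable after done: %p\n", request_buf());
	return 0;
}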
+
void diagfwd_channel_read(struct diagfwd_info *fwd_info)
{
int err = 0;
@@ -1114,3 +1180,64 @@ static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
}
+static void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info)
+{
+ unsigned long flags;
+ int i;
+
+ if (!fwd_info)
+ return;
+
+ if (!fwd_info->inited) {
+ pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type);
+ return;
+ }
+
+ spin_lock_irqsave(&fwd_info->buf_lock, flags);
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+ if (!fwd_info->buf_ptr[i])
+ fwd_info->buf_ptr[i] =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_ATOMIC);
+ if (!fwd_info->buf_ptr[i])
+ goto err;
+ kmemleak_not_leak(fwd_info->buf_ptr[i]);
+ if (!fwd_info->buf_ptr[i]->data) {
+ fwd_info->buf_ptr[i]->data = kzalloc(PERIPHERAL_BUF_SZ,
+ GFP_ATOMIC);
+ if (!fwd_info->buf_ptr[i]->data)
+ goto err;
+ fwd_info->buf_ptr[i]->len = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_ptr[i]->data);
+ }
+ }
+ spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+ return;
+
+err:
+ spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+ pr_err("diag:unable to allocate write buffers\n");
+ diagfwd_write_buffers_exit(fwd_info);
+
+}
+
+static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info)
+{
+ unsigned long flags;
+ int i;
+
+ if (!fwd_info)
+ return;
+
+ spin_lock_irqsave(&fwd_info->buf_lock, flags);
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+ if (fwd_info->buf_ptr[i]) {
+ kfree(fwd_info->buf_ptr[i]->data);
+ fwd_info->buf_ptr[i]->data = NULL;
+ kfree(fwd_info->buf_ptr[i]);
+ fwd_info->buf_ptr[i] = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+}
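Note: the write buffers are allocated lazily, under buf_lock with GFP_ATOMIC, and a failure part-way through unwinds whatever was already allocated via diagfwd_write_buffers_exit(). A standalone sketch of the allocate-or-unwind flow (user-space C with calloc standing in for kzalloc; the buffer size is illustrative):

#include <stdio.h>
#include <stdlib.h>

#define NUM_WRITE_BUFFERS 2
#define BUF_SZ 8192	/* stand-in for PERIPHERAL_BUF_SZ */

struct wbuf {
	unsigned char *data;
	int len;
};

static struct wbuf *pool[NUM_WRITE_BUFFERS];

static void buffers_exit(void)
{
	for (int i = 0; i < NUM_WRITE_BUFFERS; i++) {
		if (pool[i]) {
			free(pool[i]->data);
			free(pool[i]);
			pool[i] = NULL;
		}
	}
}

static int buffers_init(void)
{
	for (int i = 0; i < NUM_WRITE_BUFFERS; i++) {
		if (!pool[i])
			pool[i] = calloc(1, sizeof(*pool[i]));
		if (!pool[i])
			goto err;
		if (!pool[i]->data) {
			pool[i]->data = calloc(1, BUF_SZ);
			if (!pool[i]->data)
				goto err;
			pool[i]->len = BUF_SZ;
		}
	}
	return 0;
err:
	buffers_exit();		/* release partial allocations */
	return -1;
}

int main(void)
{
	printf("init: %d\n", buffers_init());
	buffers_exit();
	return 0;
}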
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index dc50d70e80b4..b511bf495bc2 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,19 +20,22 @@
#define TRANSPORT_UNKNOWN -1
#define TRANSPORT_SMD 0
#define TRANSPORT_SOCKET 1
-#define NUM_TRANSPORT 2
-
+#define TRANSPORT_GLINK 2
+#define NUM_TRANSPORT 3
+#define NUM_WRITE_BUFFERS 2
#define PERIPHERAL_MASK(x) \
((x == PERIPHERAL_MODEM) ? DIAG_CON_MPSS : \
((x == PERIPHERAL_LPASS) ? DIAG_CON_LPASS : \
((x == PERIPHERAL_WCNSS) ? DIAG_CON_WCNSS : \
- ((x == PERIPHERAL_SENSORS) ? DIAG_CON_SENSORS : 0)))) \
+ ((x == PERIPHERAL_SENSORS) ? DIAG_CON_SENSORS : \
+ ((x == PERIPHERAL_WDSP) ? DIAG_CON_WDSP : 0))))) \
#define PERIPHERAL_STRING(x) \
((x == PERIPHERAL_MODEM) ? "MODEM" : \
((x == PERIPHERAL_LPASS) ? "LPASS" : \
((x == PERIPHERAL_WCNSS) ? "WCNSS" : \
- ((x == PERIPHERAL_SENSORS) ? "SENSORS" : "UNKNOWN")))) \
+ ((x == PERIPHERAL_SENSORS) ? "SENSORS" : \
+ ((x == PERIPHERAL_WDSP) ? "WDSP" : "UNKNOWN"))))) \
struct diagfwd_buf_t {
unsigned char *data;
@@ -72,6 +75,7 @@ struct diagfwd_info {
void *ctxt;
struct diagfwd_buf_t *buf_1;
struct diagfwd_buf_t *buf_2;
+ struct diagfwd_buf_t *buf_ptr[NUM_WRITE_BUFFERS];
struct diag_peripheral_ops *p_ops;
struct diag_channel_ops *c_ops;
};
@@ -108,5 +112,6 @@ int diagfwd_channel_close(struct diagfwd_info *fwd_info);
void diagfwd_channel_read(struct diagfwd_info *fwd_info);
int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
unsigned char *buf, uint32_t len);
+int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr);
#endif
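Note: PERIPHERAL_MASK() and PERIPHERAL_STRING() are nested ternaries, so adding WDSP means one more level and one more closing parenthesis in each macro. A compile-checkable sketch of the same pattern (the CON_* bit values are illustrative, not the driver's DIAG_CON_* defines, which live in diagchar.h outside this hunk):

#include <stdio.h>

enum { P_MODEM, P_LPASS, P_WCNSS, P_SENSORS, P_WDSP };

/* Illustrative bit assignments only */
#define CON_MPSS	0x02
#define CON_LPASS	0x04
#define CON_WCNSS	0x08
#define CON_SENSORS	0x10
#define CON_WDSP	0x20

#define PERIPHERAL_MASK(x)				\
	((x) == P_MODEM ? CON_MPSS :			\
	((x) == P_LPASS ? CON_LPASS :			\
	((x) == P_WCNSS ? CON_WCNSS :			\
	((x) == P_SENSORS ? CON_SENSORS :		\
	((x) == P_WDSP ? CON_WDSP : 0)))))

int main(void)
{
	printf("WDSP mask: 0x%x\n", PERIPHERAL_MASK(P_WDSP));
	return 0;
}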
diff --git a/drivers/char/diag/diagfwd_smd.c b/drivers/char/diag/diagfwd_smd.c
index 3ee21101e2f2..12069df1224d 100644
--- a/drivers/char/diag/diagfwd_smd.c
+++ b/drivers/char/diag/diagfwd_smd.c
@@ -49,6 +49,11 @@ struct diag_smd_info smd_data[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_SENSORS,
.type = TYPE_DATA,
.name = "SENSORS_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DATA,
+ .name = "DIAG_DATA"
}
};
@@ -72,6 +77,11 @@ struct diag_smd_info smd_cntl[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_SENSORS,
.type = TYPE_CNTL,
.name = "SENSORS_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CNTL,
+ .name = "DIAG_CTRL"
}
};
@@ -95,6 +105,11 @@ struct diag_smd_info smd_dci[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_SENSORS,
.type = TYPE_DCI,
.name = "SENSORS_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI,
+ .name = "DIAG_DCI_DATA"
}
};
@@ -118,6 +133,11 @@ struct diag_smd_info smd_cmd[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_SENSORS,
.type = TYPE_CMD,
.name = "SENSORS_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CMD,
+ .name = "DIAG_CMD"
}
};
@@ -141,6 +161,11 @@ struct diag_smd_info smd_dci_cmd[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_SENSORS,
.type = TYPE_DCI_CMD,
.name = "SENSORS_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI_CMD,
+ .name = "DIAG_DCI_CMD"
}
};
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 18f76ea89ec5..fd927e931414 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -40,6 +40,7 @@
#define LPASS_INST_BASE 64
#define WCNSS_INST_BASE 128
#define SENSORS_INST_BASE 192
+#define WDSP_INST_BASE 256
#define INST_ID_CNTL 0
#define INST_ID_CMD 1
@@ -69,6 +70,11 @@ struct diag_socket_info socket_data[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_SENSORS,
.type = TYPE_DATA,
.name = "SENSORS_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DATA,
+ .name = "DIAG_DATA"
}
};
@@ -92,6 +98,11 @@ struct diag_socket_info socket_cntl[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_SENSORS,
.type = TYPE_CNTL,
.name = "SENSORS_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CNTL,
+ .name = "DIAG_CTRL"
}
};
@@ -115,6 +126,11 @@ struct diag_socket_info socket_dci[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_SENSORS,
.type = TYPE_DCI,
.name = "SENSORS_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI,
+ .name = "DIAG_DCI_DATA"
}
};
@@ -138,7 +154,13 @@ struct diag_socket_info socket_cmd[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_SENSORS,
.type = TYPE_CMD,
.name = "SENSORS_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CMD,
+ .name = "DIAG_CMD"
}
};
struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS] = {
@@ -161,6 +183,11 @@ struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_SENSORS,
.type = TYPE_DCI_CMD,
.name = "SENSORS_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI_CMD,
+ .name = "DIAG_DCI_CMD"
}
};
@@ -711,6 +738,9 @@ static void __diag_socket_init(struct diag_socket_info *info)
case PERIPHERAL_SENSORS:
ins_base = SENSORS_INST_BASE;
break;
+ case PERIPHERAL_WDSP:
+ ins_base = WDSP_INST_BASE;
+ break;
}
switch (info->type) {
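Note: the socket transport derives each peripheral's IPC-router instance numbers from a per-peripheral base (LPASS 64, WCNSS 128, SENSORS 192, and now WDSP 256, with MODEM using 0) plus a per-channel instance ID. How base and ID are combined happens later in __diag_socket_init() and is not shown in this hunk, so the addition in the sketch below is an assumption:

#include <stdio.h>

enum peripheral { MODEM, LPASS, WCNSS, SENSORS, WDSP };

#define LPASS_INST_BASE		64
#define WCNSS_INST_BASE		128
#define SENSORS_INST_BASE	192
#define WDSP_INST_BASE		256

#define INST_ID_CNTL	0
#define INST_ID_CMD	1

static int inst_base(enum peripheral p)
{
	switch (p) {
	case LPASS:	return LPASS_INST_BASE;
	case WCNSS:	return WCNSS_INST_BASE;
	case SENSORS:	return SENSORS_INST_BASE;
	case WDSP:	return WDSP_INST_BASE;
	default:	return 0;	/* MODEM */
	}
}

int main(void)
{
	/* assumed combination: instance = per-peripheral base + channel ID */
	printf("WDSP CNTL instance: %d\n", inst_base(WDSP) + INST_ID_CNTL);
	printf("WDSP CMD  instance: %d\n", inst_base(WDSP) + INST_ID_CMD);
	return 0;
}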
diff --git a/drivers/clk/msm/clock-mmss-cobalt.c b/drivers/clk/msm/clock-mmss-cobalt.c
index 2da10a2e4780..bbb9af961235 100644
--- a/drivers/clk/msm/clock-mmss-cobalt.c
+++ b/drivers/clk/msm/clock-mmss-cobalt.c
@@ -1665,6 +1665,10 @@ static struct branch_clk mmss_camss_jpeg0_clk = {
},
};
+static DEFINE_CLK_VOTER(mmss_camss_jpeg0_vote_clk, &mmss_camss_jpeg0_clk.c, 0);
+static DEFINE_CLK_VOTER(mmss_camss_jpeg0_dma_vote_clk,
+ &mmss_camss_jpeg0_clk.c, 0);
+
static struct branch_clk mmss_camss_jpeg_ahb_clk = {
.cbcr_reg = MMSS_CAMSS_JPEG_AHB_CBCR,
.has_sibling = 1,
@@ -2572,6 +2576,8 @@ static struct clk_lookup msm_clocks_mmss_cobalt[] = {
CLK_LIST(mmss_camss_gp1_clk),
CLK_LIST(mmss_camss_ispif_ahb_clk),
CLK_LIST(mmss_camss_jpeg0_clk),
+ CLK_LIST(mmss_camss_jpeg0_vote_clk),
+ CLK_LIST(mmss_camss_jpeg0_dma_vote_clk),
CLK_LIST(mmss_camss_jpeg_ahb_clk),
CLK_LIST(mmss_camss_jpeg_axi_clk),
CLK_LIST(mmss_camss_mclk0_clk),
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
index 74032aba22bc..c88a5089bd60 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
@@ -49,13 +49,8 @@ int link2xclk_divsel_set_div(struct div_clk *clk, int div)
link2xclk_div_tx1 |= 0x4;
/*configure DP PHY MODE */
- phy_mode = 0x48;
+ phy_mode = 0x58;
- if (div == 10) {
- link2xclk_div_tx0 |= 1;
- link2xclk_div_tx1 |= 1;
- phy_mode |= 1;
- }
MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_TX_BAND,
link2xclk_div_tx0);
@@ -64,7 +59,8 @@ int link2xclk_divsel_set_div(struct div_clk *clk, int div)
link2xclk_div_tx1);
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_MODE, phy_mode);
-
+ /* Make sure the PHY register writes are done */
+ wmb();
pr_debug("%s: div=%d link2xclk_div_tx0=%x, link2xclk_div_tx1=%x\n",
__func__, div, link2xclk_div_tx0, link2xclk_div_tx1);
@@ -105,63 +101,6 @@ int link2xclk_divsel_get_div(struct div_clk *clk)
return div;
}
-int hsclk_divsel_set_div(struct div_clk *clk, int div)
-{
- int rc;
- u32 hsclk_div;
- struct mdss_pll_resources *dp_res = clk->priv;
-
- rc = mdss_pll_resource_enable(dp_res, true);
- if (rc) {
- pr_err("Failed to enable mdss DP PLL resources\n");
- return rc;
- }
-
- hsclk_div = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
- hsclk_div &= ~0x0f; /* bits 0 to 3 */
-
- if (div == 3)
- hsclk_div |= 4;
- else
- hsclk_div |= 0;
-
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_HSCLK_SEL, hsclk_div);
-
- pr_debug("%s: div=%d hsclk_div=%x\n", __func__, div, hsclk_div);
-
- mdss_pll_resource_enable(dp_res, false);
-
- return rc;
-}
-
-int hsclk_divsel_get_div(struct div_clk *clk)
-{
- int rc;
- u32 hsclk_div, div;
- struct mdss_pll_resources *dp_res = clk->priv;
-
- rc = mdss_pll_resource_enable(dp_res, true);
- if (rc) {
- pr_err("Failed to enable dp_res resources\n");
- return rc;
- }
-
- hsclk_div = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
- hsclk_div &= 0x0f;
-
- if (hsclk_div == 4)
- div = 3;
- else
- div = 2;
-
- mdss_pll_resource_enable(dp_res, false);
-
- pr_debug("%s: hsclk_div:%d, div=%d\n", __func__, hsclk_div, div);
-
- return div;
-}
-
int vco_divided_clk_set_div(struct div_clk *clk, int div)
{
int rc;
@@ -174,18 +113,18 @@ int vco_divided_clk_set_div(struct div_clk *clk, int div)
return rc;
}
- auxclk_div = MDSS_PLL_REG_R(dp_res->pll_base, DP_PHY_VCO_DIV);
+ auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
auxclk_div &= ~0x03; /* bits 0 to 1 */
+
+ auxclk_div |= 1; /* Default divider */
+
if (div == 4)
auxclk_div |= 2;
- else if (div == 2)
- auxclk_div |= 1;
- else
- auxclk_div |= 2; /* Default divider */
- MDSS_PLL_REG_W(dp_res->pll_base,
+ MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_VCO_DIV, auxclk_div);
-
+ /* Make sure the PHY registers writes are done */
+ wmb();
pr_debug("%s: div=%d auxclk_div=%x\n", __func__, div, auxclk_div);
mdss_pll_resource_enable(dp_res, false);
@@ -215,15 +154,12 @@ int vco_divided_clk_get_div(struct div_clk *clk)
return rc;
}
- auxclk_div = MDSS_PLL_REG_R(dp_res->pll_base, DP_PHY_VCO_DIV);
+ auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
auxclk_div &= 0x03;
+ div = 2; /* Default divider */
if (auxclk_div == 2)
div = 4;
- else if (auxclk_div == 1)
- div = 2;
- else
- div = 0;
mdss_pll_resource_enable(dp_res, false);
@@ -239,14 +175,12 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_PD_CTL, 0x3d);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_PLL_IVCO, 0x0f);
+ /* Make sure the PHY register writes are done */
+ wmb();
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_SYSCLK_EN_SEL, 0x37);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SYS_CLK_CTRL, 0x06);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_CLK_ENABLE1, 0x0e);
@@ -255,16 +189,16 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_CLK_SEL, 0x30);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP_EN, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_PLL_CCTRL_MODE0, 0x34);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CP_CTRL_MODE0, 0x08);
/* Different for each clock rates */
- if (rate == DP_VCO_RATE_8100MHz) {
+ if (rate == DP_VCO_HSCLK_RATE_1620MHz) {
+ pr_debug("%s: VCO rate: %lld\n", __func__,
+ DP_VCO_RATE_8100MHz);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_SYS_CLK_CTRL, 0x02);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_HSCLK_SEL, 0x2c);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP_EN, 0x04);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_DEC_START_MODE0, 0x69);
MDSS_PLL_REG_W(dp_res->pll_base,
@@ -273,58 +207,88 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
- } else if (rate == DP_VCO_RATE_9720MHz) {
MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DEC_START_MODE0, 0x7e);
+ QSERDES_COM_CMN_CONFIG, 0x42);
MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
+ QSERDES_COM_LOCK_CMP1_MODE0, 0xbf);
MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
+ QSERDES_COM_LOCK_CMP2_MODE0, 0x21);
MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START3_MODE0, 0x09);
- } else if (rate == DP_VCO_RATE_10800MHz) {
+ QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+ } else if (rate == DP_VCO_HSCLK_RATE_2700MHz) {
+ pr_debug("%s: VCO rate: %lld\n", __func__,
+ DP_VCO_RATE_8100MHz);
MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DEC_START_MODE0, 0x8c);
+ QSERDES_COM_SYS_CLK_CTRL, 0x06);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_HSCLK_SEL, 0x84);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP_EN, 0x08);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DEC_START_MODE0, 0x69);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
+ QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START3_MODE0, 0x0a);
- } else {
- pr_err("%s: unsupported rate: %ld\n", __func__, rate);
- return -EINVAL;
- }
-
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_VCO_TUNE_MAP, 0x00);
-
- if (rate == DP_VCO_RATE_8100MHz) {
+ QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_CMN_CONFIG, 0x02);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_LOCK_CMP1_MODE0, 0x3f);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_LOCK_CMP2_MODE0, 0x38);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
- } else if (rate == DP_VCO_RATE_9720MHz) {
+ } else if (rate == DP_VCO_HSCLK_RATE_5400MHz) {
+ pr_debug("%s: VCO rate: %lld\n", __func__,
+ DP_VCO_RATE_10800MHz);
MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP1_MODE0, 0x7f);
+ QSERDES_COM_SYS_CLK_CTRL, 0x06);
MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP2_MODE0, 0x43);
+ QSERDES_COM_HSCLK_SEL, 0x80);
MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
- } else {
+ QSERDES_COM_LOCK_CMP_EN, 0x08);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DEC_START_MODE0, 0x8c);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START3_MODE0, 0xa0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_CMN_CONFIG, 0x12);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_LOCK_CMP1_MODE0, 0x7f);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_LOCK_CMP2_MODE0, 0x70);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+ } else {
+ pr_err("%s: unsupported rate: %ld\n", __func__, rate);
+ return -EINVAL;
}
+ /* Make sure the PLL register writes are done */
+ wmb();
+
+ if ((rate == DP_VCO_HSCLK_RATE_1620MHz)
+ || (rate == DP_VCO_HSCLK_RATE_2700MHz)) {
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_VCO_DIV, 0x1);
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_VCO_DIV, 0x2);
+ }
+ /* Make sure the PHY register writes are done */
+ wmb();
+
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_VCO_TUNE_MAP, 0x00);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_BG_TIMER, 0x00);
@@ -335,58 +299,42 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_VCO_TUNE_CTRL, 0x00);
MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_CP_CTRL_MODE0, 0x06);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_PLL_IVCO, 0x07);
+ MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x37);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_CORE_CLK_EN, 0x0f);
- /* Different for each clock rate */
- if (rate == DP_VCO_RATE_8100MHz) {
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CMN_CONFIG, 0x02);
- } else if (rate == DP_VCO_RATE_9720MHz) {
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CMN_CONFIG, 0x02);
- } else {
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CMN_CONFIG, 0x02);
- }
+ /* Make sure the PLL register writes are done */
+ wmb();
MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x1a);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x1a);
+ DP_PHY_MODE, 0x58);
MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x00);
+ DP_PHY_TX0_TX1_LANE_CTL, 0x05);
MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x00);
+ DP_PHY_TX2_TX3_LANE_CTL, 0x05);
MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_DRV_LVL,
- 0x38);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_DRV_LVL,
- 0x38);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_EMP_POST1_LVL,
- 0x2c);
+ QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ 0x1a);
MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_EMP_POST1_LVL,
- 0x2c);
+ QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ 0x1a);
MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_TX0_TX1_LANE_CTL, 0x05);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_TX2_TX3_LANE_CTL, 0x05);
- MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_VMODE_CTRL1,
0x40);
MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX1_OFFSET + TXn_VMODE_CTRL1,
0x40);
+
MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
0x30);
@@ -429,6 +377,15 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX1_OFFSET + TXn_TX_INTERFACE_MODE,
0x00);
+
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TX_BAND,
+ 0x4);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TX_BAND,
+ 0x4);
+ /* Make sure the PHY register writes are done */
+ wmb();
return res;
}
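Note: dp_config_vco_rate() is now keyed off the DP link (HSCLK) rate rather than the raw VCO rate. The 1.62 GHz and 2.7 GHz link rates both run the VCO at 8.1 GHz (dividers 5 and 3) and program DP_PHY_VCO_DIV to 0x1, while 5.4 GHz runs the VCO at 10.8 GHz (divider 2) and uses VCO_DIV 0x2. A worked check of those ratios (standalone C; the rates come from mdss-dp-pll-cobalt.h and the dividers match the decode in dp_vco_get_rate() further down):

#include <stdio.h>

int main(void)
{
	/* link rate (Hz), VCO rate (Hz), HSCLK divider */
	struct { unsigned long long link, vco; int div; } cfg[] = {
		{ 1620000000ULL,  8100000000ULL, 5 },
		{ 2700000000ULL,  8100000000ULL, 3 },
		{ 5400000000ULL, 10800000000ULL, 2 },
	};

	for (int i = 0; i < 3; i++)
		printf("link %llu = vco %llu / %d -> %s\n",
		       cfg[i].link, cfg[i].vco, cfg[i].div,
		       cfg[i].vco / cfg[i].div == cfg[i].link ? "ok" : "BAD");
	return 0;
}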
@@ -479,9 +436,12 @@ static int dp_pll_enable(struct clk *c)
DP_PHY_CFG, 0x01);
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_CFG, 0x09);
+ /* Make sure the PHY register writes are done */
+ wmb();
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_RESETSM_CNTRL, 0x20);
-
+ /* Make sure the PLL register writes are done */
+ wmb();
/* poll for PLL ready status */
if (readl_poll_timeout_atomic((dp_res->pll_base +
QSERDES_COM_C_READY_STATUS),
@@ -497,7 +457,8 @@ static int dp_pll_enable(struct clk *c)
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_CFG, 0x19);
-
+ /* Make sure the PHY register writes are done */
+ wmb();
/* poll for PHY ready status */
if (readl_poll_timeout_atomic((dp_res->phy_base +
DP_PHY_STATUS),
@@ -514,16 +475,16 @@ static int dp_pll_enable(struct clk *c)
pr_debug("%s: PLL is locked\n", __func__);
MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
0x3f);
MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
+ QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
0x10);
MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
+ QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ 0x3f);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
0x10);
MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_TX_POL_INV,
@@ -533,6 +494,8 @@ static int dp_pll_enable(struct clk *c)
0x0a);
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_CFG, 0x18);
+ udelay(2000);
+
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_CFG, 0x19);
@@ -542,6 +505,77 @@ static int dp_pll_enable(struct clk *c)
*/
wmb();
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_LANE_MODE_1,
+ 0xf6);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_LANE_MODE_1,
+ 0xf6);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
+ 0x1f);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
+ 0x1f);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
+ 0x0f);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
+ 0x0f);
+ /*
+ * Make sure all the register writes are completed before
+ * doing any other operation
+ */
+ wmb();
+
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_CFG, 0x09);
+ udelay(2000);
+
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_CFG, 0x19);
+ udelay(2000);
+ /* poll for PHY ready status */
+ if (readl_poll_timeout_atomic((dp_res->phy_base +
+ DP_PHY_STATUS),
+ status,
+ ((status & BIT(1)) > 0),
+ DP_PLL_POLL_SLEEP_US,
+ DP_PLL_POLL_TIMEOUT_US)) {
+ pr_err("%s: Lane_mode: Phy_ready is not high. Status=%x\n",
+ __func__, status);
+ rc = -EINVAL;
+ goto lock_err;
+ }
+
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TX_DRV_LVL,
+ 0x2a);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TX_DRV_LVL,
+ 0x2a);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TX_EMP_POST1_LVL,
+ 0x20);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TX_EMP_POST1_LVL,
+ 0x20);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
+ 0x11);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
+ 0x11);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
+ 0x11);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
+ 0x11);
+ /* Make sure the PHY register writes are done */
+ wmb();
+
lock_err:
return rc;
}
@@ -554,7 +588,7 @@ static int dp_pll_disable(struct clk *c)
/* Assert DP PHY power down */
MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_PD_CTL, 0x3c);
+ DP_PHY_PD_CTL, 0x2);
/*
* Make sure all the register writes to disable PLL are
* completed before doing any other operation
@@ -583,8 +617,10 @@ int dp_vco_prepare(struct clk *c)
mdss_pll_resource_enable(dp_pll_res, false);
pr_err("ndx=%d failed to enable dsi pll\n",
dp_pll_res->index);
+ goto error;
}
+ mdss_pll_resource_enable(dp_pll_res, false);
error:
return rc;
}
@@ -653,14 +689,20 @@ unsigned long dp_vco_get_rate(struct clk *c)
div = MDSS_PLL_REG_R(pll->pll_base, QSERDES_COM_HSCLK_SEL);
div &= 0x0f;
- if (div == 4)
+ if (div == 12)
+ hsclk_div = 5; /* Default */
+ else if (div == 4)
hsclk_div = 3;
- else
+ else if (div == 0)
hsclk_div = 2;
+ else {
+ pr_debug("unknown divider. forcing to default\n");
+ hsclk_div = 5;
+ }
div = MDSS_PLL_REG_R(pll->phy_base, DP_PHY_MODE);
- if (div & 0x48)
+ if (div & 0x58)
pr_err("%s: DP PAR Rate not correct\n", __func__);
if ((div & 0x3) == 1)
@@ -671,12 +713,14 @@ unsigned long dp_vco_get_rate(struct clk *c)
pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
if (link2xclk_div == 10) {
- vco_rate = DP_VCO_RATE_9720MHz;
+ vco_rate = DP_VCO_HSCLK_RATE_2700MHz;
} else {
- if (hsclk_div == 3)
- vco_rate = DP_VCO_RATE_8100MHz;
+ if (hsclk_div == 5)
+ vco_rate = DP_VCO_HSCLK_RATE_1620MHz;
+ else if (hsclk_div == 3)
+ vco_rate = DP_VCO_HSCLK_RATE_2700MHz;
else
- vco_rate = DP_VCO_RATE_10800MHz;
+ vco_rate = DP_VCO_HSCLK_RATE_5400MHz;
}
pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
@@ -693,8 +737,8 @@ long dp_vco_round_rate(struct clk *c, unsigned long rate)
if (rate <= vco->min_rate)
rrate = vco->min_rate;
- else if (rate <= DP_VCO_RATE_9720MHz)
- rrate = DP_VCO_RATE_9720MHz;
+ else if (rate <= DP_VCO_HSCLK_RATE_2700MHz)
+ rrate = DP_VCO_HSCLK_RATE_2700MHz;
else
rrate = vco->max_rate;
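Note: dp_vco_get_rate() reverses that mapping; it decodes QSERDES_COM_HSCLK_SEL[3:0] (0xc means divide-by-5, 0x4 divide-by-3, 0x0 divide-by-2, anything else is forced to the divide-by-5 default) and reports the matching HSCLK rate. A short sketch of the decode (standalone C; only the register values handled in the hunk above are modelled):

#include <stdio.h>

/* Decode QSERDES_COM_HSCLK_SEL[3:0] into the HSCLK divider. */
static int decode_hsclk_div(unsigned int sel)
{
	sel &= 0x0f;
	if (sel == 0xc)
		return 5;
	if (sel == 0x4)
		return 3;
	if (sel == 0x0)
		return 2;
	return 5;	/* unknown value: force the default, as the driver does */
}

int main(void)
{
	unsigned long long link[] = { 0, 0, 5400000000ULL,
				      2700000000ULL, 0, 1620000000ULL };

	for (unsigned int sel = 0; sel <= 0xc; sel += 4) {
		int div = decode_hsclk_div(sel);

		printf("HSCLK_SEL=0x%x -> div %d -> link %llu Hz\n",
		       sel, div, link[div]);
	}
	return 0;
}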
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.c b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.c
index 8b06f07b00ee..47b5bd7d7579 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.c
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.c
@@ -16,40 +16,37 @@
******** Display Port PLL driver block diagram for branch clocks **********
***************************************************************************
- +-------------------+
- | dp_vco_clk |
- | (DP PLL/VCO) |
- +---------+---------+
- |
- |
- v
- +----------+-----------+
- | hsclk_divsel_clk_src |
- +----------+-----------+
- |
+ +--------------------------+
+ | DP_VCO_CLK |
+ | |
+ | +-------------------+ |
+ | | (DP PLL/VCO) | |
+ | +---------+---------+ |
+ | v |
+ | +----------+-----------+ |
+ | | hsclk_divsel_clk_src | |
+ | +----------+-----------+ |
+ +--------------------------+
|
v
+------------<------------|------------>-------------+
| | |
- | | |
+----------v----------+ +----------v----------+ +----------v----------+
-|vco_divided_clk_src | | dp_link_2x_clk | | dp_link_2x_clk |
-| (aux_clk_ops) | | | | |
-v----------+----------v | divsel_five | | divsel_ten |
+| dp_link_2x_clk | | vco_divided_clk_src | | vco_divided_clk_src |
+| divsel_five | | | | |
+v----------+----------v | divsel_two | | divsel_four |
| +----------+----------+ +----------+----------+
| | |
v v v
- | +--------------------+ |
- Input to MMSSCC block | | | |
- for DP pixel clock +--> dp_link_2x_clk_mux <--+
- | |
- +----------+---------+
+ | +---------------------+ |
+ Input to MMSSCC block | | (aux_clk_ops) | |
+ for link clk, crypto clk +--> vco_divided_clk <-+
+ and interface clock | _src_mux |
+ +----------+----------+
|
v
Input to MMSSCC block
- for link clk, crypto clk
- and interface clock
-
+ for DP pixel clock
******************************************************************************
*/
@@ -68,14 +65,9 @@ v----------+----------v | divsel_five | | divsel_ten |
#include "mdss-dp-pll.h"
#include "mdss-dp-pll-cobalt.h"
-static struct clk_ops clk_ops_hsclk_divsel_clk_src_c;
static struct clk_ops clk_ops_vco_divided_clk_src_c;
static struct clk_ops clk_ops_link_2x_clk_div_c;
-
-static struct clk_div_ops hsclk_divsel_ops = {
- .set_div = hsclk_divsel_set_div,
- .get_div = hsclk_divsel_get_div,
-};
+static struct clk_ops clk_ops_gen_mux_dp;
static struct clk_div_ops link2xclk_divsel_ops = {
.set_div = link2xclk_divsel_set_div,
@@ -101,8 +93,8 @@ static struct clk_mux_ops mdss_mux_ops = {
};
static struct dp_pll_vco_clk dp_vco_clk = {
- .min_rate = DP_VCO_RATE_8100MHz,
- .max_rate = DP_VCO_RATE_10800MHz,
+ .min_rate = DP_VCO_HSCLK_RATE_1620MHz,
+ .max_rate = DP_VCO_HSCLK_RATE_5400MHz,
.c = {
.dbg_name = "dp_vco_clk",
.ops = &dp_cobalt_vco_clk_ops,
@@ -111,21 +103,6 @@ static struct dp_pll_vco_clk dp_vco_clk = {
},
};
-static struct div_clk hsclk_divsel_clk_src = {
- .data = {
- .min_div = 2,
- .max_div = 3,
- },
- .ops = &hsclk_divsel_ops,
- .c = {
- .parent = &dp_vco_clk.c,
- .dbg_name = "hsclk_divsel_clk_src",
- .ops = &clk_ops_hsclk_divsel_clk_src_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(hsclk_divsel_clk_src.c),
- },
-};
-
static struct div_clk dp_link_2x_clk_divsel_five = {
.data = {
.div = 5,
@@ -134,7 +111,7 @@ static struct div_clk dp_link_2x_clk_divsel_five = {
},
.ops = &link2xclk_divsel_ops,
.c = {
- .parent = &hsclk_divsel_clk_src.c,
+ .parent = &dp_vco_clk.c,
.dbg_name = "dp_link_2x_clk_divsel_five",
.ops = &clk_ops_link_2x_clk_div_c,
.flags = CLKFLAG_NO_RATE_CACHE,
@@ -142,61 +119,60 @@ static struct div_clk dp_link_2x_clk_divsel_five = {
},
};
-static struct div_clk dp_link_2x_clk_divsel_ten = {
+static struct div_clk vco_divsel_four_clk_src = {
.data = {
- .div = 10,
- .min_div = 10,
- .max_div = 10,
+ .div = 4,
+ .min_div = 4,
+ .max_div = 4,
},
- .ops = &link2xclk_divsel_ops,
+ .ops = &vco_divided_clk_ops,
.c = {
- .parent = &hsclk_divsel_clk_src.c,
- .dbg_name = "dp_link_2x_clk_divsel_ten",
- .ops = &clk_ops_link_2x_clk_div_c,
+ .parent = &dp_vco_clk.c,
+ .dbg_name = "vco_divsel_four_clk_src",
+ .ops = &clk_ops_vco_divided_clk_src_c,
.flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dp_link_2x_clk_divsel_ten.c),
+ CLK_INIT(vco_divsel_four_clk_src.c),
},
};
-static struct mux_clk dp_link_2x_clk_mux = {
- .num_parents = 2,
- .parents = (struct clk_src[]) {
- {&dp_link_2x_clk_divsel_five.c, 0},
- {&dp_link_2x_clk_divsel_ten.c, 1},
+static struct div_clk vco_divsel_two_clk_src = {
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
},
- .ops = &mdss_mux_ops,
+ .ops = &vco_divided_clk_ops,
.c = {
- .parent = &dp_link_2x_clk_divsel_five.c,
- .dbg_name = "dp_link_2x_clk_mux",
- .ops = &clk_ops_gen_mux,
+ .parent = &dp_vco_clk.c,
+ .dbg_name = "vco_divsel_two_clk_src",
+ .ops = &clk_ops_vco_divided_clk_src_c,
.flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dp_link_2x_clk_mux.c),
- }
+ CLK_INIT(vco_divsel_two_clk_src.c),
+ },
};
-static struct div_clk vco_divided_clk_src = {
- .data = {
- .div = 4,
- .min_div = 4,
- .max_div = 4,
+static struct mux_clk vco_divided_clk_src_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]) {
+ {&vco_divsel_two_clk_src.c, 0},
+ {&vco_divsel_four_clk_src.c, 1},
},
- .ops = &vco_divided_clk_ops,
+ .ops = &mdss_mux_ops,
.c = {
- .parent = &hsclk_divsel_clk_src.c,
- .dbg_name = "vco_divided_clk",
- .ops = &clk_ops_vco_divided_clk_src_c,
+ .parent = &vco_divsel_two_clk_src.c,
+ .dbg_name = "vco_divided_clk_src_mux",
+ .ops = &clk_ops_gen_mux_dp,
.flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(vco_divided_clk_src.c),
- },
+ CLK_INIT(vco_divided_clk_src_mux.c),
+ }
};
static struct clk_lookup dp_pllcc_cobalt[] = {
CLK_LIST(dp_vco_clk),
- CLK_LIST(hsclk_divsel_clk_src),
CLK_LIST(dp_link_2x_clk_divsel_five),
- CLK_LIST(dp_link_2x_clk_divsel_ten),
- CLK_LIST(dp_link_2x_clk_mux),
- CLK_LIST(vco_divided_clk_src),
+ CLK_LIST(vco_divsel_four_clk_src),
+ CLK_LIST(vco_divsel_two_clk_src),
+ CLK_LIST(vco_divided_clk_src_mux),
};
int dp_pll_clock_register_cobalt(struct platform_device *pdev,
@@ -211,14 +187,10 @@ int dp_pll_clock_register_cobalt(struct platform_device *pdev,
/* Set client data for vco, mux and div clocks */
dp_vco_clk.priv = pll_res;
- hsclk_divsel_clk_src.priv = pll_res;
- dp_link_2x_clk_mux.priv = pll_res;
- vco_divided_clk_src.priv = pll_res;
+ vco_divided_clk_src_mux.priv = pll_res;
+ vco_divsel_two_clk_src.priv = pll_res;
+ vco_divsel_four_clk_src.priv = pll_res;
dp_link_2x_clk_divsel_five.priv = pll_res;
- dp_link_2x_clk_divsel_ten.priv = pll_res;
-
- clk_ops_hsclk_divsel_clk_src_c = clk_ops_div;
- clk_ops_hsclk_divsel_clk_src_c.prepare = mdss_pll_div_prepare;
clk_ops_link_2x_clk_div_c = clk_ops_div;
clk_ops_link_2x_clk_div_c.prepare = mdss_pll_div_prepare;
@@ -233,6 +205,9 @@ int dp_pll_clock_register_cobalt(struct platform_device *pdev,
clk_ops_vco_divided_clk_src_c.prepare = mdss_pll_div_prepare;
clk_ops_vco_divided_clk_src_c.handoff = vco_divided_clk_handoff;
+ clk_ops_gen_mux_dp = clk_ops_gen_mux;
+ clk_ops_gen_mux_dp.get_rate = parent_get_rate;
+
/* We can select different clock ops for future versions */
dp_vco_clk.c.ops = &dp_cobalt_vco_clk_ops;
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
index 290933c0cbb4..d2b5d98a2d41 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
@@ -59,12 +59,19 @@
#define TXn_SLEW_CNTL 0x0030
#define TXn_INTERFACE_SELECT 0x0034
+#define TXn_RES_CODE_LANE_TX 0x003C
+#define TXn_RES_CODE_LANE_RX 0x0040
+#define TXn_RES_CODE_LANE_OFFSET_TX 0x0044
+#define TXn_RES_CODE_LANE_OFFSET_RX 0x0048
+
#define TXn_DEBUG_BUS_SEL 0x0058
#define TXn_TRANSCEIVER_BIAS_EN 0x005C
#define TXn_HIGHZ_DRVR_EN 0x0060
#define TXn_TX_POL_INV 0x0064
#define TXn_PARRATE_REC_DETECT_IDLE_EN 0x0068
+#define TXn_LANE_MODE_1 0x008C
+
#define TXn_TRAN_DRVR_EMP_EN 0x00C0
#define TXn_TX_INTERFACE_MODE 0x00C4
@@ -149,9 +156,12 @@
#define DP_PLL_POLL_TIMEOUT_US 10000
#define DP_VCO_RATE_8100MHz 8100000000ULL
-#define DP_VCO_RATE_9720MHz 9720000000ULL
#define DP_VCO_RATE_10800MHz 10800000000ULL
+#define DP_VCO_HSCLK_RATE_1620MHz 1620000000ULL
+#define DP_VCO_HSCLK_RATE_2700MHz 2700000000ULL
+#define DP_VCO_HSCLK_RATE_5400MHz 5400000000ULL
+
int dp_vco_set_rate(struct clk *c, unsigned long rate);
unsigned long dp_vco_get_rate(struct clk *c);
long dp_vco_round_rate(struct clk *c, unsigned long rate);
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c b/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
index 19e813fd5a54..1228d925761b 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
@@ -454,8 +454,8 @@ static void dsi_pll_disable_pll_bias(struct mdss_pll_resources *rsc)
{
u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
- MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0);
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
ndelay(250);
}
@@ -468,6 +468,22 @@ static void dsi_pll_enable_pll_bias(struct mdss_pll_resources *rsc)
ndelay(250);
}
+static void dsi_pll_disable_global_clk(struct mdss_pll_resources *rsc)
+{
+ u32 data;
+
+ data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data & ~BIT(5)));
+}
+
+static void dsi_pll_enable_global_clk(struct mdss_pll_resources *rsc)
+{
+ u32 data;
+
+ data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5)));
+}
+
static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
{
int rc;
@@ -494,6 +510,11 @@ static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
}
rsc->pll_on = true;
+
+ dsi_pll_enable_global_clk(rsc);
+ if (rsc->slave)
+ dsi_pll_enable_global_clk(rsc->slave);
+
MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
if (rsc->slave)
MDSS_PLL_REG_W(rsc->slave->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
@@ -504,8 +525,9 @@ error:
static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc)
{
- dsi_pll_disable_pll_bias(rsc);
+ dsi_pll_disable_global_clk(rsc);
MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
+ dsi_pll_disable_pll_bias(rsc);
}
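Note: the new dsi_pll_enable_global_clk()/dsi_pll_disable_global_clk() helpers gate bit 5 of PHY_CMN_CLK_CFG1, and the ordering is deliberate: the global clock comes up only after the PLL is on, and on the way down it is gated before RBUF_CTRL is cleared and the PLL bias dropped. A standalone read-modify-write sketch of the bit toggle (plain C on an in-memory register image; only the bit position comes from the hunk, everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

static uint32_t phy_cmn_clk_cfg1;	/* stand-in for the mapped register */

static void global_clk_enable(void)
{
	phy_cmn_clk_cfg1 |= BIT(5);	/* set only the gate bit */
}

static void global_clk_disable(void)
{
	phy_cmn_clk_cfg1 &= ~BIT(5);	/* clear only the gate bit */
}

int main(void)
{
	global_clk_enable();
	printf("CLK_CFG1 after enable:  0x%08x\n", phy_cmn_clk_cfg1);
	global_clk_disable();
	printf("CLK_CFG1 after disable: 0x%08x\n", phy_cmn_clk_cfg1);
	return 0;
}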
static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
diff --git a/drivers/gpu/msm/a5xx_reg.h b/drivers/gpu/msm/a5xx_reg.h
index c6dc9032c0bc..cedd02987002 100644
--- a/drivers/gpu/msm/a5xx_reg.h
+++ b/drivers/gpu/msm/a5xx_reg.h
@@ -858,6 +858,7 @@
#define A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0xA87B
#define A5XX_GPMU_POWER_COUNTER_SELECT_0 0xA87C
#define A5XX_GPMU_POWER_COUNTER_SELECT_1 0xA87D
+#define A5XX_GPMU_GPMU_SP_CLOCK_CONTROL 0xA880
#define A5XX_GPMU_CLOCK_THROTTLE_CTRL 0xA8A3
#define A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0xA8A8
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index cbeb1a924cc9..18fdd400ac7a 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -874,9 +874,6 @@ static int adreno_of_get_power(struct adreno_device *adreno_dev,
device->pwrctrl.interval_timeout = msecs_to_jiffies(timeout);
- device->pwrctrl.strtstp_sleepwake =
- of_property_read_bool(node, "qcom,strtstp-sleepwake");
-
device->pwrctrl.bus_control = of_property_read_bool(node,
"qcom,bus-control");
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 61d27ac8061f..3252bfb764f2 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -437,8 +437,10 @@ static int a5xx_regulator_enable(struct adreno_device *adreno_dev)
{
unsigned int ret;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (!(adreno_is_a530(adreno_dev) || adreno_is_a540(adreno_dev)))
+ if (!(adreno_is_a530(adreno_dev) || adreno_is_a540(adreno_dev))) {
+ a5xx_hwcg_set(adreno_dev, true);
return 0;
+ }
/*
* Turn on smaller power domain first to reduce voltage droop.
@@ -460,6 +462,15 @@ static int a5xx_regulator_enable(struct adreno_device *adreno_dev)
return ret;
}
+ /* Disable SP clock */
+ kgsl_regrmw(device, A5XX_GPMU_GPMU_SP_CLOCK_CONTROL,
+ CNTL_IP_CLK_ENABLE, 0);
+ /* Enable hardware clockgating */
+ a5xx_hwcg_set(adreno_dev, true);
+ /* Enable SP clock */
+ kgsl_regrmw(device, A5XX_GPMU_GPMU_SP_CLOCK_CONTROL,
+ CNTL_IP_CLK_ENABLE, 1);
+
return 0;
}
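Note: on A530/A540 hardware clock gating is now enabled from the regulator-enable path instead of a5xx_start(), and the write is bracketed by turning the SP IP clock off and back on through A5XX_GPMU_GPMU_SP_CLOCK_CONTROL (CNTL_IP_CLK_ENABLE, bit 0); other A5xx parts simply enable HWCG and return early. A small model of the bracketed sequence (standalone C over a fake register file; the helper mimics a read-modify-write and is not the kgsl_regrmw() implementation):

#include <stdint.h>
#include <stdio.h>

#define A5XX_GPMU_GPMU_SP_CLOCK_CONTROL 0xA880
#define CNTL_IP_CLK_ENABLE (1u << 0)

static uint32_t regs[0x10000];	/* fake register file indexed by dword offset */

/* Read-modify-write: clear 'mask', then OR in 'bits'. */
static void regrmw(uint32_t offset, uint32_t mask, uint32_t bits)
{
	regs[offset] = (regs[offset] & ~mask) | bits;
}

static void hwcg_set(int on)
{
	printf("HWCG %s\n", on ? "enabled" : "disabled");
}

int main(void)
{
	regs[A5XX_GPMU_GPMU_SP_CLOCK_CONTROL] = CNTL_IP_CLK_ENABLE;

	/* Disable the SP clock, flip HWCG, then re-enable the SP clock. */
	regrmw(A5XX_GPMU_GPMU_SP_CLOCK_CONTROL, CNTL_IP_CLK_ENABLE, 0);
	hwcg_set(1);
	regrmw(A5XX_GPMU_GPMU_SP_CLOCK_CONTROL, CNTL_IP_CLK_ENABLE, 1);

	printf("SP_CLOCK_CONTROL = 0x%x\n",
	       regs[A5XX_GPMU_GPMU_SP_CLOCK_CONTROL]);
	return 0;
}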
@@ -1875,8 +1886,6 @@ static void a5xx_start(struct adreno_device *adreno_dev)
} else {
/* if not in ISDB mode enable ME/PFP split notification */
kgsl_regwrite(device, A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
- /* enable HWCG */
- a5xx_hwcg_set(adreno_dev, true);
}
kgsl_regwrite(device, A5XX_RBBM_AHB_CNTL2, 0x0000003F);
diff --git a/drivers/gpu/msm/adreno_a5xx.h b/drivers/gpu/msm/adreno_a5xx.h
index 27d5a4b31c71..e424e7a9f228 100644
--- a/drivers/gpu/msm/adreno_a5xx.h
+++ b/drivers/gpu/msm/adreno_a5xx.h
@@ -177,6 +177,9 @@ void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on);
/* A5XX_GPMU_GPMU_PWR_THRESHOLD */
#define PWR_THRESHOLD_VALID 0x80000000
+
+/* A5XX_GPMU_GPMU_SP_CLOCK_CONTROL */
+#define CNTL_IP_CLK_ENABLE BIT(0)
/* AGC */
#define AGC_INIT_BASE A5XX_GPMU_DATA_RAM_BASE
#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index f55b795b1d2b..f42d822b451b 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -43,11 +43,9 @@
#define KGSL_STATE_INIT 0x00000001
#define KGSL_STATE_ACTIVE 0x00000002
#define KGSL_STATE_NAP 0x00000004
-#define KGSL_STATE_SLEEP 0x00000008
#define KGSL_STATE_SUSPEND 0x00000010
#define KGSL_STATE_AWARE 0x00000020
#define KGSL_STATE_SLUMBER 0x00000080
-#define KGSL_STATE_DEEP_NAP 0x00000100
/**
* enum kgsl_event_results - result codes passed to an event callback when the
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 7ad7fdfb8181..ea760d9198ee 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -31,7 +31,6 @@
#define KGSL_PWRFLAGS_CLK_ON 1
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3
-#define KGSL_PWRFLAGS_RETENTION_ON 4
#define KGSL_PWRFLAGS_NAP_OFF 5
#define UPDATE_BUSY_VAL 1000000
@@ -80,10 +79,8 @@ static void kgsl_pwrctrl_set_state(struct kgsl_device *device,
unsigned int state);
static void kgsl_pwrctrl_request_state(struct kgsl_device *device,
unsigned int state);
-static void kgsl_pwrctrl_retention_clk(struct kgsl_device *device, int state);
static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level);
-
/**
* _record_pwrevent() - Record the history of the new event
* @device: Pointer to the kgsl_device struct
@@ -816,8 +813,6 @@ static ssize_t __timer_store(struct device *dev, struct device_attribute *attr,
/* Let the timeout be requested in ms, but convert to jiffies. */
if (timer == KGSL_PWR_IDLE_TIMER)
device->pwrctrl.interval_timeout = msecs_to_jiffies(val);
- else if (timer == KGSL_PWR_DEEP_NAP_TIMER)
- device->pwrctrl.deep_nap_timeout = msecs_to_jiffies(val);
mutex_unlock(&device->mutex);
@@ -843,27 +838,6 @@ static ssize_t kgsl_pwrctrl_idle_timer_show(struct device *dev,
jiffies_to_msecs(device->pwrctrl.interval_timeout));
}
-static ssize_t kgsl_pwrctrl_deep_nap_timer_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
-
- return __timer_store(dev, attr, buf, count, KGSL_PWR_DEEP_NAP_TIMER);
-}
-
-static ssize_t kgsl_pwrctrl_deep_nap_timer_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct kgsl_device *device = kgsl_device_from_dev(dev);
-
- if (device == NULL)
- return 0;
- /* Show the idle_timeout converted to msec */
- return snprintf(buf, PAGE_SIZE, "%u\n",
- jiffies_to_msecs(device->pwrctrl.deep_nap_timeout));
-}
-
static ssize_t kgsl_pwrctrl_pmqos_active_latency_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -985,9 +959,6 @@ static void __force_on(struct kgsl_device *device, int flag, int on)
case KGSL_PWRFLAGS_POWER_ON:
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
break;
- case KGSL_PWRFLAGS_RETENTION_ON:
- kgsl_pwrctrl_retention_clk(device, KGSL_PWRFLAGS_ON);
- break;
}
set_bit(flag, &device->pwrctrl.ctrl_flags);
} else {
@@ -1071,21 +1042,6 @@ static ssize_t kgsl_pwrctrl_force_rail_on_store(struct device *dev,
return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_POWER_ON);
}
-static ssize_t kgsl_pwrctrl_force_non_retention_on_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_RETENTION_ON);
-}
-
-static ssize_t kgsl_pwrctrl_force_non_retention_on_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return __force_on_store(dev, attr, buf, count,
- KGSL_PWRFLAGS_RETENTION_ON);
-}
-
static ssize_t kgsl_pwrctrl_force_no_nap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1221,8 +1177,6 @@ static DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
kgsl_pwrctrl_max_gpuclk_store);
static DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
kgsl_pwrctrl_idle_timer_store);
-static DEVICE_ATTR(deep_nap_timer, 0644, kgsl_pwrctrl_deep_nap_timer_show,
- kgsl_pwrctrl_deep_nap_timer_store);
static DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
NULL);
static DEVICE_ATTR(gpu_available_frequencies, 0444,
@@ -1265,9 +1219,6 @@ static DEVICE_ATTR(default_pwrlevel, 0644,
kgsl_pwrctrl_default_pwrlevel_show,
kgsl_pwrctrl_default_pwrlevel_store);
static DEVICE_ATTR(popp, 0644, kgsl_popp_show, kgsl_popp_store);
-static DEVICE_ATTR(force_non_retention_on, 0644,
- kgsl_pwrctrl_force_non_retention_on_show,
- kgsl_pwrctrl_force_non_retention_on_store);
static DEVICE_ATTR(force_no_nap, 0644,
kgsl_pwrctrl_force_no_nap_show,
kgsl_pwrctrl_force_no_nap_store);
@@ -1276,7 +1227,6 @@ static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_gpuclk,
&dev_attr_max_gpuclk,
&dev_attr_idle_timer,
- &dev_attr_deep_nap_timer,
&dev_attr_gpubusy,
&dev_attr_gpu_available_frequencies,
&dev_attr_gpu_clock_stats,
@@ -1289,7 +1239,6 @@ static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_force_clk_on,
&dev_attr_force_bus_on,
&dev_attr_force_rail_on,
- &dev_attr_force_non_retention_on,
&dev_attr_force_no_nap,
&dev_attr_bus_split,
&dev_attr_default_pwrlevel,
@@ -1328,54 +1277,6 @@ void kgsl_pwrctrl_busy_time(struct kgsl_device *device, u64 time, u64 busy)
}
EXPORT_SYMBOL(kgsl_pwrctrl_busy_time);
-static void kgsl_pwrctrl_retention_clk(struct kgsl_device *device, int state)
-{
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- int i = 0;
-
- if (!(pwr->gx_retention) || test_bit(KGSL_PWRFLAGS_RETENTION_ON,
- &device->pwrctrl.ctrl_flags))
- return;
-
- if (state == KGSL_PWRFLAGS_OFF) {
- if (test_and_clear_bit(KGSL_PWRFLAGS_RETENTION_ON,
- &pwr->power_flags)) {
- trace_kgsl_retention_clk(device, state);
- /* prepare the mx clk to avoid RPM transactions*/
- clk_set_rate(pwr->dummy_mx_clk,
- pwr->pwrlevels
- [pwr->active_pwrlevel].
- gpu_freq);
- clk_prepare(pwr->dummy_mx_clk);
- /*
- * Unprepare Gfx clocks to put Gfx rail to
- * retention voltage.
- */
- for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
- if (pwr->grp_clks[i])
- clk_unprepare(pwr->grp_clks[i]);
- }
- } else if (state == KGSL_PWRFLAGS_ON) {
- if (!test_and_set_bit(KGSL_PWRFLAGS_RETENTION_ON,
- &pwr->power_flags)) {
- trace_kgsl_retention_clk(device, state);
- /*
- * Prepare Gfx clocks to put Gfx rail out
- * of rentention
- */
- for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
- if (pwr->grp_clks[i])
- clk_prepare(pwr->grp_clks[i]);
-
- /* unprepare the dummy mx clk*/
- clk_unprepare(pwr->dummy_mx_clk);
- clk_set_rate(pwr->dummy_mx_clk,
- pwr->pwrlevels[pwr->num_pwrlevels - 1].
- gpu_freq);
- }
- }
-}
-
static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
int requested_state)
{
@@ -1401,9 +1302,7 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
clk_disable(pwr->grp_clks[i]);
/* High latency clock maintenance. */
if ((pwr->pwrlevels[0].gpu_freq > 0) &&
- (requested_state != KGSL_STATE_NAP) &&
- (requested_state !=
- KGSL_STATE_DEEP_NAP)) {
+ (requested_state != KGSL_STATE_NAP)) {
for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
clk_unprepare(pwr->grp_clks[i]);
clk_set_rate(pwr->grp_clks[0],
@@ -1415,7 +1314,7 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
/* Turn off the IOMMU clocks */
kgsl_mmu_disable_clk(&device->mmu);
- } else if (requested_state == KGSL_STATE_SLEEP) {
+ } else if (requested_state == KGSL_STATE_SLUMBER) {
/* High latency clock maintenance. */
for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
clk_unprepare(pwr->grp_clks[i]);
@@ -1433,8 +1332,7 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
trace_kgsl_clk(device, state,
kgsl_pwrctrl_active_freq(pwr));
/* High latency clock maintenance. */
- if ((device->state != KGSL_STATE_NAP) &&
- (device->state != KGSL_STATE_DEEP_NAP)) {
+ if (device->state != KGSL_STATE_NAP) {
if (pwr->pwrlevels[0].gpu_freq > 0) {
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels
@@ -1658,16 +1556,6 @@ static void kgsl_thermal_timer(unsigned long data)
kgsl_schedule_work(&device->pwrctrl.thermal_cycle_ws);
}
-void kgsl_deep_nap_timer(unsigned long data)
-{
- struct kgsl_device *device = (struct kgsl_device *) data;
-
- if (device->state == KGSL_STATE_NAP) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_DEEP_NAP);
- kgsl_schedule_work(&device->idle_check_ws);
- }
-}
-
#ifdef CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON
static int kgsl_pwrctrl_vbif_init(void)
{
@@ -1815,22 +1703,6 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
if (pwr->grp_clks[0] == NULL)
pwr->grp_clks[0] = pwr->grp_clks[1];
- if (of_property_read_u32(pdev->dev.of_node, "qcom,deep-nap-timeout",
- &result))
- result = 20;
-
- pwr->deep_nap_timeout = msecs_to_jiffies(result);
- pwr->gx_retention = of_property_read_bool(pdev->dev.of_node,
- "qcom,gx-retention");
- if (pwr->gx_retention) {
- pwr->dummy_mx_clk = clk_get(&pdev->dev, "mx_clk");
- if (IS_ERR(pwr->dummy_mx_clk)) {
- pwr->gx_retention = 0;
- pwr->dummy_mx_clk = NULL;
- KGSL_CORE_ERR("Couldn't get clock: mx_clk\n");
- }
- }
-
/* Getting gfx-bimc-interface-clk frequency */
if (!of_property_read_u32(pdev->dev.of_node,
"qcom,gpu-bimc-interface-clk-freq",
@@ -1838,8 +1710,6 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
pwr->gpu_bimc_int_clk = devm_clk_get(&pdev->dev,
"bimc_gpu_clk");
- pwr->power_flags = BIT(KGSL_PWRFLAGS_RETENTION_ON);
-
if (of_property_read_bool(pdev->dev.of_node, "qcom,no-nap"))
device->pwrctrl.ctrl_flags |= BIT(KGSL_PWRFLAGS_NAP_OFF);
@@ -1986,9 +1856,6 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
kgsl_pwrctrl_vbif_init();
- setup_timer(&pwr->deep_nap_timer, kgsl_deep_nap_timer,
- (unsigned long) device);
-
return result;
}
@@ -2049,8 +1916,7 @@ void kgsl_idle_check(struct work_struct *work)
mutex_lock(&device->mutex);
if (device->state == KGSL_STATE_ACTIVE
- || device->state == KGSL_STATE_NAP
- || device->state == KGSL_STATE_DEEP_NAP) {
+ || device->state == KGSL_STATE_NAP) {
if (!atomic_read(&device->active_cnt))
kgsl_pwrctrl_change_state(device,
@@ -2062,8 +1928,7 @@ void kgsl_idle_check(struct work_struct *work)
jiffies +
device->pwrctrl.interval_timeout);
}
- if (device->state != KGSL_STATE_DEEP_NAP)
- kgsl_pwrscale_update(device);
+ kgsl_pwrscale_update(device);
mutex_unlock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_idle_check);
@@ -2074,10 +1939,7 @@ void kgsl_timer(unsigned long data)
KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
if (device->requested_state != KGSL_STATE_SUSPEND) {
- if (device->pwrctrl.strtstp_sleepwake)
- kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
- else
- kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
/* Have work run in a non-interrupt context. */
kgsl_schedule_work(&device->idle_check_ws);
}
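Note: with KGSL_STATE_SLEEP and KGSL_STATE_DEEP_NAP (and the GX retention plumbing) removed, the power state machine is reduced to INIT/ACTIVE/NAP/SUSPEND/AWARE/SLUMBER and the idle timer always requests SLUMBER. A compact model of that policy (standalone C; the state bit values mirror the defines kept in kgsl_device.h):

#include <stdio.h>

#define KGSL_STATE_INIT		0x00000001
#define KGSL_STATE_ACTIVE	0x00000002
#define KGSL_STATE_NAP		0x00000004
#define KGSL_STATE_SUSPEND	0x00000010
#define KGSL_STATE_AWARE	0x00000020
#define KGSL_STATE_SLUMBER	0x00000080

/* Idle-timer policy after this patch: unless a suspend is already
 * requested, expiry asks for SLUMBER (SLEEP no longer exists). */
static unsigned int idle_timer_target(unsigned int requested)
{
	return (requested == KGSL_STATE_SUSPEND) ?
		requested : KGSL_STATE_SLUMBER;
}

int main(void)
{
	printf("requested ACTIVE  -> 0x%02x\n",
	       idle_timer_target(KGSL_STATE_ACTIVE));
	printf("requested SUSPEND -> 0x%02x\n",
	       idle_timer_target(KGSL_STATE_SUSPEND));
	return 0;
}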
@@ -2139,7 +2001,7 @@ static void kgsl_pwrctrl_disable(struct kgsl_device *device)
/* Order pwrrail/clk sequence based upon platform */
device->ftbl->regulator_disable(device);
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
- kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLUMBER);
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
@@ -2151,14 +2013,7 @@ static int _init(struct kgsl_device *device)
{
int status = 0;
switch (device->state) {
- case KGSL_STATE_DEEP_NAP:
- pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
- device->pwrctrl.pm_qos_active_latency);
- /* Get the device out of retention */
- kgsl_pwrctrl_retention_clk(device, KGSL_PWRFLAGS_ON);
- /* fall through */
case KGSL_STATE_NAP:
- case KGSL_STATE_SLEEP:
/* Force power on to do the stop */
status = kgsl_pwrctrl_enable(device);
case KGSL_STATE_ACTIVE:
@@ -2178,7 +2033,7 @@ static int _init(struct kgsl_device *device)
}
/**
- * _wake() - Power up the GPU from a slumber/sleep state
+ * _wake() - Power up the GPU from a slumber state
* @device - Pointer to the kgsl_device struct
*
* Resume the GPU from a lower power state to ACTIVE.
@@ -2204,18 +2059,10 @@ static int _wake(struct kgsl_device *device)
KGSL_DRV_ERR(device, "start failed %d\n", status);
break;
}
- /* fall through */
- case KGSL_STATE_SLEEP:
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
kgsl_pwrscale_wake(device);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
/* fall through */
- case KGSL_STATE_DEEP_NAP:
- pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
- device->pwrctrl.pm_qos_active_latency);
- /* Get the device out of retention */
- kgsl_pwrctrl_retention_clk(device, KGSL_PWRFLAGS_ON);
- /* fall through */
case KGSL_STATE_NAP:
/* Turn on the core clocks */
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
@@ -2237,8 +2084,6 @@ static int _wake(struct kgsl_device *device)
pwr->previous_pwrlevel = pwr->active_pwrlevel;
mod_timer(&device->idle_timer, jiffies +
device->pwrctrl.interval_timeout);
- del_timer_sync(&device->pwrctrl.deep_nap_timer);
-
break;
case KGSL_STATE_AWARE:
/* Enable state before turning on irq */
@@ -2246,7 +2091,6 @@ static int _wake(struct kgsl_device *device)
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
mod_timer(&device->idle_timer, jiffies +
device->pwrctrl.interval_timeout);
- del_timer_sync(&device->pwrctrl.deep_nap_timer);
break;
default:
KGSL_PWR_WARN(device, "unhandled state %s\n",
@@ -2277,9 +2121,7 @@ _aware(struct kgsl_device *device)
status = kgsl_pwrctrl_enable(device);
break;
/* The following 3 cases shouldn't occur, but don't panic. */
- case KGSL_STATE_DEEP_NAP:
case KGSL_STATE_NAP:
- case KGSL_STATE_SLEEP:
status = _wake(device);
case KGSL_STATE_ACTIVE:
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
@@ -2317,12 +2159,8 @@ _nap(struct kgsl_device *device)
*/
kgsl_pwrscale_update_stats(device);
- mod_timer(&device->pwrctrl.deep_nap_timer, jiffies +
- device->pwrctrl.deep_nap_timeout);
-
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
- case KGSL_STATE_SLEEP:
case KGSL_STATE_SLUMBER:
break;
case KGSL_STATE_AWARE:
@@ -2336,63 +2174,6 @@ _nap(struct kgsl_device *device)
}
static int
-_deep_nap(struct kgsl_device *device)
-{
- switch (device->state) {
- /*
- * Device is expected to be clock gated to move to
- * a deeper low power state. No other transition is permitted
- */
- case KGSL_STATE_NAP:
- kgsl_pwrctrl_retention_clk(device, KGSL_PWRFLAGS_OFF);
- pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
- PM_QOS_DEFAULT_VALUE);
- kgsl_pwrctrl_set_state(device, KGSL_STATE_DEEP_NAP);
- break;
- default:
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- break;
- }
- return 0;
-}
-
-static int
-_sleep(struct kgsl_device *device)
-{
- switch (device->state) {
- case KGSL_STATE_ACTIVE:
- if (!device->ftbl->is_hw_collapsible(device)) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- return -EBUSY;
- }
- /* fall through */
- case KGSL_STATE_NAP:
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
- kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
- kgsl_pwrscale_sleep(device);
- kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
- kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
- pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
- PM_QOS_DEFAULT_VALUE);
- if (device->pwrctrl.l2pc_cpus_mask)
- pm_qos_update_request(
- &device->pwrctrl.l2pc_cpus_qos,
- PM_QOS_DEFAULT_VALUE);
- break;
- case KGSL_STATE_SLUMBER:
- break;
- case KGSL_STATE_AWARE:
- KGSL_PWR_WARN(device,
- "transition AWARE -> SLEEP is not permitted\n");
- default:
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- break;
- }
-
- return 0;
-}
-
-static int
_slumber(struct kgsl_device *device)
{
int status = 0;
@@ -2404,17 +2185,12 @@ _slumber(struct kgsl_device *device)
}
/* fall through */
case KGSL_STATE_NAP:
- case KGSL_STATE_SLEEP:
- case KGSL_STATE_DEEP_NAP:
del_timer_sync(&device->idle_timer);
if (device->pwrctrl.thermal_cycle == CYCLE_ACTIVE) {
device->pwrctrl.thermal_cycle = CYCLE_ENABLE;
del_timer_sync(&device->pwrctrl.thermal_timer);
}
- del_timer_sync(&device->pwrctrl.deep_nap_timer);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
- /* Get the device out of retention */
- kgsl_pwrctrl_retention_clk(device, KGSL_PWRFLAGS_ON);
/* make sure power is on to stop the device*/
status = kgsl_pwrctrl_enable(device);
device->ftbl->suspend_context(device);
@@ -2519,18 +2295,12 @@ int kgsl_pwrctrl_change_state(struct kgsl_device *device, int state)
case KGSL_STATE_NAP:
status = _nap(device);
break;
- case KGSL_STATE_SLEEP:
- status = _sleep(device);
- break;
case KGSL_STATE_SLUMBER:
status = _slumber(device);
break;
case KGSL_STATE_SUSPEND:
status = _suspend(device);
break;
- case KGSL_STATE_DEEP_NAP:
- status = _deep_nap(device);
- break;
default:
KGSL_PWR_INFO(device, "bad state request 0x%x\n", state);
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
@@ -2576,10 +2346,6 @@ const char *kgsl_pwrstate_to_str(unsigned int state)
return "ACTIVE";
case KGSL_STATE_NAP:
return "NAP";
- case KGSL_STATE_DEEP_NAP:
- return "DEEP_NAP";
- case KGSL_STATE_SLEEP:
- return "SLEEP";
case KGSL_STATE_SUSPEND:
return "SUSPEND";
case KGSL_STATE_SLUMBER:
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 8fd06531aa81..ae21a274fada 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -52,7 +52,6 @@
enum kgsl_pwrctrl_timer_type {
KGSL_PWR_IDLE_TIMER,
- KGSL_PWR_DEEP_NAP_TIMER,
};
/*
@@ -111,7 +110,6 @@ struct kgsl_regulator {
* struct kgsl_pwrctrl - Power control settings for a KGSL device
* @interrupt_num - The interrupt number for the device
* @grp_clks - Array of clocks structures that we control
- * @dummy_mx_clk - mx clock that is contolled during retention
* @power_flags - Control flags for power
* @pwrlevels - List of supported power levels
* @active_pwrlevel - The currently active power level
@@ -123,7 +121,6 @@ struct kgsl_regulator {
* @num_pwrlevels - number of available power levels
* @interval_timeout - timeout in jiffies to be idle before a power event
* @clock_times - Each GPU frequency's accumulated active time in us
- * @strtstp_sleepwake - true if the device supports low latency GPU start/stop
* @regulators - array of pointers to kgsl_regulator structs
* @pcl - bus scale identifier
* @ocmem - ocmem bus scale identifier
@@ -153,9 +150,6 @@ struct kgsl_regulator {
* @limits - list head for limits
* @limits_lock - spin lock to protect limits list
* @sysfs_pwr_limit - pointer to the sysfs limits node
- * @deep_nap_timer - Timer struct for entering deep nap
- * @deep_nap_timeout - Timeout for entering deep nap
- * @gx_retention - true if retention voltage is allowed
* isense_clk_indx - index of isense clock, 0 if no isense
* isense_clk_on_level - isense clock rate is XO rate below this level.
*/
@@ -163,7 +157,6 @@ struct kgsl_regulator {
struct kgsl_pwrctrl {
int interrupt_num;
struct clk *grp_clks[KGSL_MAX_CLKS];
- struct clk *dummy_mx_clk;
struct clk *gpu_bimc_int_clk;
int isense_clk_indx;
int isense_clk_on_level;
@@ -180,7 +173,6 @@ struct kgsl_pwrctrl {
unsigned int num_pwrlevels;
unsigned long interval_timeout;
u64 clock_times[KGSL_MAX_PWRLEVELS];
- bool strtstp_sleepwake;
struct kgsl_regulator regulators[KGSL_MAX_REGULATORS];
uint32_t pcl;
uint32_t ocmem_pcl;
@@ -210,9 +202,6 @@ struct kgsl_pwrctrl {
struct list_head limits;
spinlock_t limits_lock;
struct kgsl_pwr_limit *sysfs_pwr_limit;
- struct timer_list deep_nap_timer;
- uint32_t deep_nap_timeout;
- bool gx_retention;
unsigned int gpu_bimc_int_clk_freq;
bool gpu_bimc_interface_enabled;
};
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index bac09175cf12..1b51eb591036 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -221,11 +221,6 @@ DEFINE_EVENT(kgsl_pwr_template, kgsl_rail,
TP_ARGS(device, on)
);
-DEFINE_EVENT(kgsl_pwr_template, kgsl_retention_clk,
- TP_PROTO(struct kgsl_device *device, int on),
- TP_ARGS(device, on)
-);
-
TRACE_EVENT(kgsl_clk,
TP_PROTO(struct kgsl_device *device, unsigned int on,
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 3228282dc49c..8c92a564299d 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -13,6 +13,14 @@ menuconfig CORESIGHT
if CORESIGHT
+config CORESIGHT_EVENT
+ tristate "CoreSight Event driver"
+ help
+	  This driver provides support for registering for various system
+	  events and performing CoreSight actions, such as aborting trace,
+	  when they occur. These events can be controlled by using module
+ parameters.
+
config CORESIGHT_CSR
bool "CoreSight Slave Register driver"
help
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index ee3a77fede53..09433897b6a2 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_CORESIGHT) += coresight.o
obj-$(CONFIG_OF) += of_coresight.o
obj-$(CONFIG_CORESIGHT_CSR) += coresight-csr.o
+obj-$(CONFIG_CORESIGHT_EVENT) += coresight-event.o
obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
diff --git a/drivers/hwtracing/coresight/coresight-event.c b/drivers/hwtracing/coresight/coresight-event.c
new file mode 100644
index 000000000000..0bced010d4c5
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-event.c
@@ -0,0 +1,169 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/coresight.h>
+
+#include <trace/events/exception.h>
+
+static int event_abort_enable;
+static int event_abort_set(const char *val, struct kernel_param *kp);
+module_param_call(event_abort_enable, event_abort_set, param_get_int,
+ &event_abort_enable, 0644);
+
+static int event_abort_early_panic = 1;
+static int event_abort_on_panic_set(const char *val, struct kernel_param *kp);
+module_param_call(event_abort_early_panic, event_abort_on_panic_set,
+ param_get_int, &event_abort_early_panic, 0644);
+
+static void event_abort_user_fault(void *ignore,
+ struct task_struct *task,
+ unsigned long addr,
+ unsigned int fsr)
+{
+ coresight_abort();
+ pr_debug("coresight_event: task_name: %s, addr: %lu, fsr:%u",
+ (char *)task->comm, addr, fsr);
+}
+
+static void event_abort_undef_instr(void *ignore,
+ struct pt_regs *regs,
+ void *pc)
+{
+ if (user_mode(regs)) {
+ coresight_abort();
+ pr_debug("coresight_event: pc: %p", pc);
+ }
+}
+
+static void event_abort_unhandled_abort(void *ignore,
+ struct pt_regs *regs,
+ unsigned long addr,
+ unsigned int fsr)
+{
+ if (user_mode(regs)) {
+ coresight_abort();
+ pr_debug("coresight_event: addr: %lu, fsr:%u", addr, fsr);
+ }
+}
+
+static void event_abort_kernel_panic(void *ignore, long state)
+{
+ coresight_abort();
+}
+
+static int event_abort_register(void)
+{
+ int ret;
+
+ ret = register_trace_user_fault(event_abort_user_fault, NULL);
+ if (ret)
+ goto err_usr_fault;
+ ret = register_trace_undef_instr(event_abort_undef_instr, NULL);
+ if (ret)
+ goto err_undef_instr;
+ ret = register_trace_unhandled_abort(event_abort_unhandled_abort, NULL);
+ if (ret)
+ goto err_unhandled_abort;
+
+ return 0;
+
+err_unhandled_abort:
+ unregister_trace_undef_instr(event_abort_undef_instr, NULL);
+err_undef_instr:
+ unregister_trace_user_fault(event_abort_user_fault, NULL);
+err_usr_fault:
+ return ret;
+}
+
+static void event_abort_unregister(void)
+{
+ unregister_trace_user_fault(event_abort_user_fault, NULL);
+ unregister_trace_undef_instr(event_abort_undef_instr, NULL);
+ unregister_trace_unhandled_abort(event_abort_unhandled_abort, NULL);
+}
+
+static int event_abort_set(const char *val, struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+ if (ret) {
+ pr_err("coresight_event: error setting value %d\n", ret);
+ return ret;
+ }
+
+ if (event_abort_enable)
+ ret = event_abort_register();
+ else
+ event_abort_unregister();
+
+ return ret;
+}
+
+static int event_abort_on_panic_set(const char *val, struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+ if (ret) {
+ pr_err("coresight_event: error setting val on panic %d\n", ret);
+ return ret;
+ }
+
+ if (event_abort_early_panic) {
+ unregister_trace_kernel_panic_late(event_abort_kernel_panic,
+ NULL);
+ ret = register_trace_kernel_panic(event_abort_kernel_panic,
+ NULL);
+ if (ret)
+ goto err;
+ } else {
+ unregister_trace_kernel_panic(event_abort_kernel_panic, NULL);
+ ret = register_trace_kernel_panic_late(event_abort_kernel_panic,
+ NULL);
+ if (ret)
+ goto err;
+ }
+ return 0;
+err:
+ pr_err("coresight_event: error registering panic event %d\n", ret);
+ return ret;
+}
+
+static int __init event_init(void)
+{
+ int ret;
+
+ ret = register_trace_kernel_panic(event_abort_kernel_panic, NULL);
+ if (ret) {
+ /* We do not want to fail module init. This module can still
+ * be used to register other abort events.
+ */
+ pr_err("coresight_event: error registering on panic %d\n", ret);
+ }
+ return 0;
+}
+module_init(event_init);
+
+static void __exit event_exit(void)
+{
+ unregister_trace_kernel_panic(event_abort_kernel_panic, NULL);
+}
+module_exit(event_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Coresight Event driver to abort tracing");
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 10e50df1e6d5..d48d8485f979 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1064,9 +1064,44 @@ static void tmc_disable_link(struct coresight_device *csdev, int inport,
tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
}
+static void tmc_abort(struct coresight_device *csdev)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ enum tmc_mode mode;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading)
+ goto out0;
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+ tmc_etb_disable_hw(drvdata);
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+ tmc_etr_disable_hw(drvdata);
+ else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
+ __tmc_etr_disable_to_bam(drvdata);
+ } else {
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode == TMC_MODE_CIRCULAR_BUFFER)
+ tmc_etb_disable_hw(drvdata);
+ else
+ goto out1;
+ }
+out0:
+ drvdata->enable = false;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "TMC aborted\n");
+ return;
+out1:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+}
+
static const struct coresight_ops_sink tmc_sink_ops = {
.enable = tmc_enable_sink,
.disable = tmc_disable_sink,
+ .abort = tmc_abort,
};
static const struct coresight_ops_link tmc_link_ops = {
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 7214efd10db5..7baa1e750a23 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -103,9 +103,19 @@ static void tpiu_disable(struct coresight_device *csdev)
dev_info(drvdata->dev, "TPIU disabled\n");
}
+static void tpiu_abort(struct coresight_device *csdev)
+{
+ struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ tpiu_disable_hw(drvdata);
+
+ dev_info(drvdata->dev, "TPIU aborted\n");
+}
+
static const struct coresight_ops_sink tpiu_sink_ops = {
.enable = tpiu_enable,
.disable = tpiu_disable,
+ .abort = tpiu_abort,
};
static const struct coresight_ops tpiu_cs_ops = {
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index a4d2ac601556..c34599c0594d 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -383,6 +383,25 @@ out:
}
EXPORT_SYMBOL_GPL(coresight_disable);
+void coresight_abort(void)
+{
+ if (!mutex_trylock(&coresight_mutex)) {
+ pr_err_ratelimited("coresight: abort could not be processed\n");
+ return;
+ }
+ if (!curr_sink)
+ goto out;
+
+ if (curr_sink->enable && sink_ops(curr_sink)->abort) {
+ sink_ops(curr_sink)->abort(curr_sink);
+ curr_sink->enable = false;
+ }
+
+out:
+ mutex_unlock(&coresight_mutex);
+}
+EXPORT_SYMBOL_GPL(coresight_abort);
+
static int coresight_disable_all_source(struct device *dev, void *data)
{
struct coresight_device *csdev;
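The coresight_abort() export added above freezes whatever sink is currently enabled so its buffer keeps the trace leading up to a fault. A minimal consumer sketch follows; it is illustrative only, not part of this patch, and it assumes coresight_abort() is declared in <linux/coresight.h> on this tree (as the new callers in coresight-event.c imply):

/*
 * Hypothetical out-of-patch consumer: freeze the active CoreSight sink
 * when a driver hits a fatal condition, preserving the trace history.
 */
#include <linux/coresight.h>

static void example_fatal_error_handler(void)
{
	/*
	 * coresight_abort() only trylocks the coresight mutex and returns
	 * quietly if no sink is currently enabled, so this call is safe
	 * even when tracing is not configured.
	 */
	coresight_abort();
}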
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 69028bd45fdd..7c4a7c7c16e7 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1110,6 +1110,15 @@ config TOUCHSCREEN_COLIBRI_VF50
To compile this driver as a module, choose M here: the
module will be called colibri_vf50_ts.
+config TOUCHSCREEN_FT5X06_PSENSOR
+ tristate "FocalTech proximity feature support"
+ depends on TOUCHSCREEN_FT5X06 && SENSORS
+ help
+ Say Y here if you want to support ft5x06's proximity
+ feature.
+
+ If unsure, say N.
+
config TOUCHSCREEN_MSTAR21XX
tristate "Mstar touchscreens"
depends on I2C
diff --git a/drivers/input/touchscreen/ft5x06_ts.c b/drivers/input/touchscreen/ft5x06_ts.c
index 4f5b5b4ecd7f..beb86c9dea11 100644
--- a/drivers/input/touchscreen/ft5x06_ts.c
+++ b/drivers/input/touchscreen/ft5x06_ts.c
@@ -29,6 +29,7 @@
#include <linux/regulator/consumer.h>
#include <linux/firmware.h>
#include <linux/debugfs.h>
+#include <linux/sensors.h>
#include <linux/input/ft5x06_ts.h>
#if defined(CONFIG_FB)
@@ -65,15 +66,28 @@
#define FT_REG_ID 0xA3
#define FT_REG_PMODE 0xA5
#define FT_REG_FW_VER 0xA6
+#define FT_REG_FW_VENDOR_ID 0xA8
#define FT_REG_POINT_RATE 0x88
#define FT_REG_THGROUP 0x80
#define FT_REG_ECC 0xCC
#define FT_REG_RESET_FW 0x07
-#define FT_REG_FW_MAJ_VER 0xB1
#define FT_REG_FW_MIN_VER 0xB2
#define FT_REG_FW_SUB_MIN_VER 0xB3
-/* power register bits */
+/* psensor register address*/
+#define FT_REG_PSENSOR_ENABLE 0xB0
+#define FT_REG_PSENSOR_STATUS 0x01
+
+/* psensor register bits*/
+#define FT_PSENSOR_ENABLE_MASK 0x01
+#define FT_PSENSOR_STATUS_NEAR 0xC0
+#define FT_PSENSOR_STATUS_FAR 0xE0
+#define FT_PSENSOR_FAR_TO_NEAR 0
+#define FT_PSENSOR_NEAR_TO_FAR 1
+#define FT_PSENSOR_ORIGINAL_STATE_FAR 1
+#define FT_PSENSOR_WAKEUP_TIMEOUT 100
+
+/* power register bits*/
#define FT_PMODE_ACTIVE 0x00
#define FT_PMODE_MONITOR 0x01
#define FT_PMODE_STANDBY 0x02
@@ -104,6 +118,7 @@
#define FT5316_ID 0x0A
#define FT5306I_ID 0x55
#define FT6X06_ID 0x06
+#define FT6X36_ID 0x36
#define FT_UPGRADE_AA 0xAA
#define FT_UPGRADE_55 0x55
@@ -115,11 +130,24 @@
#define FT_FW_FILE_MAJ_VER(x) ((x)->data[(x)->size - 2])
#define FT_FW_FILE_MIN_VER(x) 0
#define FT_FW_FILE_SUB_MIN_VER(x) 0
-
-#define FT_FW_CHECK(x) \
+#define FT_FW_FILE_VENDOR_ID(x) ((x)->data[(x)->size - 1])
+
+#define FT_FW_FILE_MAJ_VER_FT6X36(x) ((x)->data[0x10a])
+#define FT_FW_FILE_VENDOR_ID_FT6X36(x) ((x)->data[0x108])
+
+/**
+* Application data verification is run before the upgrade flow.
+* The firmware image stores complementary flag values at fixed
+* addresses; we pick them out and check them to make sure the
+* application data is valid.
+*/
+#define FT_FW_CHECK(x, ts_data) \
+ (ts_data->family_id == FT6X36_ID ? \
+ (((x)->data[0x104] ^ (x)->data[0x105]) == 0xFF \
+ && ((x)->data[0x106] ^ (x)->data[0x107]) == 0xFF) : \
(((x)->data[(x)->size - 8] ^ (x)->data[(x)->size - 6]) == 0xFF \
- && (((x)->data[(x)->size - 7] ^ (x)->data[(x)->size - 5]) == 0xFF \
- && (((x)->data[(x)->size - 3] ^ (x)->data[(x)->size - 4]) == 0xFF)))
+ && ((x)->data[(x)->size - 7] ^ (x)->data[(x)->size - 5]) == 0xFF \
+ && ((x)->data[(x)->size - 3] ^ (x)->data[(x)->size - 4]) == 0xFF))
#define FT_MAX_TRIES 5
#define FT_RETRY_DLY 20
@@ -131,8 +159,9 @@
#define FT_FW_PKT_DLY_MS 20
#define FT_FW_LAST_PKT 0x6ffa
#define FT_EARSE_DLY_MS 100
+#define FT_55_AA_DLY_NS 5000
-#define FT_UPGRADE_LOOP 10
+#define FT_UPGRADE_LOOP 30
#define FT_CAL_START 0x04
#define FT_CAL_FIN 0x00
#define FT_CAL_STORE 0x05
@@ -142,6 +171,34 @@
#define FT_INFO_MAX_LEN 512
+#define FT_BLOADER_SIZE_OFF 12
+#define FT_BLOADER_NEW_SIZE 30
+#define FT_DATA_LEN_OFF_OLD_FW 8
+#define FT_DATA_LEN_OFF_NEW_FW 14
+#define FT_FINISHING_PKT_LEN_OLD_FW 6
+#define FT_FINISHING_PKT_LEN_NEW_FW 12
+#define FT_MAGIC_BLOADER_Z7 0x7bfa
+#define FT_MAGIC_BLOADER_LZ4 0x6ffa
+#define FT_MAGIC_BLOADER_GZF_30 0x7ff4
+#define FT_MAGIC_BLOADER_GZF 0x7bf4
+
+#define PINCTRL_STATE_ACTIVE "pmx_ts_active"
+#define PINCTRL_STATE_SUSPEND "pmx_ts_suspend"
+#define PINCTRL_STATE_RELEASE "pmx_ts_release"
+
+enum {
+ FT_BLOADER_VERSION_LZ4 = 0,
+ FT_BLOADER_VERSION_Z7 = 1,
+ FT_BLOADER_VERSION_GZF = 2,
+};
+
+enum {
+ FT_FT5336_FAMILY_ID_0x11 = 0x11,
+ FT_FT5336_FAMILY_ID_0x12 = 0x12,
+ FT_FT5336_FAMILY_ID_0x13 = 0x13,
+ FT_FT5336_FAMILY_ID_0x14 = 0x14,
+};
+
#define FT_STORE_TS_INFO(buf, id, name, max_tch, group_id, fw_vkey_support, \
fw_name, fw_maj, fw_min, fw_sub_min) \
snprintf(buf, FT_INFO_MAX_LEN, \
@@ -164,6 +221,7 @@ struct ft5x06_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
const struct ft5x06_ts_platform_data *pdata;
+ struct ft5x06_psensor_platform_data *psensor_pdata;
struct regulator *vdd;
struct regulator *vcc_i2c;
char fw_name[FT_FW_NAME_MAX_LEN];
@@ -176,13 +234,41 @@ struct ft5x06_ts_data {
u8 *tch_data;
u32 tch_data_len;
u8 fw_ver[3];
+ u8 fw_vendor_id;
#if defined(CONFIG_FB)
struct notifier_block fb_notif;
#elif defined(CONFIG_HAS_EARLYSUSPEND)
struct early_suspend early_suspend;
#endif
+ struct pinctrl *ts_pinctrl;
+ struct pinctrl_state *pinctrl_state_active;
+ struct pinctrl_state *pinctrl_state_suspend;
+ struct pinctrl_state *pinctrl_state_release;
};
+static struct sensors_classdev __maybe_unused sensors_proximity_cdev = {
+ .name = "ft5x06-proximity",
+ .vendor = "FocalTech",
+ .version = 1,
+ .handle = SENSORS_PROXIMITY_HANDLE,
+ .type = SENSOR_TYPE_PROXIMITY,
+ .max_range = "5.0",
+ .resolution = "5.0",
+ .sensor_power = "0.1",
+ .min_delay = 0,
+ .fifo_reserved_event_count = 0,
+ .fifo_max_event_count = 0,
+ .enabled = 0,
+ .delay_msec = 200,
+ .sensors_enable = NULL,
+ .sensors_poll_delay = NULL,
+};
+
+static inline bool ft5x06_psensor_support_enabled(void)
+{
+ return config_enabled(CONFIG_TOUCHSCREEN_FT5X06_PSENSOR);
+}
+
static int ft5x06_i2c_read(struct i2c_client *client, char *writebuf,
int writelen, char *readbuf, int readlen)
{
@@ -258,13 +344,103 @@ static int ft5x0x_read_reg(struct i2c_client *client, u8 addr, u8 *val)
return ft5x06_i2c_read(client, &addr, 1, val, 1);
}
+#ifdef CONFIG_TOUCHSCREEN_FT5X06_PSENSOR
+static void ft5x06_psensor_enable(struct ft5x06_ts_data *data, int enable)
+{
+ u8 state;
+ int ret = -1;
+
+ if (data->client == NULL)
+ return;
+
+ ft5x0x_read_reg(data->client, FT_REG_PSENSOR_ENABLE, &state);
+ if (enable)
+ state |= FT_PSENSOR_ENABLE_MASK;
+ else
+ state &= ~FT_PSENSOR_ENABLE_MASK;
+
+ ret = ft5x0x_write_reg(data->client, FT_REG_PSENSOR_ENABLE, state);
+ if (ret < 0)
+ dev_err(&data->client->dev,
+ "write psensor switch command failed\n");
+}
+
+static int ft5x06_psensor_enable_set(struct sensors_classdev *sensors_cdev,
+ unsigned int enable)
+{
+ struct ft5x06_psensor_platform_data *psensor_pdata =
+ container_of(sensors_cdev,
+ struct ft5x06_psensor_platform_data, ps_cdev);
+ struct ft5x06_ts_data *data = psensor_pdata->data;
+ struct input_dev *input_dev = data->psensor_pdata->input_psensor_dev;
+
+ mutex_lock(&input_dev->mutex);
+ ft5x06_psensor_enable(data, enable);
+ psensor_pdata->tp_psensor_data = FT_PSENSOR_ORIGINAL_STATE_FAR;
+ if (enable)
+ psensor_pdata->tp_psensor_opened = 1;
+ else
+ psensor_pdata->tp_psensor_opened = 0;
+ mutex_unlock(&input_dev->mutex);
+ return enable;
+}
+
+static int ft5x06_read_tp_psensor_data(struct ft5x06_ts_data *data)
+{
+ u8 psensor_status;
+ char tmp;
+ int ret = 0;
+
+ ft5x0x_read_reg(data->client,
+ FT_REG_PSENSOR_STATUS, &psensor_status);
+
+ tmp = data->psensor_pdata->tp_psensor_data;
+ if (psensor_status == FT_PSENSOR_STATUS_NEAR)
+ data->psensor_pdata->tp_psensor_data =
+ FT_PSENSOR_FAR_TO_NEAR;
+ else if (psensor_status == FT_PSENSOR_STATUS_FAR)
+ data->psensor_pdata->tp_psensor_data =
+ FT_PSENSOR_NEAR_TO_FAR;
+
+ if (tmp != data->psensor_pdata->tp_psensor_data) {
+ dev_info(&data->client->dev,
+ "%s sensor data changed\n", __func__);
+ ret = 1;
+ }
+ return ret;
+}
+#else
+static int ft5x06_psensor_enable_set(struct sensors_classdev *sensors_cdev,
+ unsigned int enable)
+{
+ return enable;
+}
+
+static int ft5x06_read_tp_psensor_data(struct ft5x06_ts_data *data)
+{
+ return 0;
+}
+#endif
+
+static void ft5x06_update_fw_vendor_id(struct ft5x06_ts_data *data)
+{
+ struct i2c_client *client = data->client;
+ u8 reg_addr;
+ int err;
+
+ reg_addr = FT_REG_FW_VENDOR_ID;
+ err = ft5x06_i2c_read(client, &reg_addr, 1, &data->fw_vendor_id, 1);
+ if (err < 0)
+ dev_err(&client->dev, "fw vendor id read failed");
+}
+
static void ft5x06_update_fw_ver(struct ft5x06_ts_data *data)
{
struct i2c_client *client = data->client;
u8 reg_addr;
int err;
- reg_addr = FT_REG_FW_MAJ_VER;
+ reg_addr = FT_REG_FW_VER;
err = ft5x06_i2c_read(client, &reg_addr, 1, &data->fw_ver[0], 1);
if (err < 0)
dev_err(&client->dev, "fw major version read failed");
@@ -288,8 +464,8 @@ static irqreturn_t ft5x06_ts_interrupt(int irq, void *dev_id)
struct ft5x06_ts_data *data = dev_id;
struct input_dev *ip_dev;
int rc, i;
- u32 id, x, y, pressure, status, num_touches;
- u8 reg = 0x00, *buf;
+ u32 id, x, y, status, num_touches;
+ u8 reg, *buf;
bool update_input = false;
if (!data) {
@@ -300,8 +476,31 @@ static irqreturn_t ft5x06_ts_interrupt(int irq, void *dev_id)
ip_dev = data->input_dev;
buf = data->tch_data;
- rc = ft5x06_i2c_read(data->client, &reg, 1,
- buf, data->tch_data_len);
+ if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support &&
+ data->psensor_pdata->tp_psensor_opened) {
+ rc = ft5x06_read_tp_psensor_data(data);
+ if (rc) {
+ if (data->suspended)
+ pm_wakeup_event(&data->client->dev,
+ FT_PSENSOR_WAKEUP_TIMEOUT);
+ input_report_abs(data->psensor_pdata->input_psensor_dev,
+ ABS_DISTANCE,
+ data->psensor_pdata->tp_psensor_data);
+ input_sync(data->psensor_pdata->input_psensor_dev);
+ if (data->suspended)
+ return IRQ_HANDLED;
+ }
+ if (data->suspended)
+ return IRQ_HANDLED;
+ }
+
+ /**
+	 * Touch data is read starting from register FT_REG_DEV_MODE.
+	 * The touch x/y values start at FT_TOUCH_X_H/L_POS and
+ * FT_TOUCH_Y_H/L_POS in buf.
+ */
+ reg = FT_REG_DEV_MODE;
+ rc = ft5x06_i2c_read(data->client, &reg, 1, buf, data->tch_data_len);
if (rc < 0) {
dev_err(&data->client->dev, "%s: read data fail\n", __func__);
return IRQ_HANDLED;
@@ -329,11 +528,9 @@ static irqreturn_t ft5x06_ts_interrupt(int irq, void *dev_id)
input_mt_slot(ip_dev, id);
if (status == FT_TOUCH_DOWN || status == FT_TOUCH_CONTACT) {
- pressure = FT_PRESS;
input_mt_report_slot_state(ip_dev, MT_TOOL_FINGER, 1);
input_report_abs(ip_dev, ABS_MT_POSITION_X, x);
input_report_abs(ip_dev, ABS_MT_POSITION_Y, y);
- input_report_abs(ip_dev, ABS_MT_PRESSURE, pressure);
} else {
input_mt_report_slot_state(ip_dev, MT_TOOL_FINGER, 0);
}
@@ -347,6 +544,79 @@ static irqreturn_t ft5x06_ts_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int ft5x06_gpio_configure(struct ft5x06_ts_data *data, bool on)
+{
+ int err = 0;
+
+ if (on) {
+ if (gpio_is_valid(data->pdata->irq_gpio)) {
+ err = gpio_request(data->pdata->irq_gpio,
+ "ft5x06_irq_gpio");
+ if (err) {
+ dev_err(&data->client->dev,
+ "irq gpio request failed");
+ goto err_irq_gpio_req;
+ }
+
+ err = gpio_direction_input(data->pdata->irq_gpio);
+ if (err) {
+ dev_err(&data->client->dev,
+ "set_direction for irq gpio failed\n");
+ goto err_irq_gpio_dir;
+ }
+ }
+
+ if (gpio_is_valid(data->pdata->reset_gpio)) {
+ err = gpio_request(data->pdata->reset_gpio,
+ "ft5x06_reset_gpio");
+ if (err) {
+ dev_err(&data->client->dev,
+ "reset gpio request failed");
+ goto err_irq_gpio_dir;
+ }
+
+ err = gpio_direction_output(data->pdata->reset_gpio, 0);
+ if (err) {
+ dev_err(&data->client->dev,
+ "set_direction for reset gpio failed\n");
+ goto err_reset_gpio_dir;
+ }
+ msleep(data->pdata->hard_rst_dly);
+ gpio_set_value_cansleep(data->pdata->reset_gpio, 1);
+ }
+
+ return 0;
+ }
+ if (gpio_is_valid(data->pdata->irq_gpio))
+ gpio_free(data->pdata->irq_gpio);
+ if (gpio_is_valid(data->pdata->reset_gpio)) {
+ /*
+		 * This is intended only to reduce leakage current.
+		 * Even if the gpio_direction_input() call fails,
+		 * only the leakage current will be higher; the
+		 * functionality will not be affected.
+ */
+ err = gpio_direction_input(data->pdata->reset_gpio);
+ if (err) {
+ dev_err(&data->client->dev,
+ "unable to set direction for gpio [%d]\n",
+				data->pdata->reset_gpio);
+ }
+ gpio_free(data->pdata->reset_gpio);
+ }
+
+ return 0;
+
+err_reset_gpio_dir:
+ if (gpio_is_valid(data->pdata->reset_gpio))
+ gpio_free(data->pdata->reset_gpio);
+err_irq_gpio_dir:
+ if (gpio_is_valid(data->pdata->irq_gpio))
+ gpio_free(data->pdata->irq_gpio);
+err_irq_gpio_req:
+ return err;
+}
+
static int ft5x06_power_on(struct ft5x06_ts_data *data, bool on)
{
int rc;
@@ -382,7 +652,11 @@ power_off:
if (rc) {
dev_err(&data->client->dev,
"Regulator vcc_i2c disable failed rc=%d\n", rc);
- regulator_enable(data->vdd);
+ rc = regulator_enable(data->vdd);
+ if (rc) {
+ dev_err(&data->client->dev,
+ "Regulator vdd enable failed rc=%d\n", rc);
+ }
}
return rc;
@@ -455,6 +729,60 @@ pwr_deinit:
return 0;
}
+static int ft5x06_ts_pinctrl_init(struct ft5x06_ts_data *ft5x06_data)
+{
+ int retval;
+
+ /* Get pinctrl if target uses pinctrl */
+ ft5x06_data->ts_pinctrl = devm_pinctrl_get(&(ft5x06_data->client->dev));
+ if (IS_ERR_OR_NULL(ft5x06_data->ts_pinctrl)) {
+ retval = PTR_ERR(ft5x06_data->ts_pinctrl);
+ dev_dbg(&ft5x06_data->client->dev,
+ "Target does not use pinctrl %d\n", retval);
+ goto err_pinctrl_get;
+ }
+
+ ft5x06_data->pinctrl_state_active
+ = pinctrl_lookup_state(ft5x06_data->ts_pinctrl,
+ PINCTRL_STATE_ACTIVE);
+ if (IS_ERR_OR_NULL(ft5x06_data->pinctrl_state_active)) {
+ retval = PTR_ERR(ft5x06_data->pinctrl_state_active);
+ dev_err(&ft5x06_data->client->dev,
+ "Can not lookup %s pinstate %d\n",
+ PINCTRL_STATE_ACTIVE, retval);
+ goto err_pinctrl_lookup;
+ }
+
+ ft5x06_data->pinctrl_state_suspend
+ = pinctrl_lookup_state(ft5x06_data->ts_pinctrl,
+ PINCTRL_STATE_SUSPEND);
+ if (IS_ERR_OR_NULL(ft5x06_data->pinctrl_state_suspend)) {
+ retval = PTR_ERR(ft5x06_data->pinctrl_state_suspend);
+ dev_err(&ft5x06_data->client->dev,
+ "Can not lookup %s pinstate %d\n",
+ PINCTRL_STATE_SUSPEND, retval);
+ goto err_pinctrl_lookup;
+ }
+
+ ft5x06_data->pinctrl_state_release
+ = pinctrl_lookup_state(ft5x06_data->ts_pinctrl,
+ PINCTRL_STATE_RELEASE);
+ if (IS_ERR_OR_NULL(ft5x06_data->pinctrl_state_release)) {
+ retval = PTR_ERR(ft5x06_data->pinctrl_state_release);
+ dev_dbg(&ft5x06_data->client->dev,
+ "Can not lookup %s pinstate %d\n",
+ PINCTRL_STATE_RELEASE, retval);
+ }
+
+ return 0;
+
+err_pinctrl_lookup:
+ devm_pinctrl_put(ft5x06_data->ts_pinctrl);
+err_pinctrl_get:
+ ft5x06_data->ts_pinctrl = NULL;
+ return retval;
+}
+
#ifdef CONFIG_PM
static int ft5x06_ts_suspend(struct device *dev)
{
@@ -472,6 +800,16 @@ static int ft5x06_ts_suspend(struct device *dev)
return 0;
}
+ if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support &&
+ device_may_wakeup(dev) &&
+ data->psensor_pdata->tp_psensor_opened) {
+ err = enable_irq_wake(data->client->irq);
+ if (err)
+ dev_err(&data->client->dev,
+ "%s: set_irq_wake failed\n", __func__);
+ data->suspended = true;
+ return err;
+ }
disable_irq(data->client->irq);
/* release all touches */
@@ -479,7 +817,7 @@ static int ft5x06_ts_suspend(struct device *dev)
input_mt_slot(data->input_dev, i);
input_mt_report_slot_state(data->input_dev, MT_TOOL_FINGER, 0);
}
- input_report_key(data->input_dev, BTN_TOUCH, 0);
+ input_mt_report_pointer_emulation(data->input_dev, false);
input_sync(data->input_dev);
if (gpio_is_valid(data->pdata->reset_gpio)) {
@@ -502,10 +840,40 @@ static int ft5x06_ts_suspend(struct device *dev)
}
}
+ if (data->ts_pinctrl) {
+ err = pinctrl_select_state(data->ts_pinctrl,
+ data->pinctrl_state_suspend);
+ if (err < 0)
+ dev_err(dev, "Cannot get suspend pinctrl state\n");
+ }
+
+ err = ft5x06_gpio_configure(data, false);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "failed to put gpios in suspend state\n");
+ goto gpio_configure_fail;
+ }
+
data->suspended = true;
return 0;
+gpio_configure_fail:
+ if (data->ts_pinctrl) {
+ err = pinctrl_select_state(data->ts_pinctrl,
+ data->pinctrl_state_active);
+ if (err < 0)
+ dev_err(dev, "Cannot get active pinctrl state\n");
+ }
+ if (data->pdata->power_on) {
+ err = data->pdata->power_on(true);
+ if (err)
+ dev_err(dev, "power on failed");
+ } else {
+ err = ft5x06_power_on(data, true);
+ if (err)
+ dev_err(dev, "power on failed");
+ }
pwr_off_fail:
if (gpio_is_valid(data->pdata->reset_gpio)) {
gpio_set_value_cansleep(data->pdata->reset_gpio, 0);
@@ -526,6 +894,18 @@ static int ft5x06_ts_resume(struct device *dev)
return 0;
}
+ if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support &&
+ device_may_wakeup(dev) &&
+ data->psensor_pdata->tp_psensor_opened) {
+ err = disable_irq_wake(data->client->irq);
+ if (err)
+ dev_err(&data->client->dev,
+ "%s: disable_irq_wake failed\n",
+ __func__);
+ data->suspended = false;
+ return err;
+ }
+
if (data->pdata->power_on) {
err = data->pdata->power_on(true);
if (err) {
@@ -540,6 +920,20 @@ static int ft5x06_ts_resume(struct device *dev)
}
}
+ if (data->ts_pinctrl) {
+ err = pinctrl_select_state(data->ts_pinctrl,
+ data->pinctrl_state_active);
+ if (err < 0)
+ dev_err(dev, "Cannot get active pinctrl state\n");
+ }
+
+ err = ft5x06_gpio_configure(data, true);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+			"failed to put gpios in resume state\n");
+ goto err_gpio_configuration;
+ }
+
if (gpio_is_valid(data->pdata->reset_gpio)) {
gpio_set_value_cansleep(data->pdata->reset_gpio, 0);
msleep(data->pdata->hard_rst_dly);
@@ -553,8 +947,46 @@ static int ft5x06_ts_resume(struct device *dev)
data->suspended = false;
return 0;
+
+err_gpio_configuration:
+ if (data->ts_pinctrl) {
+ err = pinctrl_select_state(data->ts_pinctrl,
+ data->pinctrl_state_suspend);
+ if (err < 0)
+ dev_err(dev, "Cannot get suspend pinctrl state\n");
+ }
+ if (data->pdata->power_on) {
+ err = data->pdata->power_on(false);
+ if (err)
+ dev_err(dev, "power off failed");
+ } else {
+ err = ft5x06_power_on(data, false);
+ if (err)
+ dev_err(dev, "power off failed");
+ }
+ return err;
+}
+
+static const struct dev_pm_ops ft5x06_ts_pm_ops = {
+#if (!defined(CONFIG_FB) && !defined(CONFIG_HAS_EARLYSUSPEND))
+ .suspend = ft5x06_ts_suspend,
+ .resume = ft5x06_ts_resume,
+#endif
+};
+
+#else
+static int ft5x06_ts_suspend(struct device *dev)
+{
+ return 0;
}
+static int ft5x06_ts_resume(struct device *dev)
+{
+ return 0;
+}
+
+#endif
+
#if defined(CONFIG_FB)
static int fb_notifier_callback(struct notifier_block *self,
unsigned long event, void *data)
@@ -595,14 +1027,6 @@ static void ft5x06_ts_late_resume(struct early_suspend *handler)
}
#endif
-static const struct dev_pm_ops ft5x06_ts_pm_ops = {
-#if (!defined(CONFIG_FB) && !defined(CONFIG_HAS_EARLYSUSPEND))
- .suspend = ft5x06_ts_suspend,
- .resume = ft5x06_ts_resume,
-#endif
-};
-#endif
-
static int ft5x06_auto_cal(struct i2c_client *client)
{
struct ft5x06_ts_data *data = i2c_get_clientdata(client);
@@ -647,13 +1071,23 @@ static int ft5x06_fw_upgrade_start(struct i2c_client *client,
u8 reset_reg;
u8 w_buf[FT_MAX_WR_BUF] = {0}, r_buf[FT_MAX_RD_BUF] = {0};
u8 pkt_buf[FT_FW_PKT_LEN + FT_FW_PKT_META_LEN];
- int rc, i, j, temp;
+ int i, j, temp;
u32 pkt_num, pkt_len;
+ u8 is_5336_new_bootloader = false;
+ u8 is_5336_fwsize_30 = false;
u8 fw_ecc;
+ /* determine firmware size */
+ if (*(data + data_len - FT_BLOADER_SIZE_OFF) == FT_BLOADER_NEW_SIZE)
+ is_5336_fwsize_30 = true;
+ else
+ is_5336_fwsize_30 = false;
+
for (i = 0, j = 0; i < FT_UPGRADE_LOOP; i++) {
+ msleep(FT_EARSE_DLY_MS);
/* reset - write 0xaa and 0x55 to reset register */
- if (ts_data->family_id == FT6X06_ID)
+ if (ts_data->family_id == FT6X06_ID
+ || ts_data->family_id == FT6X36_ID)
reset_reg = FT_RST_CMD_REG2;
else
reset_reg = FT_RST_CMD_REG1;
@@ -662,16 +1096,17 @@ static int ft5x06_fw_upgrade_start(struct i2c_client *client,
msleep(info.delay_aa);
ft5x0x_write_reg(client, reset_reg, FT_UPGRADE_55);
- msleep(info.delay_55);
+ if (i <= (FT_UPGRADE_LOOP / 2))
+ msleep(info.delay_55 + i * 3);
+ else
+ msleep(info.delay_55 - (i - (FT_UPGRADE_LOOP / 2)) * 2);
/* Enter upgrade mode */
w_buf[0] = FT_UPGRADE_55;
- w_buf[1] = FT_UPGRADE_AA;
- do {
- j++;
- rc = ft5x06_i2c_write(client, w_buf, 2);
- msleep(FT_RETRY_DLY);
- } while (rc <= 0 && j < FT_MAX_TRIES);
+ ft5x06_i2c_write(client, w_buf, 1);
+ usleep_range(FT_55_AA_DLY_NS, FT_55_AA_DLY_NS + 1);
+ w_buf[0] = FT_UPGRADE_AA;
+ ft5x06_i2c_write(client, w_buf, 1);
/* check READ_ID */
msleep(info.delay_readid);
@@ -684,7 +1119,9 @@ static int ft5x06_fw_upgrade_start(struct i2c_client *client,
if (r_buf[0] != info.upgrade_id_1
|| r_buf[1] != info.upgrade_id_2) {
- dev_err(&client->dev, "Upgrade ID mismatch(%d)\n", i);
+ dev_err(&client->dev, "Upgrade ID mismatch(%d), IC=0x%x 0x%x, info=0x%x 0x%x\n",
+ i, r_buf[0], r_buf[1],
+ info.upgrade_id_1, info.upgrade_id_2);
} else
break;
}
@@ -694,17 +1131,44 @@ static int ft5x06_fw_upgrade_start(struct i2c_client *client,
return -EIO;
}
+ w_buf[0] = 0xcd;
+ ft5x06_i2c_read(client, w_buf, 1, r_buf, 1);
+
+ if (r_buf[0] <= 4)
+ is_5336_new_bootloader = FT_BLOADER_VERSION_LZ4;
+ else if (r_buf[0] == 7)
+ is_5336_new_bootloader = FT_BLOADER_VERSION_Z7;
+ else if (r_buf[0] >= 0x0f &&
+ ((ts_data->family_id == FT_FT5336_FAMILY_ID_0x11) ||
+ (ts_data->family_id == FT_FT5336_FAMILY_ID_0x12) ||
+ (ts_data->family_id == FT_FT5336_FAMILY_ID_0x13) ||
+ (ts_data->family_id == FT_FT5336_FAMILY_ID_0x14)))
+ is_5336_new_bootloader = FT_BLOADER_VERSION_GZF;
+ else
+ is_5336_new_bootloader = FT_BLOADER_VERSION_LZ4;
+
+ dev_dbg(&client->dev, "bootloader type=%d, r_buf=0x%x, family_id=0x%x\n",
+ is_5336_new_bootloader, r_buf[0], ts_data->family_id);
+ /* is_5336_new_bootloader = FT_BLOADER_VERSION_GZF; */
+
/* erase app and panel paramenter area */
w_buf[0] = FT_ERASE_APP_REG;
ft5x06_i2c_write(client, w_buf, 1);
msleep(info.delay_erase_flash);
- w_buf[0] = FT_ERASE_PANEL_REG;
- ft5x06_i2c_write(client, w_buf, 1);
+ if (is_5336_fwsize_30) {
+ w_buf[0] = FT_ERASE_PANEL_REG;
+ ft5x06_i2c_write(client, w_buf, 1);
+ }
msleep(FT_EARSE_DLY_MS);
/* program firmware */
- data_len = data_len - 8;
+ if (is_5336_new_bootloader == FT_BLOADER_VERSION_LZ4
+ || is_5336_new_bootloader == FT_BLOADER_VERSION_Z7)
+ data_len = data_len - FT_DATA_LEN_OFF_OLD_FW;
+ else
+ data_len = data_len - FT_DATA_LEN_OFF_NEW_FW;
+
pkt_num = (data_len) / FT_FW_PKT_LEN;
pkt_len = FT_FW_PKT_LEN;
pkt_buf[0] = FT_FW_START_REG;
@@ -747,17 +1211,45 @@ static int ft5x06_fw_upgrade_start(struct i2c_client *client,
}
/* send the finishing packet */
- for (i = 0; i < 6; i++) {
- temp = FT_FW_LAST_PKT + i;
- pkt_buf[2] = (u8) (temp >> 8);
- pkt_buf[3] = (u8) temp;
- temp = 1;
- pkt_buf[4] = (u8) (temp >> 8);
- pkt_buf[5] = (u8) temp;
- pkt_buf[6] = data[data_len + i];
- fw_ecc ^= pkt_buf[6];
- ft5x06_i2c_write(client, pkt_buf, temp + FT_FW_PKT_META_LEN);
- msleep(FT_FW_PKT_DLY_MS);
+ if (is_5336_new_bootloader == FT_BLOADER_VERSION_LZ4 ||
+ is_5336_new_bootloader == FT_BLOADER_VERSION_Z7) {
+ for (i = 0; i < FT_FINISHING_PKT_LEN_OLD_FW; i++) {
+ if (is_5336_new_bootloader == FT_BLOADER_VERSION_Z7)
+ temp = FT_MAGIC_BLOADER_Z7 + i;
+ else if (is_5336_new_bootloader ==
+ FT_BLOADER_VERSION_LZ4)
+ temp = FT_MAGIC_BLOADER_LZ4 + i;
+ pkt_buf[2] = (u8)(temp >> 8);
+ pkt_buf[3] = (u8)temp;
+ temp = 1;
+ pkt_buf[4] = (u8)(temp >> 8);
+ pkt_buf[5] = (u8)temp;
+ pkt_buf[6] = data[data_len + i];
+ fw_ecc ^= pkt_buf[6];
+
+ ft5x06_i2c_write(client,
+ pkt_buf, temp + FT_FW_PKT_META_LEN);
+ msleep(FT_FW_PKT_DLY_MS);
+ }
+ } else if (is_5336_new_bootloader == FT_BLOADER_VERSION_GZF) {
+ for (i = 0; i < FT_FINISHING_PKT_LEN_NEW_FW; i++) {
+ if (is_5336_fwsize_30)
+ temp = FT_MAGIC_BLOADER_GZF_30 + i;
+ else
+ temp = FT_MAGIC_BLOADER_GZF + i;
+ pkt_buf[2] = (u8)(temp >> 8);
+ pkt_buf[3] = (u8)temp;
+ temp = 1;
+ pkt_buf[4] = (u8)(temp >> 8);
+ pkt_buf[5] = (u8)temp;
+ pkt_buf[6] = data[data_len + i];
+ fw_ecc ^= pkt_buf[6];
+
+ ft5x06_i2c_write(client,
+ pkt_buf, temp + FT_FW_PKT_META_LEN);
+ msleep(FT_FW_PKT_DLY_MS);
+
+ }
}
/* verify checksum */
@@ -784,9 +1276,14 @@ static int ft5x06_fw_upgrade(struct device *dev, bool force)
struct ft5x06_ts_data *data = dev_get_drvdata(dev);
const struct firmware *fw = NULL;
int rc;
- u8 fw_file_maj, fw_file_min, fw_file_sub_min;
+ u8 fw_file_maj, fw_file_min, fw_file_sub_min, fw_file_vendor_id;
bool fw_upgrade = false;
+ if (data->suspended) {
+ dev_err(dev, "Device is in suspend state: Exit FW upgrade\n");
+ return -EBUSY;
+ }
+
rc = request_firmware(&fw, data->fw_name, dev);
if (rc < 0) {
dev_err(dev, "Request firmware failed - %s (%d)\n",
@@ -795,31 +1292,32 @@ static int ft5x06_fw_upgrade(struct device *dev, bool force)
}
if (fw->size < FT_FW_MIN_SIZE || fw->size > FT_FW_MAX_SIZE) {
- dev_err(dev, "Invalid firmware size (%d)\n", fw->size);
+ dev_err(dev, "Invalid firmware size (%zu)\n", fw->size);
rc = -EIO;
goto rel_fw;
}
- fw_file_maj = FT_FW_FILE_MAJ_VER(fw);
+ if (data->family_id == FT6X36_ID) {
+ fw_file_maj = FT_FW_FILE_MAJ_VER_FT6X36(fw);
+ fw_file_vendor_id = FT_FW_FILE_VENDOR_ID_FT6X36(fw);
+ } else {
+ fw_file_maj = FT_FW_FILE_MAJ_VER(fw);
+ fw_file_vendor_id = FT_FW_FILE_VENDOR_ID(fw);
+ }
fw_file_min = FT_FW_FILE_MIN_VER(fw);
fw_file_sub_min = FT_FW_FILE_SUB_MIN_VER(fw);
+ fw_file_vendor_id = FT_FW_FILE_VENDOR_ID(fw);
dev_info(dev, "Current firmware: %d.%d.%d", data->fw_ver[0],
data->fw_ver[1], data->fw_ver[2]);
dev_info(dev, "New firmware: %d.%d.%d", fw_file_maj,
fw_file_min, fw_file_sub_min);
- if (force) {
+ if (force)
+ fw_upgrade = true;
+ else if ((data->fw_ver[0] < fw_file_maj) &&
+ data->fw_vendor_id == fw_file_vendor_id)
fw_upgrade = true;
- } else if (data->fw_ver[0] == fw_file_maj) {
- if (data->fw_ver[1] < fw_file_min)
- fw_upgrade = true;
- else if (data->fw_ver[2] < fw_file_sub_min)
- fw_upgrade = true;
- else
- dev_info(dev, "No need to upgrade\n");
- } else
- dev_info(dev, "Firmware versions do not match\n");
if (!fw_upgrade) {
dev_info(dev, "Exiting fw upgrade...\n");
@@ -828,7 +1326,7 @@ static int ft5x06_fw_upgrade(struct device *dev, bool force)
}
/* start firmware upgrade */
- if (FT_FW_CHECK(fw)) {
+ if (FT_FW_CHECK(fw, data)) {
rc = ft5x06_fw_upgrade_start(data->client, fw->data, fw->size);
if (rc < 0)
dev_err(dev, "update failed (%d). try later...\n", rc);
@@ -1251,6 +1749,8 @@ static int ft5x06_parse_dt(struct device *dev,
pdata->ignore_id_check = of_property_read_bool(np,
"focaltech,ignore-id-check");
+ pdata->psensor_support = of_property_read_bool(np,
+ "focaltech,psensor-support");
rc = of_property_read_u32(np, "focaltech,family-id", &temp_val);
if (!rc)
pdata->family_id = temp_val;
@@ -1286,8 +1786,10 @@ static int ft5x06_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ft5x06_ts_platform_data *pdata;
+ struct ft5x06_psensor_platform_data *psensor_pdata;
struct ft5x06_ts_data *data;
struct input_dev *input_dev;
+ struct input_dev *psensor_input_dev;
struct dentry *temp;
u8 reg_value;
u8 reg_addr;
@@ -1335,7 +1837,7 @@ static int ft5x06_ts_probe(struct i2c_client *client,
data->tch_data_len = FT_TCH_LEN(pdata->num_max_touches);
data->tch_data = devm_kzalloc(&client->dev,
data->tch_data_len, GFP_KERNEL);
- if (!data)
+ if (!data->tch_data)
return -ENOMEM;
input_dev = input_allocate_device();
@@ -1360,12 +1862,11 @@ static int ft5x06_ts_probe(struct i2c_client *client,
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
- input_mt_init_slots(input_dev, pdata->num_max_touches);
+ input_mt_init_slots(input_dev, pdata->num_max_touches, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_X, pdata->x_min,
pdata->x_max, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_Y, pdata->y_min,
pdata->y_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_PRESSURE, 0, FT_PRESS, 0, 0);
err = input_register_device(input_dev);
if (err) {
@@ -1401,35 +1902,26 @@ static int ft5x06_ts_probe(struct i2c_client *client,
}
}
- if (gpio_is_valid(pdata->irq_gpio)) {
- err = gpio_request(pdata->irq_gpio, "ft5x06_irq_gpio");
- if (err) {
- dev_err(&client->dev, "irq gpio request failed");
- goto pwr_off;
- }
- err = gpio_direction_input(pdata->irq_gpio);
- if (err) {
+ err = ft5x06_ts_pinctrl_init(data);
+ if (!err && data->ts_pinctrl) {
+ /*
+ * Pinctrl handle is optional. If pinctrl handle is found
+		 * let the pins be configured in the active state. If it
+		 * is not found, continue without error.
+ */
+ err = pinctrl_select_state(data->ts_pinctrl,
+ data->pinctrl_state_active);
+ if (err < 0) {
dev_err(&client->dev,
- "set_direction for irq gpio failed\n");
- goto free_irq_gpio;
+ "failed to select pin to active state");
}
}
- if (gpio_is_valid(pdata->reset_gpio)) {
- err = gpio_request(pdata->reset_gpio, "ft5x06_reset_gpio");
- if (err) {
- dev_err(&client->dev, "reset gpio request failed");
- goto free_irq_gpio;
- }
-
- err = gpio_direction_output(pdata->reset_gpio, 0);
- if (err) {
- dev_err(&client->dev,
- "set_direction for reset gpio failed\n");
- goto free_reset_gpio;
- }
- msleep(data->pdata->hard_rst_dly);
- gpio_set_value_cansleep(data->pdata->reset_gpio, 1);
+ err = ft5x06_gpio_configure(data, true);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "Failed to configure the gpios\n");
+ goto err_gpio_req;
}
/* make sure CTP already finish startup process */
@@ -1440,30 +1932,75 @@ static int ft5x06_ts_probe(struct i2c_client *client,
err = ft5x06_i2c_read(client, &reg_addr, 1, &reg_value, 1);
if (err < 0) {
dev_err(&client->dev, "version read failed");
- goto free_reset_gpio;
+ goto free_gpio;
}
dev_info(&client->dev, "Device ID = 0x%x\n", reg_value);
if ((pdata->family_id != reg_value) && (!pdata->ignore_id_check)) {
dev_err(&client->dev, "%s:Unsupported controller\n", __func__);
- goto free_reset_gpio;
+ goto free_gpio;
}
- data->family_id = reg_value;
+ data->family_id = pdata->family_id;
err = request_threaded_irq(client->irq, NULL,
- ft5x06_ts_interrupt, pdata->irqflags,
- client->dev.driver->name, data);
+ ft5x06_ts_interrupt,
+ IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (err) {
dev_err(&client->dev, "request irq failed\n");
- goto free_reset_gpio;
+ goto free_gpio;
+ }
+
+ if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support) {
+ device_init_wakeup(&client->dev, 1);
+ psensor_pdata = devm_kzalloc(&client->dev,
+ sizeof(struct ft5x06_psensor_platform_data),
+ GFP_KERNEL);
+ if (!psensor_pdata) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ goto irq_free;
+ }
+ data->psensor_pdata = psensor_pdata;
+
+ psensor_input_dev = input_allocate_device();
+ if (!psensor_input_dev) {
+ dev_err(&data->client->dev,
+ "Failed to allocate device\n");
+ goto free_psensor_pdata;
+ }
+
+ __set_bit(EV_ABS, psensor_input_dev->evbit);
+ input_set_abs_params(psensor_input_dev,
+ ABS_DISTANCE, 0, 1, 0, 0);
+ psensor_input_dev->name = "proximity";
+ psensor_input_dev->id.bustype = BUS_I2C;
+ psensor_input_dev->dev.parent = &data->client->dev;
+ data->psensor_pdata->input_psensor_dev = psensor_input_dev;
+
+ err = input_register_device(psensor_input_dev);
+ if (err) {
+ dev_err(&data->client->dev,
+ "Unable to register device, err=%d\n", err);
+ goto free_psensor_input_dev;
+ }
+
+ psensor_pdata->ps_cdev = sensors_proximity_cdev;
+ psensor_pdata->ps_cdev.sensors_enable =
+ ft5x06_psensor_enable_set;
+ psensor_pdata->data = data;
+
+ err = sensors_classdev_register(&client->dev,
+ &psensor_pdata->ps_cdev);
+ if (err)
+ goto unregister_psensor_input_device;
}
err = device_create_file(&client->dev, &dev_attr_fw_name);
if (err) {
dev_err(&client->dev, "sys file creation failed\n");
- goto irq_free;
+ goto free_psensor_class_sysfs;
}
err = device_create_file(&client->dev, &dev_attr_update_fw);
@@ -1538,6 +2075,7 @@ static int ft5x06_ts_probe(struct i2c_client *client,
dev_dbg(&client->dev, "touch threshold = %d\n", reg_value * 4);
ft5x06_update_fw_ver(data);
+ ft5x06_update_fw_vendor_id(data);
FT_STORE_TS_INFO(data->ts_info, data->family_id, data->pdata->name,
data->pdata->num_max_touches, data->pdata->group_id,
@@ -1571,15 +2109,41 @@ free_update_fw_sys:
device_remove_file(&client->dev, &dev_attr_update_fw);
free_fw_name_sys:
device_remove_file(&client->dev, &dev_attr_fw_name);
+free_psensor_class_sysfs:
+ if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support)
+ sensors_classdev_unregister(&psensor_pdata->ps_cdev);
+unregister_psensor_input_device:
+ if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support)
+ input_unregister_device(data->psensor_pdata->input_psensor_dev);
+free_psensor_input_dev:
+ if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support)
+ input_free_device(data->psensor_pdata->input_psensor_dev);
+free_psensor_pdata:
+ if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support) {
+ devm_kfree(&client->dev, psensor_pdata);
+ data->psensor_pdata = NULL;
+ }
irq_free:
+ if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support)
+ device_init_wakeup(&client->dev, 0);
free_irq(client->irq, data);
-free_reset_gpio:
+free_gpio:
if (gpio_is_valid(pdata->reset_gpio))
gpio_free(pdata->reset_gpio);
-free_irq_gpio:
if (gpio_is_valid(pdata->irq_gpio))
gpio_free(pdata->irq_gpio);
-pwr_off:
+err_gpio_req:
+ if (data->ts_pinctrl) {
+ if (IS_ERR_OR_NULL(data->pinctrl_state_release)) {
+ devm_pinctrl_put(data->ts_pinctrl);
+ data->ts_pinctrl = NULL;
+ } else {
+ err = pinctrl_select_state(data->ts_pinctrl,
+ data->pinctrl_state_release);
+ if (err)
+				pr_err("failed to select release pinctrl state\n");
+ }
+ }
if (pdata->power_on)
pdata->power_on(false);
else
@@ -1591,15 +2155,26 @@ pwr_deinit:
ft5x06_power_init(data, false);
unreg_inputdev:
input_unregister_device(input_dev);
- input_dev = NULL;
free_inputdev:
input_free_device(input_dev);
+ input_dev = NULL;
return err;
}
static int ft5x06_ts_remove(struct i2c_client *client)
{
struct ft5x06_ts_data *data = i2c_get_clientdata(client);
+ int retval;
+
+ if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support) {
+
+ device_init_wakeup(&client->dev, 0);
+ sensors_classdev_unregister(&data->psensor_pdata->ps_cdev);
+ input_unregister_device(data->psensor_pdata->input_psensor_dev);
+ input_free_device(data->psensor_pdata->input_psensor_dev);
+ devm_kfree(&client->dev, data->psensor_pdata);
+ data->psensor_pdata = NULL;
+ }
debugfs_remove_recursive(data->dir);
device_remove_file(&client->dev, &dev_attr_force_update_fw);
@@ -1620,6 +2195,18 @@ static int ft5x06_ts_remove(struct i2c_client *client)
if (gpio_is_valid(data->pdata->irq_gpio))
gpio_free(data->pdata->irq_gpio);
+ if (data->ts_pinctrl) {
+ if (IS_ERR_OR_NULL(data->pinctrl_state_release)) {
+ devm_pinctrl_put(data->ts_pinctrl);
+ data->ts_pinctrl = NULL;
+ } else {
+ retval = pinctrl_select_state(data->ts_pinctrl,
+ data->pinctrl_state_release);
+ if (retval < 0)
+ pr_err("failed to select release pinctrl state\n");
+ }
+ }
+
if (data->pdata->power_on)
data->pdata->power_on(false);
else
@@ -1653,7 +2240,7 @@ static const struct of_device_id ft5x06_match_table[] = {
static struct i2c_driver ft5x06_ts_driver = {
.probe = ft5x06_ts_probe,
- .remove = __devexit_p(ft5x06_ts_remove),
+ .remove = ft5x06_ts_remove,
.driver = {
.name = "ft5x06_ts",
.owner = THIS_MODULE,
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
index 0c6f1de2465b..76add503b6b8 100644
--- a/drivers/misc/hdcp.c
+++ b/drivers/misc/hdcp.c
@@ -51,6 +51,8 @@
(MESSAGE_ID_SIZE+BITS_128_IN_BYTES+BITS_64_IN_BYTES)
/* all message IDs */
+#define INVALID_MESSAGE_ID 0
+#define AKE_INIT_MESSAGE_ID 2
#define AKE_SEND_CERT_MESSAGE_ID 3
#define AKE_NO_STORED_KM_MESSAGE_ID 4
#define AKE_STORED_KM_MESSAGE_ID 5
@@ -63,6 +65,8 @@
#define REPEATER_AUTH_SEND_ACK_MESSAGE_ID 15
#define REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID 16
#define REPEATER_AUTH_STREAM_READY_MESSAGE_ID 17
+#define HDCP2P2_MAX_MESSAGES 18
+
#define HDCP1_SET_KEY_MESSAGE_ID 202
#define HDCP1_SET_ENC_MESSAGE_ID 205
@@ -144,6 +148,9 @@
#define HDCP_CLIENT_MAKE_VERSION(maj, min, patch) \
((((maj) & 0xFF) << 16) | (((min) & 0xFF) << 8) | ((patch) & 0xFF))
+#define REAUTH_REQ BIT(3)
+#define LINK_INTEGRITY_FAILURE BIT(4)
+
#define HDCP_LIB_EXECUTE(x) {\
if (handle->tethered)\
hdcp_lib_##x(handle);\
@@ -151,6 +158,48 @@
queue_kthread_work(&handle->worker, &handle->wk_##x);\
}
+static const struct hdcp_msg_data hdcp_msg_lookup[HDCP2P2_MAX_MESSAGES] = {
+ [AKE_INIT_MESSAGE_ID] = { 2,
+ { {0x69000, 8}, {0x69008, 3} },
+ 0 },
+ [AKE_SEND_CERT_MESSAGE_ID] = { 3,
+ { {0x6900B, 522}, {0x69215, 8}, {0x6921D, 3} },
+ 0 },
+ [AKE_NO_STORED_KM_MESSAGE_ID] = { 1,
+ { {0x69220, 128} },
+ 0 },
+ [AKE_STORED_KM_MESSAGE_ID] = { 2,
+ { {0x692A0, 16}, {0x692B0, 16} },
+ 0 },
+ [AKE_SEND_H_PRIME_MESSAGE_ID] = { 1,
+ { {0x692C0, 32} },
+ (1 << 1) },
+ [AKE_SEND_PAIRING_INFO_MESSAGE_ID] = { 1,
+ { {0x692E0, 16} },
+ (1 << 2) },
+ [LC_INIT_MESSAGE_ID] = { 1,
+ { {0x692F0, 8} },
+ 0 },
+ [LC_SEND_L_PRIME_MESSAGE_ID] = { 1,
+ { {0x692F8, 32} },
+ 0 },
+ [SKE_SEND_EKS_MESSAGE_ID] = { 2,
+ { {0x69318, 16}, {0x69328, 8} },
+ 0 },
+ [REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID] = { 4,
+ { {0x69330, 2}, {0x69332, 3}, {0x69335, 16}, {0x69345, 155} },
+ (1 << 0) },
+ [REPEATER_AUTH_SEND_ACK_MESSAGE_ID] = { 1,
+ { {0x693E0, 16} },
+ 0 },
+ [REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID] = { 3,
+ { {0x693F0, 3}, {0x693F3, 2}, {0x693F5, 126} },
+ 0 },
+ [REPEATER_AUTH_STREAM_READY_MESSAGE_ID] = { 1,
+ { {0x69473, 32} },
+ 0 }
+};
+
enum hdcp_state {
HDCP_STATE_INIT = 0x00,
HDCP_STATE_APP_LOADED = 0x01,
@@ -451,6 +500,7 @@ struct hdcp_lib_handle {
bool tethered;
struct qseecom_handle *qseecom_handle;
int last_msg_sent;
+ int last_msg;
char *last_msg_recvd_buf;
uint32_t last_msg_recvd_len;
atomic_t hdcp_off;
@@ -522,6 +572,50 @@ static const char *hdcp_lib_message_name(int msg_id)
return "UNKNOWN";
}
+static int hdcp_lib_get_next_message(struct hdcp_lib_handle *handle,
+ struct hdmi_hdcp_wakeup_data *data)
+{
+ switch (handle->last_msg) {
+ case INVALID_MESSAGE_ID:
+ return AKE_INIT_MESSAGE_ID;
+ case AKE_INIT_MESSAGE_ID:
+ return AKE_SEND_CERT_MESSAGE_ID;
+ case AKE_SEND_CERT_MESSAGE_ID:
+ if (handle->no_stored_km_flag)
+ return AKE_NO_STORED_KM_MESSAGE_ID;
+ else
+ return AKE_STORED_KM_MESSAGE_ID;
+ case AKE_STORED_KM_MESSAGE_ID:
+ case AKE_NO_STORED_KM_MESSAGE_ID:
+ return AKE_SEND_H_PRIME_MESSAGE_ID;
+ case AKE_SEND_H_PRIME_MESSAGE_ID:
+ if (handle->no_stored_km_flag)
+ return AKE_SEND_PAIRING_INFO_MESSAGE_ID;
+ else
+ return LC_INIT_MESSAGE_ID;
+ case AKE_SEND_PAIRING_INFO_MESSAGE_ID:
+ return LC_INIT_MESSAGE_ID;
+ case LC_INIT_MESSAGE_ID:
+ return LC_SEND_L_PRIME_MESSAGE_ID;
+ case LC_SEND_L_PRIME_MESSAGE_ID:
+ return SKE_SEND_EKS_MESSAGE_ID;
+ case SKE_SEND_EKS_MESSAGE_ID:
+ case REPEATER_AUTH_STREAM_READY_MESSAGE_ID:
+ case REPEATER_AUTH_SEND_ACK_MESSAGE_ID:
+ if (data->cmd == HDMI_HDCP_WKUP_CMD_SEND_MESSAGE)
+ return REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID;
+ else
+ return REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID;
+ case REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID:
+ return REPEATER_AUTH_SEND_ACK_MESSAGE_ID;
+ case REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID:
+ return REPEATER_AUTH_STREAM_READY_MESSAGE_ID;
+ default:
+		pr_err("Unknown message ID (%d)", handle->last_msg);
+ return -EINVAL;
+ }
+}
+
static inline void hdcp_lib_wakeup_client(struct hdcp_lib_handle *handle,
struct hdmi_hdcp_wakeup_data *data)
{
@@ -529,6 +623,20 @@ static inline void hdcp_lib_wakeup_client(struct hdcp_lib_handle *handle,
if (handle && handle->client_ops && handle->client_ops->wakeup &&
data && (data->cmd != HDMI_HDCP_WKUP_CMD_INVALID)) {
+ data->abort_mask = REAUTH_REQ | LINK_INTEGRITY_FAILURE;
+
+ if (data->cmd == HDMI_HDCP_WKUP_CMD_SEND_MESSAGE ||
+ data->cmd == HDMI_HDCP_WKUP_CMD_RECV_MESSAGE ||
+ data->cmd == HDMI_HDCP_WKUP_CMD_LINK_POLL) {
+ handle->last_msg =
+ hdcp_lib_get_next_message(handle, data);
+
+ if (handle->last_msg > INVALID_MESSAGE_ID &&
+ handle->last_msg < HDCP2P2_MAX_MESSAGES)
+ data->message_data =
+ &hdcp_msg_lookup[handle->last_msg];
+ }
+
rc = handle->client_ops->wakeup(data);
if (rc)
pr_err("error sending %s to client\n",
@@ -1470,6 +1578,7 @@ static int hdcp_lib_wakeup(struct hdcp_lib_wakeup_data *data)
handle->repeater_flag = false;
handle->update_stream = false;
handle->last_msg_sent = 0;
+ handle->last_msg = INVALID_MESSAGE_ID;
handle->hdcp_timeout = 0;
handle->timeout_left = 0;
handle->legacy_app = false;
@@ -1750,6 +1859,7 @@ static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle)
struct hdcp_rcvd_msg_rsp *rsp_buf;
uint32_t msglen;
char *msg = NULL;
+ uint32_t message_id_bytes = 0;
if (!handle || !handle->qseecom_handle ||
!handle->qseecom_handle->sbuf) {
@@ -1774,6 +1884,12 @@ static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle)
goto exit;
}
+ /* If the client is DP, allocate an extra byte for the message ID. */
+ if (handle->device_type == HDCP_TXMTR_DP)
+ message_id_bytes = 1;
+
+ msglen += message_id_bytes;
+
msg = kzalloc(msglen, GFP_KERNEL);
if (!msg) {
mutex_unlock(&handle->msg_lock);
@@ -1781,7 +1897,13 @@ static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle)
goto exit;
}
- memcpy(msg, handle->last_msg_recvd_buf, msglen);
+ /* copy the message id if needed */
+ if (message_id_bytes)
+ memcpy(msg, &handle->last_msg, message_id_bytes);
+
+ memcpy(msg + message_id_bytes,
+ handle->last_msg_recvd_buf,
+ handle->last_msg_recvd_len);
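A sketch of the resulting buffer layout, for clarity:

/*
 * DP (message_id_bytes == 1):
 *   msg[0]                        = one byte copied from handle->last_msg
 *                                   (the HDCP message ID)
 *   msg[1 .. last_msg_recvd_len]  = handle->last_msg_recvd_buf payload
 * HDMI (message_id_bytes == 0):
 *   msg[0 .. last_msg_recvd_len-1] = payload only
 */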
mutex_unlock(&handle->msg_lock);
diff --git a/drivers/misc/qcom/qdsp6v2/audio_wma.c b/drivers/misc/qcom/qdsp6v2/audio_wma.c
index 3d57d38d0fd1..74f678da925a 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_wma.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_wma.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -166,6 +166,8 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd,
struct msm_audio_wma_config_v2 *wma_config;
struct msm_audio_wma_config_v2_32 wma_config_32;
+ memset(&wma_config_32, 0, sizeof(wma_config_32));
+
wma_config = (struct msm_audio_wma_config_v2 *)audio->codec_cfg;
wma_config_32.format_tag = wma_config->format_tag;
wma_config_32.numchannels = wma_config->numchannels;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 779994a1c9dd..a15211fd33aa 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -205,6 +205,7 @@ struct qseecom_control {
uint32_t qseos_version;
uint32_t qsee_version;
struct device *pdev;
+ bool whitelist_support;
bool commonlib_loaded;
bool commonlib64_loaded;
struct ion_handle *cmnlib_ion_handle;
@@ -267,6 +268,30 @@ struct qseecom_listener_handle {
static struct qseecom_control qseecom;
+struct sglist_info {
+ uint32_t indexAndFlags;
+ uint32_t sizeOrCount;
+};
+
+/*
+ * The 31st bit indicates whether the request buffer carries a single
+ * physical address or multiple. If it is set, the index locates a single
+ * physical address inside the request buffer, and `sizeOrCount` is the size
+ * of the memory being shared at that physical address.
+ * Otherwise, the index locates an array of {start, len} pairs (a
+ * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
+ * that array.
+ *
+ * The 30th bit selects between 64-bit and 32-bit addressing; when it is set,
+ * physical addresses and scatter/gather entry sizes are 64-bit values,
+ * otherwise they are 32-bit values.
+ *
+ * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
+ */
+#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
+ ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
+
+#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
+
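As a worked example of the encoding described above, this hedged sketch packs one entry with SGLISTINFO_SET_INDEX_FLAG() and decodes it again; the decode expressions are illustrative and not part of the driver.

/* Pack: single physical address (c = 1), 32-bit entries (s = 0),
 * located 0x40 bytes into the request buffer. */
struct sglist_info info;
info.indexAndFlags = SGLISTINFO_SET_INDEX_FLAG(1, 0, 0x40);
info.sizeOrCount = 4096;	/* size of the shared buffer in bytes */

/* Decode (illustrative only): */
bool single_pa  = !!(info.indexAndFlags & (1u << 31));
bool is_64bit   = !!(info.indexAndFlags & (1u << 30));
uint32_t offset =    info.indexAndFlags & 0x3fffffff;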
struct qseecom_dev_handle {
enum qseecom_client_handle_type type;
union {
@@ -280,6 +305,8 @@ struct qseecom_dev_handle {
bool perf_enabled;
bool fast_load_enabled;
enum qseecom_bandwidth_request_mode mode;
+ struct sglist_info *sglistinfo_ptr;
+ uint32_t sglist_cnt;
};
struct qseecom_key_id_usage_desc {
@@ -612,6 +639,38 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
ret = scm_call2(smc_id, &desc);
break;
}
+ case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
+ struct qseecom_client_send_data_ireq *req;
+ struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+ smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
+ desc.arginfo =
+ TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_client_send_data_ireq *)
+ req_buf;
+ desc.args[0] = req->app_id;
+ desc.args[1] = req->req_ptr;
+ desc.args[2] = req->req_len;
+ desc.args[3] = req->rsp_ptr;
+ desc.args[4] = req->rsp_len;
+ desc.args[5] = req->sglistinfo_ptr;
+ desc.args[6] = req->sglistinfo_len;
+ } else {
+ req_64bit =
+ (struct qseecom_client_send_data_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->app_id;
+ desc.args[1] = req_64bit->req_ptr;
+ desc.args[2] = req_64bit->req_len;
+ desc.args[3] = req_64bit->rsp_ptr;
+ desc.args[4] = req_64bit->rsp_len;
+ desc.args[5] = req_64bit->sglistinfo_ptr;
+ desc.args[6] = req_64bit->sglistinfo_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
struct qseecom_client_send_service_ireq *req;
req = (struct qseecom_client_send_service_ireq *)
@@ -754,6 +813,36 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
ret = scm_call2(smc_id, &desc);
break;
}
+ case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
+ struct qseecom_qteec_ireq *req;
+ struct qseecom_qteec_64bit_ireq *req_64bit;
+
+ smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
+ desc.arginfo =
+ TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_qteec_ireq *)req_buf;
+ desc.args[0] = req->app_id;
+ desc.args[1] = req->req_ptr;
+ desc.args[2] = req->req_len;
+ desc.args[3] = req->resp_ptr;
+ desc.args[4] = req->resp_len;
+ desc.args[5] = req->sglistinfo_ptr;
+ desc.args[6] = req->sglistinfo_len;
+ } else {
+ req_64bit = (struct qseecom_qteec_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->app_id;
+ desc.args[1] = req_64bit->req_ptr;
+ desc.args[2] = req_64bit->req_len;
+ desc.args[3] = req_64bit->resp_ptr;
+ desc.args[4] = req_64bit->resp_len;
+ desc.args[5] = req_64bit->sglistinfo_ptr;
+ desc.args[6] = req_64bit->sglistinfo_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
case QSEOS_TEE_INVOKE_COMMAND: {
struct qseecom_qteec_ireq *req;
struct qseecom_qteec_64bit_ireq *req_64bit;
@@ -778,6 +867,36 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
ret = scm_call2(smc_id, &desc);
break;
}
+ case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
+ struct qseecom_qteec_ireq *req;
+ struct qseecom_qteec_64bit_ireq *req_64bit;
+
+ smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
+ desc.arginfo =
+ TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_qteec_ireq *)req_buf;
+ desc.args[0] = req->app_id;
+ desc.args[1] = req->req_ptr;
+ desc.args[2] = req->req_len;
+ desc.args[3] = req->resp_ptr;
+ desc.args[4] = req->resp_len;
+ desc.args[5] = req->sglistinfo_ptr;
+ desc.args[6] = req->sglistinfo_len;
+ } else {
+ req_64bit = (struct qseecom_qteec_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->app_id;
+ desc.args[1] = req_64bit->req_ptr;
+ desc.args[2] = req_64bit->req_len;
+ desc.args[3] = req_64bit->resp_ptr;
+ desc.args[4] = req_64bit->resp_len;
+ desc.args[5] = req_64bit->sglistinfo_ptr;
+ desc.args[6] = req_64bit->sglistinfo_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
case QSEOS_TEE_CLOSE_SESSION: {
struct qseecom_qteec_ireq *req;
struct qseecom_qteec_64bit_ireq *req_64bit;
@@ -2632,14 +2751,15 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
{
int ret = 0;
u32 reqd_len_sb_in = 0;
- struct qseecom_client_send_data_ireq send_data_req;
- struct qseecom_client_send_data_64bit_ireq send_data_req_64bit;
+ struct qseecom_client_send_data_ireq send_data_req = {0};
+ struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
struct qseecom_command_scm_resp resp;
unsigned long flags;
struct qseecom_registered_app_list *ptr_app;
bool found_app = false;
void *cmd_buf = NULL;
size_t cmd_len;
+ struct sglist_info *table = data->sglistinfo_ptr;
reqd_len_sb_in = req->cmd_req_len + req->resp_len;
/* find app_id & img_name from list */
@@ -2661,7 +2781,6 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
}
if (qseecom.qsee_version < QSEE_VERSION_40) {
- send_data_req.qsee_cmd_id = QSEOS_CLIENT_SEND_DATA_COMMAND;
send_data_req.app_id = data->client.app_id;
send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
data, (uintptr_t)req->cmd_req_buf));
@@ -2669,11 +2788,14 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
data, (uintptr_t)req->resp_buf));
send_data_req.rsp_len = req->resp_len;
+ send_data_req.sglistinfo_ptr =
+ (uint32_t)virt_to_phys(table);
+ send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
cmd_buf = (void *)&send_data_req;
cmd_len = sizeof(struct qseecom_client_send_data_ireq);
} else {
- send_data_req_64bit.qsee_cmd_id =
- QSEOS_CLIENT_SEND_DATA_COMMAND;
send_data_req_64bit.app_id = data->client.app_id;
send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
(uintptr_t)req->cmd_req_buf);
@@ -2695,10 +2817,20 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
send_data_req_64bit.rsp_len);
return -EFAULT;
}
+ send_data_req_64bit.sglistinfo_ptr =
+ (uint64_t)virt_to_phys(table);
+ send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
cmd_buf = (void *)&send_data_req_64bit;
cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
}
+ if (qseecom.whitelist_support == false)
+ *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
+ else
+ *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
+
msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
data->client.sb_virt,
reqd_len_sb_in,
@@ -2952,14 +3084,26 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
goto err;
}
}
- if (cleanup)
+
+ if (cleanup) {
msm_ion_do_cache_op(qseecom.ion_clnt,
ihandle, NULL, len,
ION_IOC_INV_CACHES);
- else
+ } else {
msm_ion_do_cache_op(qseecom.ion_clnt,
ihandle, NULL, len,
ION_IOC_CLEAN_INV_CACHES);
+ if (data->type == QSEECOM_CLIENT_APP) {
+ data->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 0,
+ req->ifd_data[i].cmd_buf_offset);
+ data->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ data->sglist_cnt = i + 1;
+ }
+ }
/* Deallocate the handle */
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
@@ -3158,14 +3302,25 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
}
}
cleanup:
- if (cleanup)
+ if (cleanup) {
msm_ion_do_cache_op(qseecom.ion_clnt,
ihandle, NULL, len,
ION_IOC_INV_CACHES);
- else
+ } else {
msm_ion_do_cache_op(qseecom.ion_clnt,
ihandle, NULL, len,
ION_IOC_CLEAN_INV_CACHES);
+ if (data->type == QSEECOM_CLIENT_APP) {
+ data->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 1,
+ req->ifd_data[i].cmd_buf_offset);
+ data->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ data->sglist_cnt = i + 1;
+ }
+ }
/* Deallocate the handle */
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
@@ -5828,14 +5983,23 @@ static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
}
clean:
- if (cleanup)
+ if (cleanup) {
msm_ion_do_cache_op(qseecom.ion_clnt,
ihandle, NULL, sg->length,
ION_IOC_INV_CACHES);
- else
+ } else {
msm_ion_do_cache_op(qseecom.ion_clnt,
ihandle, NULL, sg->length,
ION_IOC_CLEAN_INV_CACHES);
+ data->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 0,
+ req->ifd_data[i].cmd_buf_offset);
+ data->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ data->sglist_cnt = i + 1;
+ }
/* Deallocate the handle */
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
@@ -5860,6 +6024,7 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
uint32_t reqd_len_sb_in = 0;
void *cmd_buf = NULL;
size_t cmd_len;
+ struct sglist_info *table = data->sglistinfo_ptr;
ret = __qseecom_qteec_validate_msg(data, req);
if (ret)
@@ -5882,8 +6047,15 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
return -ENOENT;
}
+ if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+ (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+ ret = __qseecom_update_qteec_req_buf(
+ (struct qseecom_qteec_modfd_req *)req, data, false);
+ if (ret)
+ return ret;
+ }
+
if (qseecom.qsee_version < QSEE_VERSION_40) {
- ireq.qsee_cmd_id = cmd_id;
ireq.app_id = data->client.app_id;
ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
(uintptr_t)req->req_ptr);
@@ -5891,10 +6063,13 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
(uintptr_t)req->resp_ptr);
ireq.resp_len = req->resp_len;
+ ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+ ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
cmd_buf = (void *)&ireq;
cmd_len = sizeof(struct qseecom_qteec_ireq);
} else {
- ireq_64bit.qsee_cmd_id = cmd_id;
ireq_64bit.app_id = data->client.app_id;
ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
(uintptr_t)req->req_ptr);
@@ -5914,17 +6089,19 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
ireq_64bit.resp_ptr, ireq_64bit.resp_len);
return -EFAULT;
}
+ ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+ ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
cmd_buf = (void *)&ireq_64bit;
cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
}
+ if (qseecom.whitelist_support == true
+ && cmd_id == QSEOS_TEE_OPEN_SESSION)
+ *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
+ else
+ *(uint32_t *)cmd_buf = cmd_id;
- if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
- (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
- ret = __qseecom_update_qteec_req_buf(
- (struct qseecom_qteec_modfd_req *)req, data, false);
- if (ret)
- return ret;
- }
reqd_len_sb_in = req->req_len + req->resp_len;
msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
data->client.sb_virt,
@@ -6022,6 +6199,9 @@ static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
uint32_t reqd_len_sb_in = 0;
void *cmd_buf = NULL;
size_t cmd_len;
+ struct sglist_info *table = data->sglistinfo_ptr;
+ void *req_ptr = NULL;
+ void *resp_ptr = NULL;
ret = copy_from_user(&req, argp,
sizeof(struct qseecom_qteec_modfd_req));
@@ -6033,6 +6213,8 @@ static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
(struct qseecom_qteec_req *)(&req));
if (ret)
return ret;
+ req_ptr = req.req_ptr;
+ resp_ptr = req.resp_ptr;
/* find app_id & img_name from list */
spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
@@ -6051,45 +6233,56 @@ static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
return -ENOENT;
}
+ /* validate offsets */
+ for (i = 0; i < MAX_ION_FD; i++) {
+ if (req.ifd_data[i].fd) {
+ if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
+ return -EINVAL;
+ }
+ }
+ req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+ (uintptr_t)req.req_ptr);
+ req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+ (uintptr_t)req.resp_ptr);
+ ret = __qseecom_update_qteec_req_buf(&req, data, false);
+ if (ret)
+ return ret;
+
if (qseecom.qsee_version < QSEE_VERSION_40) {
- ireq.qsee_cmd_id = QSEOS_TEE_INVOKE_COMMAND;
ireq.app_id = data->client.app_id;
ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
- (uintptr_t)req.req_ptr);
+ (uintptr_t)req_ptr);
ireq.req_len = req.req_len;
ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
- (uintptr_t)req.resp_ptr);
+ (uintptr_t)resp_ptr);
ireq.resp_len = req.resp_len;
cmd_buf = (void *)&ireq;
cmd_len = sizeof(struct qseecom_qteec_ireq);
+ ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+ ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
} else {
- ireq_64bit.qsee_cmd_id = QSEOS_TEE_INVOKE_COMMAND;
ireq_64bit.app_id = data->client.app_id;
ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
- (uintptr_t)req.req_ptr);
+ (uintptr_t)req_ptr);
ireq_64bit.req_len = req.req_len;
ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
- (uintptr_t)req.resp_ptr);
+ (uintptr_t)resp_ptr);
ireq_64bit.resp_len = req.resp_len;
cmd_buf = (void *)&ireq_64bit;
cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+ ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+ ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
}
reqd_len_sb_in = req.req_len + req.resp_len;
+ if (qseecom.whitelist_support == true)
+ *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
+ else
+ *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
- /* validate offsets */
- for (i = 0; i < MAX_ION_FD; i++) {
- if (req.ifd_data[i].fd) {
- if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
- return -EINVAL;
- }
- }
- req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
- (uintptr_t)req.req_ptr);
- req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
- (uintptr_t)req.resp_ptr);
- ret = __qseecom_update_qteec_req_buf(&req, data, false);
- if (ret)
- return ret;
msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
data->client.sb_virt,
reqd_len_sb_in,
@@ -6152,6 +6345,15 @@ static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
return ret;
}
+static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
+{
+ if (data->sglist_cnt) {
+ memset(data->sglistinfo_ptr, 0,
+ SGLISTINFO_TABLE_SIZE);
+ data->sglist_cnt = 0;
+ }
+}
+
long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
int ret = 0;
@@ -6331,6 +6533,7 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
mutex_unlock(&app_access_lock);
if (ret)
pr_err("failed qseecom_send_cmd: %d\n", ret);
+ __qseecom_clean_data_sglistinfo(data);
break;
}
case QSEECOM_IOCTL_RECEIVE_REQ: {
@@ -6728,6 +6931,7 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
mutex_unlock(&app_access_lock);
if (ret)
pr_err("failed open_session_cmd: %d\n", ret);
+ __qseecom_clean_data_sglistinfo(data);
break;
}
case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
@@ -6776,6 +6980,7 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
mutex_unlock(&app_access_lock);
if (ret)
pr_err("failed Invoke cmd: %d\n", ret);
+ __qseecom_clean_data_sglistinfo(data);
break;
}
case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
@@ -6852,6 +7057,9 @@ static int qseecom_open(struct inode *inode, struct file *file)
init_waitqueue_head(&data->abort_wq);
atomic_set(&data->ioctl_count, 0);
+ data->sglistinfo_ptr = kzalloc(SGLISTINFO_TABLE_SIZE, GFP_KERNEL);
+ if (!(data->sglistinfo_ptr))
+ return -ENOMEM;
return ret;
}
@@ -6906,6 +7114,7 @@ static int qseecom_release(struct inode *inode, struct file *file)
if (data->perf_enabled == true)
qsee_disable_clock_vote(data, CLK_DFAB);
}
+ kfree(data->sglistinfo_ptr);
kfree(data);
return ret;
@@ -7653,6 +7862,74 @@ out:
return ret;
}
+/*
+ * Check whether the whitelist feature is supported by making a test scm_call
+ * that sends a whitelist command to an invalid app ID (0).
+ */
+static int qseecom_check_whitelist_feature(void)
+{
+ struct qseecom_client_send_data_ireq send_data_req = {0};
+ struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
+ struct qseecom_command_scm_resp resp;
+ uint32_t buf_size = 128;
+ void *buf = NULL;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ int ret = 0;
+ phys_addr_t pa;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pa = virt_to_phys(buf);
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ send_data_req.qsee_cmd_id =
+ QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
+ send_data_req.app_id = 0;
+ send_data_req.req_ptr = (uint32_t)pa;
+ send_data_req.req_len = buf_size;
+ send_data_req.rsp_ptr = (uint32_t)pa;
+ send_data_req.rsp_len = buf_size;
+ send_data_req.sglistinfo_ptr = (uint32_t)pa;
+ send_data_req.sglistinfo_len = buf_size;
+ cmd_buf = (void *)&send_data_req;
+ cmd_len = sizeof(struct qseecom_client_send_data_ireq);
+ } else {
+ send_data_req_64bit.qsee_cmd_id =
+ QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
+ send_data_req_64bit.app_id = 0;
+ send_data_req_64bit.req_ptr = (uint64_t)pa;
+ send_data_req_64bit.req_len = buf_size;
+ send_data_req_64bit.rsp_ptr = (uint64_t)pa;
+ send_data_req_64bit.rsp_len = buf_size;
+ send_data_req_64bit.sglistinfo_ptr = (uint64_t)pa;
+ send_data_req_64bit.sglistinfo_len = buf_size;
+ cmd_buf = (void *)&send_data_req_64bit;
+ cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
+ }
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ cmd_buf, cmd_len,
+ &resp, sizeof(resp));
+/*
+ * If this command exists and whitelist is supported, scm_call returns -2
+ * (which the scm driver remaps to -EINVAL) and resp.result is
+ * 0xFFFFFFED (-19); otherwise, scm_call returns -1 (remapped to -EIO).
+ */
+ if (ret == -EIO) {
+ qseecom.whitelist_support = false;
+ ret = 0;
+ } else if (ret == -EINVAL &&
+ resp.result == QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD) {
+ qseecom.whitelist_support = true;
+ ret = 0;
+ } else {
+ pr_err("Failed to check whitelist: ret = %d, result = 0x%x\n",
+ ret, resp.result);
+ }
+ kfree(buf);
+ return ret;
+}
+
static int qseecom_probe(struct platform_device *pdev)
{
int rc;
@@ -7685,6 +7962,7 @@ static int qseecom_probe(struct platform_device *pdev)
qseecom.app_block_ref_cnt = 0;
init_waitqueue_head(&qseecom.app_block_wq);
+ qseecom.whitelist_support = true;
rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
if (rc < 0) {
@@ -7900,6 +8178,14 @@ static int qseecom_probe(struct platform_device *pdev)
qseecom.qsee_perf_client = msm_bus_scale_register_client(
qseecom_platform_support);
+ rc = qseecom_check_whitelist_feature();
+ if (rc) {
+ rc = -EINVAL;
+ goto exit_destroy_ion_client;
+ }
+ pr_warn("qseecom.whitelist_support = %d\n",
+ qseecom.whitelist_support);
+
if (!qseecom.qsee_perf_client)
pr_err("Unable to register bus client\n");
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.h b/drivers/phy/phy-qcom-ufs-qmp-v3.h
index e9ac76b43812..8b77e3a7fee2 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.h
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.h
@@ -195,22 +195,22 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x08),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x34),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x36),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3F),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xCB),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xDA),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x01),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xFF),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0C),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x08),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x34),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x36),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3F),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xB2),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xC1),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0F),
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index a08d157e2b0f..05ce3969a5c7 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -2568,6 +2568,12 @@ const char *ipa_get_version_string(enum ipa_hw_type ver)
case IPA_HW_v3_1:
str = "3.1";
break;
+ case IPA_HW_v3_5:
+ str = "3.5";
+ break;
+ case IPA_HW_v3_5_1:
+ str = "3.5.1";
+ break;
default:
str = "Invalid version";
break;
@@ -2626,6 +2632,8 @@ static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p)
break;
case IPA_HW_v3_0:
case IPA_HW_v3_1:
+ case IPA_HW_v3_5:
+ case IPA_HW_v3_5_1:
result = ipa3_plat_drv_probe(pdev_p, ipa_api_ctrl,
ipa_plat_drv_match);
break;
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index b5f84bdafea8..6addf14d7126 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -40,9 +40,6 @@
#define IPA_MHI_MAX_UL_CHANNELS 1
#define IPA_MHI_MAX_DL_CHANNELS 1
-#define IPA_MHI_GSI_ER_START 10
-#define IPA_MHI_GSI_ER_END 16
-
#if (IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) > \
(IPA_MHI_GSI_ER_END - IPA_MHI_GSI_ER_START)
#error not enought event rings for MHI
@@ -1504,10 +1501,11 @@ int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
/* for event context address index needs to read from host */
- IPA_MHI_DBG("client %d channelHandle %d channelIndex %d, state %d\n",
+ IPA_MHI_DBG("client %d channelIndex %d channelID %d, state %d\n",
channel->client, channel->index, channel->id, channel->state);
- IPA_MHI_DBG("channel_context_addr 0x%llx\n",
- channel->channel_context_addr);
+ IPA_MHI_DBG("channel_context_addr 0x%llx cached_gsi_evt_ring_hdl %lu\n",
+ channel->channel_context_addr,
+ channel->cached_gsi_evt_ring_hdl);
IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
@@ -2585,5 +2583,27 @@ int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
return 0;
}
+int ipa_mhi_is_using_dma(bool *flag)
+{
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!ipa_mhi_client_ctx) {
+ IPA_MHI_ERR("not initialized\n");
+ return -EPERM;
+ }
+
+ *flag = ipa_mhi_client_ctx->use_ipadma ? true : false;
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+EXPORT_SYMBOL(ipa_mhi_is_using_dma);
+
+const char *ipa_mhi_get_state_str(int state)
+{
+ return MHI_STATE_STR(state);
+}
+EXPORT_SYMBOL(ipa_mhi_get_state_str);
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA MHI client driver");
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index c5f75046cd2c..981129eb9f3a 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -141,6 +141,27 @@ struct ipa_mem_buffer {
u32 size;
};
+#define IPA_MHI_GSI_ER_START 10
+#define IPA_MHI_GSI_ER_END 16
+
+/**
+ * enum ipa3_mhi_burst_mode - MHI channel burst mode state
+ *
+ * Values are according to MHI specification
+ * @IPA_MHI_BURST_MODE_DEFAULT: burst mode enabled for HW channels,
+ * disabled for SW channels
+ * @IPA_MHI_BURST_MODE_RESERVED:
+ * @IPA_MHI_BURST_MODE_DISABLE: Burst mode is disabled for this channel
+ * @IPA_MHI_BURST_MODE_ENABLE: Burst mode is enabled for this channel
+ *
+ */
+enum ipa3_mhi_burst_mode {
+ IPA_MHI_BURST_MODE_DEFAULT,
+ IPA_MHI_BURST_MODE_RESERVED,
+ IPA_MHI_BURST_MODE_DISABLE,
+ IPA_MHI_BURST_MODE_ENABLE,
+};
+
/**
* enum ipa_hw_mhi_channel_states - MHI channel state machine
*
@@ -325,6 +346,8 @@ int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
int ipa_mhi_query_ch_info(enum ipa_client_type client,
struct gsi_chan_info *ch_info);
int ipa_mhi_destroy_channel(enum ipa_client_type client);
+int ipa_mhi_is_using_dma(bool *flag);
+const char *ipa_mhi_get_state_str(int state);
/* MHI uC */
int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index d5d2abe137f4..137a43a1217b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -160,7 +160,7 @@ static int handle_install_filter_rule_req(void *req_h, void *req)
resp.filter_handle_list_len = MAX_NUM_Q6_RULE;
IPAWANERR("installed (%d) max Q6-UL rules ",
MAX_NUM_Q6_RULE);
- IPAWANERR("but modem gives total (%d)\n",
+ IPAWANERR("but modem gives total (%u)\n",
rule_req->filter_spec_list_len);
} else {
resp.filter_handle_list_len =
@@ -513,7 +513,7 @@ int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
if (req->filter_spec_list_len == 0) {
IPAWANDBG("IPACM pass zero rules to Q6\n");
} else {
- IPAWANDBG("IPACM pass %d rules to Q6\n",
+ IPAWANDBG("IPACM pass %u rules to Q6\n",
req->filter_spec_list_len);
}
@@ -649,6 +649,11 @@ int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req)
IPAWANERR(" delete UL filter rule for pipe %d\n",
req->source_pipe_index);
return -EINVAL;
+ } else if (req->filter_index_list_len > QMI_IPA_MAX_FILTERS_V01) {
+ IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n",
+ req->source_pipe_index,
+ req->filter_index_list_len);
+ return -EINVAL;
} else if (req->filter_index_list[0].filter_index == 0 &&
req->source_pipe_index !=
ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD)) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 9115e30b2b21..c9120ce83da8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -219,7 +219,6 @@ static struct ipa3_plat_drv_res ipa3_res = {0, };
struct msm_bus_scale_pdata *ipa3_bus_scale_table;
static struct clk *ipa3_clk;
-static struct clk *smmu_clk;
struct ipa3_context *ipa3_ctx;
static struct device *master_dev;
@@ -2887,22 +2886,6 @@ static int ipa3_get_clks(struct device *dev)
IPAERR("fail to get ipa clk\n");
return PTR_ERR(ipa3_clk);
}
-
- if (smmu_info.present && smmu_info.arm_smmu) {
- smmu_clk = clk_get(dev, "smmu_clk");
- if (IS_ERR(smmu_clk)) {
- if (smmu_clk != ERR_PTR(-EPROBE_DEFER))
- IPAERR("fail to get smmu clk\n");
- return PTR_ERR(smmu_clk);
- }
-
- if (clk_get_rate(smmu_clk) == 0) {
- long rate = clk_round_rate(smmu_clk, 1000);
-
- clk_set_rate(smmu_clk, rate);
- }
- }
-
return 0;
}
@@ -2922,8 +2905,6 @@ void _ipa_enable_clks_v3_0(void)
WARN_ON(1);
}
- if (smmu_clk)
- clk_prepare_enable(smmu_clk);
ipa3_suspend_apps_pipes(false);
}
@@ -2982,9 +2963,6 @@ void _ipa_disable_clks_v3_0(void)
clk_disable_unprepare(ipa3_clk);
else
WARN_ON(1);
-
- if (smmu_clk)
- clk_disable_unprepare(smmu_clk);
}
/**
@@ -3860,10 +3838,10 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
- if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0)
- result = ipa3_trigger_fw_loading_mdms();
- else if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_1)
+ if (ipa3_is_msm_device())
result = ipa3_trigger_fw_loading_msms();
+ else
+ result = ipa3_trigger_fw_loading_mdms();
/* No IPAv3.x chipsets that don't support FW loading */
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
@@ -4660,6 +4638,9 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
int fast = 1;
int bypass = 1;
int ret;
+ u32 add_map_size;
+ const u32 *add_map;
+ int i;
IPADBG("sub pdev=%p\n", dev);
@@ -4720,7 +4701,35 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
cb->valid = false;
return ret;
}
+ /* MAP ipa-uc ram */
+ add_map = of_get_property(dev->of_node,
+ "qcom,additional-mapping", &add_map_size);
+ if (add_map) {
+ /* the mapping is an array of u32 3-tuples: <iova pa size> */
+ if (add_map_size % (3 * sizeof(u32))) {
+ IPAERR("wrong additional mapping format\n");
+ cb->valid = false;
+ return -EFAULT;
+ }
+
+ /* iterate over each entry of the additional mapping array */
+ for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+ u32 iova = be32_to_cpu(add_map[i]);
+ u32 pa = be32_to_cpu(add_map[i + 1]);
+ u32 size = be32_to_cpu(add_map[i + 2]);
+ unsigned long iova_p;
+ phys_addr_t pa_p;
+ u32 size_p;
+ IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+ iova_p, pa_p, size_p);
+ IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+ iova_p, &pa_p, size_p);
+ ipa3_iommu_map(cb->iommu,
+ iova_p, pa_p, size_p,
+ IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+ }
+ }
return 0;
}
@@ -5150,7 +5159,6 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p,
if (!ipa3_bus_scale_table)
ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
-
/* Proceed to real initialization */
result = ipa3_pre_init(&ipa3_res, dev);
if (result) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 95ef9afbbd3e..3915f652d87b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -115,10 +115,10 @@ static ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf,
struct ipahal_reg_shared_mem_size smem_sz;
memset(&smem_sz, 0, sizeof(smem_sz));
- ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"IPA_VERSION=0x%x\n"
"IPA_COMP_HW_VERSION=0x%x\n"
@@ -1412,7 +1412,7 @@ static ssize_t ipa3_write_dbg_cnt(struct file *file, const char __user *buf,
memset(&dbg_cnt_ctrl, 0, sizeof(dbg_cnt_ctrl));
dbg_cnt_ctrl.type = DBG_CNT_TYPE_GENERAL;
dbg_cnt_ctrl.product = true;
- dbg_cnt_ctrl.src_pipe = 0x1f;
+ dbg_cnt_ctrl.src_pipe = 0xff;
dbg_cnt_ctrl.rule_idx_pipe_rule = false;
dbg_cnt_ctrl.rule_idx = 0;
if (option == 1)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 8f61827b50b4..19eb1ee9c881 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -2011,7 +2011,7 @@ static void ipa3_alloc_wlan_rx_common_cache(u32 size)
goto fail_skb_alloc;
}
ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
- rx_pkt->data.dma_addr = dma_map_single(NULL, ptr,
+ rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
if (rx_pkt->data.dma_addr == 0 ||
rx_pkt->data.dma_addr == ~0) {
@@ -4181,7 +4181,7 @@ static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt)
{
u16 temp;
/* Add the check but it might have throughput issue */
- if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_1) {
+ if (ipa3_is_msm_device()) {
temp = (u16) (~((unsigned long) tx_pkt &
0xFFFF000000000000) >> 48);
if (temp) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index f9018e3f47bf..4309fbc3154f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -2015,4 +2015,5 @@ int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr,
int ipa3_ntn_init(void);
int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats);
struct dentry *ipa_debugfs_get_root(void);
+bool ipa3_is_msm_device(void);
#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
index f0102a703812..75711c0f7264 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -46,7 +46,7 @@ static int ipa3_irq_mapping[IPA_IRQ_MAX] = {
[IPA_UC_TX_CMD_Q_NOT_FULL_IRQ] = -1,
[IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ] = -1,
[IPA_BAD_SNOC_ACCESS_IRQ] = 0,
- [IPA_EOT_COAL_IRQ] = 1,
+ [IPA_EOT_COAL_IRQ] = -1,
[IPA_UC_IRQ_0] = 2,
[IPA_UC_IRQ_1] = 3,
[IPA_UC_IRQ_2] = 4,
@@ -61,7 +61,7 @@ static int ipa3_irq_mapping[IPA_IRQ_MAX] = {
[IPA_PROC_ERR_IRQ] = 13,
[IPA_TX_SUSPEND_IRQ] = 14,
[IPA_TX_HOLB_DROP_IRQ] = 15,
- [IPA_BAM_IDLE_IRQ] = 16,
+ [IPA_BAM_GSI_IDLE_IRQ] = 16,
};
static void ipa3_interrupt_defer(struct work_struct *work);
@@ -395,7 +395,7 @@ int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
/* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt*/
if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
- (ipa3_ctx->ipa_hw_type == IPA_HW_v3_1)) {
+ (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
val = ~0;
for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
if (IPA_CLIENT_IS_Q6_CONS(client_idx) ||
@@ -448,7 +448,7 @@ int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
/* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
- (ipa3_ctx->ipa_hw_type == IPA_HW_v3_1)) {
+ (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0);
IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0);
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index e83c249ad425..4ef1a96c8450 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -67,24 +67,6 @@
#define IPA_MHI_HOST_ADDR_COND(addr) \
((params->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
-/**
- * enum ipa3_mhi_burst_mode - MHI channel burst mode state
- *
- * Values are according to MHI specification
- * @IPA_MHI_BURST_MODE_DEFAULT: burst mode enabled for HW channels,
- * disabled for SW channels
- * @IPA_MHI_BURST_MODE_RESERVED:
- * @IPA_MHI_BURST_MODE_DISABLE: Burst mode is disabled for this channel
- * @IPA_MHI_BURST_MODE_ENABLE: Burst mode is enabled for this channel
- *
- */
-enum ipa3_mhi_burst_mode {
- IPA_MHI_BURST_MODE_DEFAULT,
- IPA_MHI_BURST_MODE_RESERVED,
- IPA_MHI_BURST_MODE_DISABLE,
- IPA_MHI_BURST_MODE_ENABLE,
-};
-
enum ipa3_mhi_polling_mode {
IPA_MHI_POLLING_MODE_DB_MODE,
IPA_MHI_POLLING_MODE_POLL_MODE,
@@ -224,7 +206,6 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
/* allocate event ring only for the first time pipe is connected */
if (params->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
- IPA_MHI_DBG("allocating event ring\n");
memset(&ev_props, 0, sizeof(ev_props));
ev_props.intf = GSI_EVT_CHTYPE_MHI_EV;
ev_props.intr = GSI_INTR_MSI;
@@ -247,6 +228,8 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
ev_props.user_data = params->channel;
ev_props.evchid_valid = true;
ev_props.evchid = params->evchid;
+ IPA_MHI_DBG("allocating event ring ep:%u evchid:%u\n",
+ ipa_ep_idx, ev_props.evchid);
res = gsi_alloc_evt_ring(&ev_props, ipa3_ctx->gsi_dev_hdl,
&ep->gsi_evt_ring_hdl);
if (res) {
@@ -260,6 +243,10 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
*params->cached_gsi_evt_ring_hdl =
ep->gsi_evt_ring_hdl;
+ } else {
+ IPA_MHI_DBG("event ring already exists: evt_ring_hdl=%lu\n",
+ *params->cached_gsi_evt_ring_hdl);
+ ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
}
memset(&ch_props, 0, sizeof(ch_props));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 534a37d906fc..d68a2ce3c041 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -169,7 +169,7 @@ static int ipa3_handle_install_filter_rule_req(void *req_h, void *req)
resp.rule_id_len = MAX_NUM_Q6_RULE;
IPAWANERR("installed (%d) max Q6-UL rules ",
MAX_NUM_Q6_RULE);
- IPAWANERR("but modem gives total (%d)\n",
+ IPAWANERR("but modem gives total (%u)\n",
rule_req->filter_spec_ex_list_len);
} else {
resp.rule_id_len =
@@ -592,7 +592,7 @@ int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
if (req->filter_spec_ex_list_len == 0) {
IPAWANDBG("IPACM pass zero rules to Q6\n");
} else {
- IPAWANDBG("IPACM pass %d rules to Q6\n",
+ IPAWANDBG("IPACM pass %u rules to Q6\n",
req->filter_spec_ex_list_len);
}
@@ -725,6 +725,11 @@ int ipa3_qmi_filter_notify_send(
IPAWANERR(" delete UL filter rule for pipe %d\n",
req->source_pipe_index);
return -EINVAL;
+ } else if (req->rule_id_len > QMI_IPA_MAX_FILTERS_V01) {
+ IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n",
+ req->source_pipe_index,
+ req->rule_id_len);
+ return -EINVAL;
}
/* cache the qmi_filter_request */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index e0f32bdcbb3d..ab4911462ddf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -547,7 +547,8 @@ static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
}
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- phys = page_to_phys(sg_page(sg));
+ /* directly get sg_tbl PA from wlan-driver */
+ phys = sg->dma_address;
len = PAGE_ALIGN(sg->offset + sg->length);
ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot);
@@ -647,7 +648,8 @@ static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt,
wdi_res[res_idx].nents = sgt->nents;
wdi_res[res_idx].valid = true;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- wdi_res[res_idx].res[i].pa = page_to_phys(sg_page(sg));
+ /* directly get sg_tbl PA from wlan */
+ wdi_res[res_idx].res[i].pa = sg->dma_address;
wdi_res[res_idx].res[i].iova = curr_iova;
wdi_res[res_idx].res[i].size = PAGE_ALIGN(sg->offset +
sg->length);
@@ -811,16 +813,12 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
in->u.ul.rdy_ring_size);
IPADBG("rx_ring_rp_pa=0x%pa\n",
&in->u.ul.rdy_ring_rp_pa);
- IPADBG("rdy_ring_rp value =%d\n",
- *in->u.ul.rdy_ring_rp_va);
IPADBG("rx_comp_ring_base_pa=0x%pa\n",
&in->u.ul.rdy_comp_ring_base_pa);
IPADBG("rx_comp_ring_size=%d\n",
in->u.ul.rdy_comp_ring_size);
IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
&in->u.ul.rdy_comp_ring_wp_pa);
- IPADBG("rx_comp_ring_wp value=%d\n",
- *in->u.ul.rdy_comp_ring_wp_va);
ipa3_ctx->uc_ctx.rdy_ring_base_pa =
in->u.ul.rdy_ring_base_pa;
ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
@@ -833,21 +831,34 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
in->u.ul.rdy_comp_ring_wp_pa;
ipa3_ctx->uc_ctx.rdy_comp_ring_size =
in->u.ul.rdy_comp_ring_size;
- ipa3_ctx->uc_ctx.rdy_ring_rp_va =
- in->u.ul.rdy_ring_rp_va;
- ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va =
- in->u.ul.rdy_comp_ring_wp_va;
+
/* check if the VA is empty */
- if (!in->u.ul.rdy_ring_rp_va && ipa3_ctx->ipa_wdi2) {
- IPAERR("rdy_ring_rp_va is empty, wdi2.0(%d)\n",
- ipa3_ctx->ipa_wdi2);
- goto dma_alloc_fail;
- }
- if (!in->u.ul.rdy_comp_ring_wp_va &&
- ipa3_ctx->ipa_wdi2) {
- IPAERR("comp_ring_wp_va is empty, wdi2.0(%d)\n",
- ipa3_ctx->ipa_wdi2);
- goto dma_alloc_fail;
+ if (ipa3_ctx->ipa_wdi2) {
+ if (in->smmu_enabled) {
+ if (!in->u.ul_smmu.rdy_ring_rp_va ||
+ !in->u.ul_smmu.rdy_comp_ring_wp_va)
+ goto dma_alloc_fail;
+ } else {
+ if (!in->u.ul.rdy_ring_rp_va ||
+ !in->u.ul.rdy_comp_ring_wp_va)
+ goto dma_alloc_fail;
+ }
+ IPADBG("rdy_ring_rp value =%d\n",
+ in->smmu_enabled ?
+ *in->u.ul_smmu.rdy_ring_rp_va :
+ *in->u.ul.rdy_ring_rp_va);
+ IPADBG("rx_comp_ring_wp value=%d\n",
+ in->smmu_enabled ?
+ *in->u.ul_smmu.rdy_comp_ring_wp_va :
+ *in->u.ul.rdy_comp_ring_wp_va);
+ ipa3_ctx->uc_ctx.rdy_ring_rp_va =
+ in->smmu_enabled ?
+ in->u.ul_smmu.rdy_ring_rp_va :
+ in->u.ul.rdy_ring_rp_va;
+ ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va =
+ in->smmu_enabled ?
+ in->u.ul_smmu.rdy_comp_ring_wp_va :
+ in->u.ul.rdy_comp_ring_wp_va;
}
}
@@ -894,6 +905,7 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
in->smmu_enabled,
in->u.dl_smmu.ce_ring_size,
in->u.dl.ce_ring_size);
+ /* WA: wlan passed ce_ring sg_table PA directly */
if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
in->smmu_enabled,
in->u.dl.ce_ring_base_pa,
@@ -933,7 +945,9 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
tx_2->ce_ring_doorbell_pa_hi,
tx_2->ce_ring_doorbell_pa);
- tx_2->num_tx_buffers = in->u.dl.num_tx_buffers;
+ tx_2->num_tx_buffers = in->smmu_enabled ?
+ in->u.dl_smmu.num_tx_buffers :
+ in->u.dl.num_tx_buffers;
tx_2->ipa_pipe_number = ipa_ep_idx;
} else {
tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 395cf62c9728..a21eb9c1530b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -3349,6 +3349,12 @@ void ipa3_set_resorce_groups_min_max_limits(void)
}
}
+ /* move resource group configuration from HLOS to TZ */
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
+ IPAERR("skip configuring ipa_rx_hps_clients from HLOS\n");
+ return;
+ }
+
IPADBG("Assign RX_HPS CMDQ rsrc groups min-max limits\n");
ipa3_configure_rx_hps_clients(0, true);
@@ -3604,3 +3610,27 @@ int ipa3_load_fws(const struct firmware *firmware)
IPADBG("IPA FWs (GSI FW, HPS and DPS) were loaded\n");
return 0;
}
+
+/**
+ * ipa3_is_msm_device() - Is the running device an MSM or an MDM?
+ * Determined according to the IPA hardware version.
+ *
+ * Return value: true if MSM, false if MDM
+ *
+ */
+bool ipa3_is_msm_device(void)
+{
+ switch (ipa3_ctx->ipa_hw_type) {
+ case IPA_HW_v3_0:
+ case IPA_HW_v3_5:
+ return false;
+ case IPA_HW_v3_1:
+ case IPA_HW_v3_5_1:
+ return true;
+ default:
+ IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type);
+ ipa_assert();
+ }
+
+ return false;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index a3345d7ac305..cef9f7ef3fe4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -53,7 +53,6 @@ static const char *ipareg_name_to_str[IPA_REG_MAX] = {
__stringify(IPA_IRQ_EE_UC_n),
__stringify(IPA_ENDP_INIT_HDR_METADATA_MASK_n),
__stringify(IPA_ENDP_INIT_HDR_METADATA_n),
- __stringify(IPA_ENABLE_GSI),
__stringify(IPA_ENDP_INIT_RSRC_GRP_n),
__stringify(IPA_SHARED_MEM_SIZE),
__stringify(IPA_SRAM_DIRECT_ACCESS_n),
@@ -80,6 +79,7 @@ static const char *ipareg_name_to_str[IPA_REG_MAX] = {
__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_1),
__stringify(IPA_QSB_MAX_WRITES),
__stringify(IPA_QSB_MAX_READS),
+ __stringify(IPA_TX_CFG),
};
static void ipareg_construct_dummy(enum ipahal_reg_name reg,
@@ -136,6 +136,29 @@ static void ipareg_construct_rx_hps_clients_depth0(
IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(3));
}
+static void ipareg_construct_rx_hps_clients_depth0_v3_5(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_rx_hps_clients *clients =
+ (struct ipahal_reg_rx_hps_clients *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(0));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(1));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(2));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(3));
+}
+
static void ipareg_construct_rsrg_grp_xy(
enum ipahal_reg_name reg, const void *fields, u32 *val)
{
@@ -156,6 +179,31 @@ static void ipareg_construct_rsrg_grp_xy(
IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK);
}
+static void ipareg_construct_rsrg_grp_xy_v3_5(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_rsrc_grp_cfg *grp =
+ (struct ipahal_reg_rsrc_grp_cfg *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, grp->x_min,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5);
+ IPA_SETFIELD_IN_REG(*val, grp->x_max,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5);
+
+ /* DST_23 register has only X fields at ipa V3_5 */
+ if (reg == IPA_DST_RSRC_GRP_23_RSRC_TYPE_n)
+ return;
+
+ IPA_SETFIELD_IN_REG(*val, grp->y_min,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5);
+ IPA_SETFIELD_IN_REG(*val, grp->y_max,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5);
+}
+
static void ipareg_construct_hash_cfg_n(
enum ipahal_reg_name reg, const void *fields, u32 *val)
{
@@ -423,13 +471,19 @@ static void ipareg_construct_debug_cnt_ctrl_n(
IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT,
IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK);
- IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
- IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
- IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK);
-
- IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx_pipe_rule,
- IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT,
- IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK);
+ if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK);
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx_pipe_rule,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK
+ );
+ } else {
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5);
+ }
}
static void ipareg_parse_shared_mem_size(
@@ -459,6 +513,17 @@ static void ipareg_construct_endp_init_rsrc_grp_n(
IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK);
}
+static void ipareg_construct_endp_init_rsrc_grp_n_v3_5(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+ (struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+ IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5,
+ IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5);
+}
+
static void ipareg_construct_endp_init_hdr_metadata_n(
enum ipahal_reg_name reg, const void *fields, u32 *val)
{
@@ -824,6 +889,26 @@ static void ipareg_construct_qsb_max_reads(enum ipahal_reg_name reg,
IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
}
+static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_tx_cfg *tx_cfg;
+
+ tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_disable,
+ IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5,
+ IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5);
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_disable,
+ IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
+ IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->prefetch_almost_empty_size,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
+}
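A hedged usage sketch of the new TX_CFG abstraction. The ipahal_write_reg_fields() helper is assumed here by analogy with the ipahal_read_reg_fields() call shown above and may differ in the actual HAL; the field values are illustrative.

struct ipahal_reg_tx_cfg tx_cfg;

memset(&tx_cfg, 0, sizeof(tx_cfg));
tx_cfg.tx0_prefetch_disable = false;	/* keep prefetch enabled on TX0 */
tx_cfg.tx1_prefetch_disable = false;	/* keep prefetch enabled on TX1 */
tx_cfg.prefetch_almost_empty_size = 2;	/* illustrative threshold */

/* Assumed writer helper, mirroring ipahal_read_reg_fields() above. */
ipahal_write_reg_fields(IPA_TX_CFG, &tx_cfg);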
+
/*
* struct ipahal_reg_obj - Register H/W information for specific IPA version
* @construct - CB to construct register value from abstracted structure
@@ -955,9 +1040,6 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
ipareg_construct_endp_init_hdr_metadata_n,
ipareg_parse_dummy,
0x0000081c, 0x70},
- [IPA_HW_v3_0][IPA_ENABLE_GSI] = {
- ipareg_construct_dummy, ipareg_parse_dummy,
- 0x5500, 0},
[IPA_HW_v3_0][IPA_ENDP_INIT_RSRC_GRP_n] = {
ipareg_construct_endp_init_rsrc_grp_n,
ipareg_parse_dummy,
@@ -1049,6 +1131,60 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
[IPA_HW_v3_1][IPA_SUSPEND_IRQ_CLR_EE_n] = {
ipareg_construct_dummy, ipareg_parse_dummy,
0x00003038, 0x1000},
+
+
+ /* IPAv3.5 */
+ [IPA_HW_v3_5][IPA_TX_CFG] = {
+ ipareg_construct_tx_cfg, ipareg_parse_dummy,
+ 0x000001FC, 0},
+ [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+ 0x00000400, 0x20},
+ [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+ 0x00000404, 0x20},
+ [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+ 0x00000500, 0x20},
+ [IPA_HW_v3_5][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+ 0x00000504, 0x20},
+ [IPA_HW_v3_5][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_ENDP_INIT_RSRC_GRP_n] = {
+ ipareg_construct_endp_init_rsrc_grp_n_v3_5,
+ ipareg_parse_dummy,
+ 0x00000838, 0x70},
+ [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
+ ipareg_construct_rx_hps_clients_depth0_v3_5,
+ ipareg_parse_dummy,
+ 0x000023C4, 0},
+ [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
+ ipareg_construct_rx_hps_clients_depth0_v3_5,
+ ipareg_parse_dummy,
+ 0x000023CC, 0},
+ [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_SPARE_REG_1] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00002780, 0},
+ [IPA_HW_v3_5][IPA_SPARE_REG_2] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00002784, 0},
};
/*
@@ -1334,22 +1470,29 @@ u32 ipahal_aggr_get_max_pkt_limit(void)
IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT;
}
-
void ipahal_get_aggr_force_close_valmask(int ep_idx,
struct ipahal_reg_valmask *valmask)
{
+ u32 shft;
+ u32 bmsk;
+
if (!valmask) {
IPAHAL_ERR("Input error\n");
return;
}
- IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx,
- IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT,
- IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK);
+ if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+ shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
+ bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK;
+ } else {
+ shft =
+ IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5;
+ bmsk =
+ IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5;
+ }
- valmask->mask =
- IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK <<
- IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
+ IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
+ valmask->mask = bmsk << shft;
}
void ipahal_get_fltrt_hash_flush_valmask(
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index f1acab2e2db6..8fb9040360ea 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -56,7 +56,6 @@ enum ipahal_reg_name {
IPA_IRQ_EE_UC_n,
IPA_ENDP_INIT_HDR_METADATA_MASK_n,
IPA_ENDP_INIT_HDR_METADATA_n,
- IPA_ENABLE_GSI,
IPA_ENDP_INIT_RSRC_GRP_n,
IPA_SHARED_MEM_SIZE,
IPA_SRAM_DIRECT_ACCESS_n,
@@ -83,6 +82,7 @@ enum ipahal_reg_name {
IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
IPA_QSB_MAX_WRITES,
IPA_QSB_MAX_READS,
+ IPA_TX_CFG,
IPA_REG_MAX,
};
@@ -116,7 +116,7 @@ struct ipahal_reg_endp_init_route {
};
/*
- * struct ipahal_reg_endp_init_rsrc_grp - PA_ENDP_INIT_RSRC_GRP_n register
+ * struct ipahal_reg_endp_init_rsrc_grp - IPA_ENDP_INIT_RSRC_GRP_n register
* @rsrc_grp: Index of group for this ENDP. If this ENDP is a source-ENDP,
* index is for source-resource-group. If destination ENPD, index is
* for destination-resoruce-group.
@@ -231,7 +231,8 @@ enum ipahal_reg_dbg_cnt_type {
* @src_pipe - Specific Pipe to match. If FF, no need to match
* specific pipe
* @rule_idx_pipe_rule - Global Rule or Pipe Rule. If pipe, then indicated by
- * src_pipe
+ * src_pipe. Starting with IPA v3.5,
+ * Global Rule is not supported and this field is ignored.
* @rule_idx - Rule index. Irrelevant for type General
*/
struct ipahal_reg_debug_cnt_ctrl {
@@ -240,7 +241,7 @@ struct ipahal_reg_debug_cnt_ctrl {
bool product;
u8 src_pipe;
bool rule_idx_pipe_rule;
- u8 rule_idx;
+ u16 rule_idx;
};
/*
@@ -317,6 +318,18 @@ struct ipahal_reg_qcncm {
};
/*
+ * struct ipahal_reg_tx_cfg - IPA TX_CFG register
+ * @tx0_prefetch_disable: Disable prefetch on TX0
+ * @tx1_prefetch_disable: Disable prefetch on TX1
+ * @prefetch_almost_empty_size: Prefetch almost empty size
+ */
+struct ipahal_reg_tx_cfg {
+ bool tx0_prefetch_disable;
+ bool tx1_prefetch_disable;
+ u16 prefetch_almost_empty_size;
+};
+
+/*
* ipahal_reg_name_str() - returns string that represent the register
* @reg_name: [in] register name
*/
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index 2ca0dcd4d6cb..1606a2ff41c7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -89,8 +89,10 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
/* IPA_AGGR_FORCE_CLOSE register */
-#define IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3fffffff
-#define IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3fffffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5 0xfffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5 0
/* IPA_ENDP_INIT_ROUTE_n register */
#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
@@ -177,6 +179,8 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
/* IPA_ENDP_INIT_RSRC_GRP_n register */
#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5 0x3
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5 0
/* IPA_SHARED_MEM_SIZE register */
#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK 0xffff0000
@@ -188,6 +192,7 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK 0x10000000
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT 0x1c
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK 0x0ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5 0x1ff00000
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT 0x14
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT 0xc
@@ -271,9 +276,20 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5 0x3F000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5 0x3F0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5 0x3F00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5 0x3F
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5 0
+
/* IPA_IPA_IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */
#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(n) \
+ (0xF << (8 * (n)))
#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n))
/* IPA_QSB_MAX_WRITES register */
@@ -288,5 +304,12 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK (0xf0)
#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT (4)
+/* IPA_TX_CFG register */
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5 (0x1)
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5 (0)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5 (0x2)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5 (1)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5 (0x1C)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5 (2)
#endif /* _IPAHAL_REG_I_H_ */
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
index 62bb9a783c89..e1686e608906 100644
--- a/drivers/platform/msm/ipa/test/Makefile
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
-ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o
+ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
new file mode 100644
index 000000000000..5a41d641de4f
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -0,0 +1,3306 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/ipa_mhi.h>
+#include <linux/ipa.h>
+#include "../ipa_v3/ipa_i.h"
+#include "../../gsi/gsi.h"
+#include "../../gsi/gsi_reg.h"
+#include "ipa_ut_framework.h"
+
+#define IPA_MHI_TEST_NUM_CHANNELS 8
+#define IPA_MHI_TEST_NUM_EVENT_RINGS 8
+#define IPA_MHI_TEST_FIRST_CHANNEL_ID 100
+#define IPA_MHI_TEST_FIRST_EVENT_RING_ID 100
+#define IPA_MHI_TEST_LAST_CHANNEL_ID \
+ (IPA_MHI_TEST_FIRST_CHANNEL_ID + IPA_MHI_TEST_NUM_CHANNELS - 1)
+#define IPA_MHI_TEST_LAST_EVENT_RING_ID \
+ (IPA_MHI_TEST_FIRST_EVENT_RING_ID + IPA_MHI_TEST_NUM_EVENT_RINGS - 1)
+#define IPA_MHI_TEST_MAX_DATA_BUF_SIZE 1500
+#define IPA_MHI_TEST_SEQ_TYPE_DMA 0x00000000
+
+#define IPA_MHI_TEST_LOOP_NUM 5
+#define IPA_MHI_RUN_TEST_UNIT_IN_LOOP(test_unit, rc, args...) \
+ do { \
+ int __i; \
+ for (__i = 0; __i < IPA_MHI_TEST_LOOP_NUM; __i++) { \
+ IPA_UT_LOG(#test_unit " START iter %d\n", __i); \
+ rc = test_unit(args); \
+ if (!rc) \
+ continue; \
+ IPA_UT_LOG(#test_unit " failed %d\n", rc); \
+ break; \
+ } \
+ } while (0)
+
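+/*
+ * Example usage (illustrative): run a test unit IPA_MHI_TEST_LOOP_NUM
+ * times, stopping on the first failure.
+ *   IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ */
+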
+/**
+ * check for MSI interrupt for one or both channels:
+ * OUT channel MSI may be missed as it
+ * will be overwritten by the IN channel MSI
+ */
+#define IPA_MHI_TEST_CHECK_MSI_INTR(__both, __timeout) \
+ do { \
+ int i; \
+ for (i = 0; i < 20; i++) { \
+ if (*((u32 *)test_mhi_ctx->msi.base) == \
+ (0x10000000 | \
+ (IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1))) { \
+ __timeout = false; \
+ break; \
+ } \
+ if (__both && (*((u32 *)test_mhi_ctx->msi.base) == \
+ (0x10000000 | \
+ (IPA_MHI_TEST_FIRST_EVENT_RING_ID)))) { \
+ /* sleep to be sure IN MSI is generated */ \
+ msleep(20); \
+ __timeout = false; \
+ break; \
+ } \
+ msleep(20); \
+ } \
+ } while (0)
+
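+/*
+ * The polled word is expected to hold the MSI data programmed at init time
+ * (0x10000000) combined with the event ring ID, which the test programs as
+ * the MSI vector (see msivec below).
+ */
+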
+static DECLARE_COMPLETION(mhi_test_ready_comp);
+static DECLARE_COMPLETION(mhi_test_wakeup_comp);
+
+/**
+ * enum ipa_mhi_ring_elements_type - MHI ring elements types.
+ */
+enum ipa_mhi_ring_elements_type {
+ IPA_MHI_RING_ELEMENT_NO_OP = 1,
+ IPA_MHI_RING_ELEMENT_TRANSFER = 2
+};
+
+/**
+ * enum ipa_mhi_channel_direction - MHI channel directions
+ */
+enum ipa_mhi_channel_direction {
+ IPA_MHI_OUT_CHAHNNEL = 1,
+ IPA_MHI_IN_CHAHNNEL = 2,
+};
+
+/**
+ * struct ipa_mhi_channel_context_array - MHI Channel context array entry
+ *
+ * mapping is taken from MHI spec
+ */
+struct ipa_mhi_channel_context_array {
+ u32 chstate:8; /*0-7*/
+ u32 brsmode:2; /*8-9*/
+ u32 pollcfg:6; /*10-15*/
+ u32 reserved:16; /*16-31*/
+ u32 chtype; /*channel type (inbound/outbound)*/
+ u32 erindex; /*event ring index*/
+ u64 rbase; /*ring base address in the host addr spc*/
+ u64 rlen; /*ring length in bytes*/
+ u64 rp; /*read pointer in the host system addr spc*/
+ u64 wp; /*write pointer in the host system addr spc*/
+} __packed;
+
+/**
+ * struct ipa_mhi_event_context_array - MHI event ring context array entry
+ *
+ * mapping is taken from MHI spec
+ */
+struct ipa_mhi_event_context_array {
+ u16 intmodc;
+ u16 intmodt;/* Interrupt moderation timer (in microseconds) */
+ u32 ertype;
+ u32 msivec; /* MSI vector for interrupt (MSI data)*/
+ u64 rbase; /* ring base address in host address space*/
+ u64 rlen; /* ring length in bytes*/
+ u64 rp; /* read pointer in the host system address space*/
+ u64 wp; /* write pointer in the host system address space*/
+} __packed;
+
+/**
+ *
+ * struct ipa_mhi_mmio_register_set - MHI configuration registers,
+ * control registers, status registers, pointers to doorbell arrays,
+ * pointers to channel and event context arrays.
+ *
+ * The structure is defined in mhi spec (register names are taken from there).
+ * Only values accessed by HWP or test are documented
+ */
+struct ipa_mhi_mmio_register_set {
+ u32 mhireglen;
+ u32 reserved_08_04;
+ u32 mhiver;
+ u32 reserved_10_0c;
+ struct mhicfg {
+ u8 nch;
+ u8 reserved_15_8;
+ u8 ner;
+ u8 reserved_31_23;
+ } __packed mhicfg;
+
+ u32 reserved_18_14;
+ u32 chdboff;
+ u32 reserved_20_1C;
+ u32 erdboff;
+ u32 reserved_28_24;
+ u32 bhioff;
+ u32 reserved_30_2C;
+ u32 debugoff;
+ u32 reserved_38_34;
+
+ struct mhictrl {
+ u32 rs : 1;
+ u32 reset : 1;
+ u32 reserved_7_2 : 6;
+ u32 mhistate : 8;
+ u32 reserved_31_16 : 16;
+ } __packed mhictrl;
+
+ u64 reserved_40_3c;
+ u32 reserved_44_40;
+
+ struct mhistatus {
+ u32 ready : 1;
+ u32 reserved_3_2 : 1;
+ u32 syserr : 1;
+ u32 reserved_7_3 : 5;
+ u32 mhistate : 8;
+ u32 reserved_31_16 : 16;
+ } __packed mhistatus;
+
+ /**
+ * Register is not accessed by HWP.
+ * In the test, this register carries the handle for
+ * the channel context array buffer
+ */
+ u32 reserved_50_4c;
+
+ u32 mhierror;
+
+ /**
+ * Register is not accessed by HWP.
+ * In the test, this register carries the handle for
+ * the event ring context array buffer
+ */
+ u32 reserved_58_54;
+
+ /**
+ * 64-bit pointer to the channel context array in the host memory space
+ * host sets the pointer to the channel context array during
+ * initialization.
+ */
+ u64 ccabap;
+ /**
+ * 64-bit pointer to the event context array in the host memory space
+ * host sets the pointer to the event context array during
+ * initialization
+ */
+ u64 ecabap;
+ /**
+ * Register is not accessed by HWP.
+ * In the test, this register carries the virtual address
+ * of the channel context array buffer
+ */
+ u64 crcbap;
+ /**
+ * Register is not accessed by HWP.
+ * In the test, this register carries the virtual address
+ * of the event ring context array buffer
+ */
+ u64 crdb;
+
+ u64 reserved_80_78;
+
+ struct mhiaddr {
+ /**
+ * Base address (64-bit) of the memory region in
+ * the host address space where the MHI control
+ * data structures are allocated by the host,
+ * including channel context array, event context array,
+ * and rings.
+ * The device uses this information to set up its internal
+ * address translation tables.
+ * value must be aligned to 4 Kbytes.
+ */
+ u64 mhicrtlbase;
+ /**
+ * Upper limit address (64-bit) of the memory region in
+ * the host address space where the MHI control
+ * data structures are allocated by the host.
+ * The device uses this information to setup its internal
+ * address translation tables.
+ * The most significant 32 bits of MHICTRLBASE and
+ * MHICTRLLIMIT registers must be equal.
+ */
+ u64 mhictrllimit;
+ u64 reserved_18_10;
+ /**
+ * Base address (64-bit) of the memory region in
+ * the host address space where the MHI data buffers
+ * are allocated by the host.
+ * The device uses this information to setup its
+ * internal address translation tables.
+ * value must be aligned to 4 Kbytes.
+ */
+ u64 mhidatabase;
+ /**
+ * Upper limit address (64-bit) of the memory region in
+ * the host address space where the MHI data buffers
+ * are allocated by the host.
+ * The device uses this information to setup its
+ * internal address translation tables.
+ * The most significant 32 bits of MHIDATABASE and
+ * MHIDATALIMIT registers must be equal.
+ */
+ u64 mhidatalimit;
+ u64 reserved_30_28;
+ } __packed mhiaddr;
+
+} __packed;
+
+/**
+ * struct ipa_mhi_event_ring_element - MHI Event ring element
+ *
+ * mapping is taken from MHI spec
+ */
+struct ipa_mhi_event_ring_element {
+ /**
+ * pointer to ring element that generated event in
+ * the host system address space
+ */
+ u64 ptr;
+ union {
+ struct {
+ u32 len : 24;
+ u32 code : 8;
+ } __packed bits;
+ u32 dword;
+ } __packed dword_8;
+ u16 reserved;
+ u8 type;
+ u8 chid;
+} __packed;
+
+/**
+ * struct ipa_mhi_transfer_ring_element - MHI Transfer ring element
+ *
+ * mapping is taken from MHI spec
+ */
+struct ipa_mhi_transfer_ring_element {
+ u64 ptr; /*pointer to buffer in the host system address space*/
+ u16 len; /*transaction length in bytes*/
+ u16 reserved0;
+ union {
+ struct {
+ u16 chain : 1;
+ u16 reserved_7_1 : 7;
+ u16 ieob : 1;
+ u16 ieot : 1;
+ u16 bei : 1;
+ u16 reserved_15_11 : 5;
+ } __packed bits;
+ u16 word;
+ } __packed word_C;
+ u8 type;
+ u8 reserved1;
+} __packed;
+
+/**
+ * struct ipa_test_mhi_context - MHI test context
+ */
+struct ipa_test_mhi_context {
+ void __iomem *gsi_mmio;
+ struct ipa_mem_buffer msi;
+ struct ipa_mem_buffer ch_ctx_array;
+ struct ipa_mem_buffer ev_ctx_array;
+ struct ipa_mem_buffer mmio_buf;
+ struct ipa_mem_buffer xfer_ring_bufs[IPA_MHI_TEST_NUM_CHANNELS];
+ struct ipa_mem_buffer ev_ring_bufs[IPA_MHI_TEST_NUM_EVENT_RINGS];
+ struct ipa_mem_buffer in_buffer;
+ struct ipa_mem_buffer out_buffer;
+ u32 prod_hdl;
+ u32 cons_hdl;
+};
+
+static struct ipa_test_mhi_context *test_mhi_ctx;
+
+static void ipa_mhi_test_cb(void *priv,
+ enum ipa_mhi_event_type event, unsigned long data)
+{
+ IPA_UT_DBG("Entry\n");
+
+ if (event == IPA_MHI_EVENT_DATA_AVAILABLE)
+ complete_all(&mhi_test_wakeup_comp);
+ else if (event == IPA_MHI_EVENT_READY)
+ complete_all(&mhi_test_ready_comp);
+ else
+ WARN_ON(1);
+}
+
+static void ipa_test_mhi_free_mmio_space(void)
+{
+ IPA_UT_DBG("Entry\n");
+
+ if (!test_mhi_ctx)
+ return;
+
+ dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->mmio_buf.size,
+ test_mhi_ctx->mmio_buf.base,
+ test_mhi_ctx->mmio_buf.phys_base);
+
+ dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->ev_ctx_array.size,
+ test_mhi_ctx->ev_ctx_array.base,
+ test_mhi_ctx->ev_ctx_array.phys_base);
+
+ dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->ch_ctx_array.size,
+ test_mhi_ctx->ch_ctx_array.base,
+ test_mhi_ctx->ch_ctx_array.phys_base);
+
+ dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->msi.size,
+ test_mhi_ctx->msi.base, test_mhi_ctx->msi.phys_base);
+}
+
+static int ipa_test_mhi_alloc_mmio_space(void)
+{
+ int rc = 0;
+ struct ipa_mem_buffer *msi;
+ struct ipa_mem_buffer *ch_ctx_array;
+ struct ipa_mem_buffer *ev_ctx_array;
+ struct ipa_mem_buffer *mmio_buf;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+
+ IPA_UT_DBG("Entry\n");
+
+ msi = &test_mhi_ctx->msi;
+ ch_ctx_array = &test_mhi_ctx->ch_ctx_array;
+ ev_ctx_array = &test_mhi_ctx->ev_ctx_array;
+ mmio_buf = &test_mhi_ctx->mmio_buf;
+
+ /* Allocate MSI */
+ msi->size = 4;
+ msi->base = dma_alloc_coherent(ipa3_ctx->pdev, msi->size,
+ &msi->phys_base, GFP_KERNEL);
+ if (!msi->base) {
+ IPA_UT_ERR("no mem for msi\n");
+ return -ENOMEM;
+ }
+
+ IPA_UT_DBG("msi: base 0x%pK phys_addr 0x%pad size %d\n",
+ msi->base, &msi->phys_base, msi->size);
+
+ /* allocate buffer for channel context */
+ ch_ctx_array->size = sizeof(struct ipa_mhi_channel_context_array) *
+ IPA_MHI_TEST_NUM_CHANNELS;
+ ch_ctx_array->base = dma_alloc_coherent(ipa3_ctx->pdev,
+ ch_ctx_array->size, &ch_ctx_array->phys_base, GFP_KERNEL);
+ if (!ch_ctx_array->base) {
+ IPA_UT_ERR("no mem for ch ctx array\n");
+ rc = -ENOMEM;
+ goto fail_free_msi;
+ }
+ IPA_UT_DBG("channel ctx array: base 0x%pK phys_addr %pad size %d\n",
+ ch_ctx_array->base, &ch_ctx_array->phys_base,
+ ch_ctx_array->size);
+
+ /* allocate buffer for event context */
+ ev_ctx_array->size = sizeof(struct ipa_mhi_event_context_array) *
+ IPA_MHI_TEST_NUM_EVENT_RINGS;
+ ev_ctx_array->base = dma_alloc_coherent(ipa3_ctx->pdev,
+ ev_ctx_array->size, &ev_ctx_array->phys_base, GFP_KERNEL);
+ if (!ev_ctx_array->base) {
+ IPA_UT_ERR("no mem for ev ctx array\n");
+ rc = -ENOMEM;
+ goto fail_free_ch_ctx_arr;
+ }
+ IPA_UT_DBG("event ctx array: base 0x%pK phys_addr %pad size %d\n",
+ ev_ctx_array->base, &ev_ctx_array->phys_base,
+ ev_ctx_array->size);
+
+ /* allocate buffer for mmio */
+ mmio_buf->size = sizeof(struct ipa_mhi_mmio_register_set);
+ mmio_buf->base = dma_alloc_coherent(ipa3_ctx->pdev, mmio_buf->size,
+ &mmio_buf->phys_base, GFP_KERNEL);
+ if (!mmio_buf->base) {
+ IPA_UT_ERR("no mem for mmio buf\n");
+ rc = -ENOMEM;
+ goto fail_free_ev_ctx_arr;
+ }
+ IPA_UT_DBG("mmio buffer: base 0x%pK phys_addr %pad size %d\n",
+ mmio_buf->base, &mmio_buf->phys_base, mmio_buf->size);
+
+ /* initialize table */
+ p_mmio = (struct ipa_mhi_mmio_register_set *)mmio_buf->base;
+
+ /**
+ * 64-bit pointer to the channel context array in the host memory space;
+ * Host sets the pointer to the channel context array
+ * during initialization.
+ */
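+ /*
+ * The base is biased backwards by FIRST_CHANNEL_ID entries so that
+ * indexing with the absolute channel ID (starting at 100) lands on
+ * entry 0 of the allocated array.
+ */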
+ p_mmio->ccabap = (u32)ch_ctx_array->phys_base -
+ (IPA_MHI_TEST_FIRST_CHANNEL_ID *
+ sizeof(struct ipa_mhi_channel_context_array));
+ IPA_UT_DBG("pMmio->ccabap 0x%llx\n", p_mmio->ccabap);
+
+ /**
+ * 64-bit pointer to the event context array in the host memory space;
+ * Host sets the pointer to the event context array
+ * during initialization
+ */
+ p_mmio->ecabap = (u32)ev_ctx_array->phys_base -
+ (IPA_MHI_TEST_FIRST_EVENT_RING_ID *
+ sizeof(struct ipa_mhi_event_context_array));
+ IPA_UT_DBG("pMmio->ecabap 0x%llx\n", p_mmio->ecabap);
+
+ /**
+ * Register is not accessed by HWP.
+ * In the test, this register carries the virtual address
+ * of the channel context array buffer
+ */
+ p_mmio->crcbap = (unsigned long)ch_ctx_array->base;
+
+ /**
+ * Register is not accessed by HWP.
+ * In the test, this register carries the virtual address
+ * of the event ring context array buffer
+ */
+ p_mmio->crdb = (unsigned long)ev_ctx_array->base;
+
+ /* test is running only on device. no need to translate addresses */
+ p_mmio->mhiaddr.mhicrtlbase = 0x04;
+ p_mmio->mhiaddr.mhictrllimit = 0xFFFFFFFF;
+ p_mmio->mhiaddr.mhidatabase = 0x04;
+ p_mmio->mhiaddr.mhidatalimit = 0xFFFFFFFF;
+
+ return rc;
+
+fail_free_ev_ctx_arr:
+ dma_free_coherent(ipa3_ctx->pdev, ev_ctx_array->size,
+ ev_ctx_array->base, ev_ctx_array->phys_base);
+ ev_ctx_array->base = NULL;
+fail_free_ch_ctx_arr:
+ dma_free_coherent(ipa3_ctx->pdev, ch_ctx_array->size,
+ ch_ctx_array->base, ch_ctx_array->phys_base);
+ ch_ctx_array->base = NULL;
+fail_free_msi:
+ dma_free_coherent(ipa3_ctx->pdev, msi->size, msi->base,
+ msi->phys_base);
+ msi->base = NULL;
+ return rc;
+}
+
+static void ipa_mhi_test_destroy_channel_context(
+ struct ipa_mem_buffer transfer_ring_bufs[],
+ struct ipa_mem_buffer event_ring_bufs[],
+ u8 channel_id,
+ u8 event_ring_id)
+{
+ u32 ev_ring_idx;
+ u32 ch_idx;
+
+ IPA_UT_DBG("Entry\n");
+
+ if ((channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) ||
+ (channel_id > IPA_MHI_TEST_LAST_CHANNEL_ID)) {
+ IPA_UT_ERR("channal_id invalid %d\n", channel_id);
+ return;
+ }
+
+ if ((event_ring_id < IPA_MHI_TEST_FIRST_EVENT_RING_ID) ||
+ (event_ring_id > IPA_MHI_TEST_LAST_EVENT_RING_ID)) {
+ IPA_UT_ERR("event_ring_id invalid %d\n", event_ring_id);
+ return;
+ }
+
+ ch_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID;
+ ev_ring_idx = event_ring_id - IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+
+ if (transfer_ring_bufs[ch_idx].base) {
+ dma_free_coherent(ipa3_ctx->pdev,
+ transfer_ring_bufs[ch_idx].size,
+ transfer_ring_bufs[ch_idx].base,
+ transfer_ring_bufs[ch_idx].phys_base);
+ transfer_ring_bufs[ch_idx].base = NULL;
+ }
+
+ if (event_ring_bufs[ev_ring_idx].base) {
+ dma_free_coherent(ipa3_ctx->pdev,
+ event_ring_bufs[ev_ring_idx].size,
+ event_ring_bufs[ev_ring_idx].base,
+ event_ring_bufs[ev_ring_idx].phys_base);
+ event_ring_bufs[ev_ring_idx].base = NULL;
+ }
+}
+
+static int ipa_mhi_test_config_channel_context(
+ struct ipa_mem_buffer *mmio,
+ struct ipa_mem_buffer transfer_ring_bufs[],
+ struct ipa_mem_buffer event_ring_bufs[],
+ u8 channel_id,
+ u8 event_ring_id,
+ u16 transfer_ring_size,
+ u16 event_ring_size,
+ u8 ch_type)
+{
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_channels;
+ struct ipa_mhi_event_context_array *p_events;
+ u32 ev_ring_idx;
+ u32 ch_idx;
+
+ IPA_UT_DBG("Entry\n");
+
+ if ((channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) ||
+ (channel_id > IPA_MHI_TEST_LAST_CHANNEL_ID)) {
+ IPA_UT_DBG("channal_id invalid %d\n", channel_id);
+ return -EFAULT;
+ }
+
+ if ((event_ring_id < IPA_MHI_TEST_FIRST_EVENT_RING_ID) ||
+ (event_ring_id > IPA_MHI_TEST_LAST_EVENT_RING_ID)) {
+ IPA_UT_DBG("event_ring_id invalid %d\n", event_ring_id);
+ return -EFAULT;
+ }
+
+ p_mmio = (struct ipa_mhi_mmio_register_set *)mmio->base;
+ p_channels =
+ (struct ipa_mhi_channel_context_array *)
+ ((unsigned long)p_mmio->crcbap);
+ p_events = (struct ipa_mhi_event_context_array *)
+ ((unsigned long)p_mmio->crdb);
+
+ IPA_UT_DBG("p_mmio: %pK p_channels: %pK p_events: %pK\n",
+ p_mmio, p_channels, p_events);
+
+ ch_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID;
+ ev_ring_idx = event_ring_id - IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+
+ IPA_UT_DBG("ch_idx: %u ev_ring_idx: %u\n", ch_idx, ev_ring_idx);
+ if (transfer_ring_bufs[ch_idx].base) {
+ IPA_UT_ERR("ChannelId %d is already allocated\n", channel_id);
+ return -EFAULT;
+ }
+
+ /* allocate and init event ring if needed */
+ if (!event_ring_bufs[ev_ring_idx].base) {
+ IPA_UT_LOG("Configuring event ring...\n");
+ event_ring_bufs[ev_ring_idx].size =
+ event_ring_size *
+ sizeof(struct ipa_mhi_event_ring_element);
+ event_ring_bufs[ev_ring_idx].base =
+ dma_alloc_coherent(ipa3_ctx->pdev,
+ event_ring_bufs[ev_ring_idx].size,
+ &event_ring_bufs[ev_ring_idx].phys_base,
+ GFP_KERNEL);
+ if (!event_ring_bufs[ev_ring_idx].base) {
+ IPA_UT_ERR("no mem for ev ring buf\n");
+ return -ENOMEM;
+ }
+ p_events[ev_ring_idx].intmodc = 1;
+ p_events[ev_ring_idx].intmodt = 0;
+ p_events[ev_ring_idx].msivec = event_ring_id;
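+ /* ring starts empty: rp and wp both point at rbase */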
+ p_events[ev_ring_idx].rbase =
+ (u32)event_ring_bufs[ev_ring_idx].phys_base;
+ p_events[ev_ring_idx].rlen =
+ event_ring_bufs[ev_ring_idx].size;
+ p_events[ev_ring_idx].rp =
+ (u32)event_ring_bufs[ev_ring_idx].phys_base;
+ p_events[ev_ring_idx].wp =
+ (u32)event_ring_bufs[ev_ring_idx].phys_base;
+ } else {
+ IPA_UT_LOG("Skip configuring event ring - already done\n");
+ }
+
+ transfer_ring_bufs[ch_idx].size =
+ transfer_ring_size *
+ sizeof(struct ipa_mhi_transfer_ring_element);
+ transfer_ring_bufs[ch_idx].base =
+ dma_alloc_coherent(ipa3_ctx->pdev,
+ transfer_ring_bufs[ch_idx].size,
+ &transfer_ring_bufs[ch_idx].phys_base,
+ GFP_KERNEL);
+ if (!transfer_ring_bufs[ch_idx].base) {
+ IPA_UT_ERR("no mem for xfer ring buf\n");
+ dma_free_coherent(ipa3_ctx->pdev,
+ event_ring_bufs[ev_ring_idx].size,
+ event_ring_bufs[ev_ring_idx].base,
+ event_ring_bufs[ev_ring_idx].phys_base);
+ event_ring_bufs[ev_ring_idx].base = NULL;
+ return -ENOMEM;
+ }
+
+ p_channels[ch_idx].erindex = event_ring_id;
+ p_channels[ch_idx].rbase = (u32)transfer_ring_bufs[ch_idx].phys_base;
+ p_channels[ch_idx].rlen = transfer_ring_bufs[ch_idx].size;
+ p_channels[ch_idx].rp = (u32)transfer_ring_bufs[ch_idx].phys_base;
+ p_channels[ch_idx].wp = (u32)transfer_ring_bufs[ch_idx].phys_base;
+ p_channels[ch_idx].chtype = ch_type;
+ p_channels[ch_idx].brsmode = IPA_MHI_BURST_MODE_DEFAULT;
+ p_channels[ch_idx].pollcfg = 0;
+
+ return 0;
+}
+
+static void ipa_mhi_test_destroy_data_structures(void)
+{
+ IPA_UT_DBG("Entry\n");
+
+ /* Destroy OUT data buffer */
+ if (test_mhi_ctx->out_buffer.base) {
+ dma_free_coherent(ipa3_ctx->pdev,
+ test_mhi_ctx->out_buffer.size,
+ test_mhi_ctx->out_buffer.base,
+ test_mhi_ctx->out_buffer.phys_base);
+ test_mhi_ctx->out_buffer.base = NULL;
+ }
+
+ /* Destroy IN data buffer */
+ if (test_mhi_ctx->in_buffer.base) {
+ dma_free_coherent(ipa3_ctx->pdev,
+ test_mhi_ctx->in_buffer.size,
+ test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->in_buffer.phys_base);
+ test_mhi_ctx->in_buffer.base = NULL;
+ }
+
+ /* Destroy IN channel ctx */
+ ipa_mhi_test_destroy_channel_context(
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1);
+
+ /* Destroy OUT channel ctx */
+ ipa_mhi_test_destroy_channel_context(
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID);
+}
+
+static int ipa_mhi_test_setup_data_structures(void)
+{
+ int rc = 0;
+
+ IPA_UT_DBG("Entry\n");
+
+ /* Config OUT Channel Context */
+ rc = ipa_mhi_test_config_channel_context(
+ &test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID,
+ 0x100,
+ 0x80,
+ IPA_MHI_OUT_CHAHNNEL);
+ if (rc) {
+ IPA_UT_ERR("Fail to config OUT ch ctx - err %d", rc);
+ return rc;
+ }
+
+ /* Config IN Channel Context */
+ rc = ipa_mhi_test_config_channel_context(
+ &test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1,
+ 0x100,
+ 0x80,
+ IPA_MHI_IN_CHAHNNEL);
+ if (rc) {
+ IPA_UT_ERR("Fail to config IN ch ctx - err %d", rc);
+ goto fail_destroy_out_ch_ctx;
+ }
+
+ /* allocate IN data buffer */
+ test_mhi_ctx->in_buffer.size = IPA_MHI_TEST_MAX_DATA_BUF_SIZE;
+ test_mhi_ctx->in_buffer.base = dma_alloc_coherent(
+ ipa3_ctx->pdev, test_mhi_ctx->in_buffer.size,
+ &test_mhi_ctx->in_buffer.phys_base, GFP_KERNEL);
+ if (!test_mhi_ctx->in_buffer.base) {
+ IPA_UT_ERR("no mem for In data buffer\n");
+ rc = -ENOMEM;
+ goto fail_destroy_in_ch_ctx;
+ }
+ memset(test_mhi_ctx->in_buffer.base, 0,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+
+ /* allocate OUT data buffer */
+ test_mhi_ctx->out_buffer.size = IPA_MHI_TEST_MAX_DATA_BUF_SIZE;
+ test_mhi_ctx->out_buffer.base = dma_alloc_coherent(
+ ipa3_ctx->pdev, test_mhi_ctx->out_buffer.size,
+ &test_mhi_ctx->out_buffer.phys_base, GFP_KERNEL);
+ if (!test_mhi_ctx->out_buffer.base) {
+ IPA_UT_ERR("no mem for Out data buffer\n");
+ rc = -ENOMEM;
+ goto fail_destroy_in_data_buf;
+ }
+ memset(test_mhi_ctx->out_buffer.base, 0,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+
+ return 0;
+
+fail_destroy_in_data_buf:
+ dma_free_coherent(ipa3_ctx->pdev,
+ test_mhi_ctx->in_buffer.size,
+ test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->in_buffer.phys_base);
+ test_mhi_ctx->in_buffer.base = NULL;
+fail_destroy_in_ch_ctx:
+ ipa_mhi_test_destroy_channel_context(
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1);
+fail_destroy_out_ch_ctx:
+ ipa_mhi_test_destroy_channel_context(
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID);
+ return rc;
+}
+
+/**
+ * ipa_test_mhi_suite_setup() - Suite setup function
+ */
+static int ipa_test_mhi_suite_setup(void **ppriv)
+{
+ int rc = 0;
+
+ IPA_UT_DBG("Start Setup\n");
+
+ if (!gsi_ctx) {
+ IPA_UT_ERR("No GSI ctx\n");
+ return -EINVAL;
+ }
+
+ if (!ipa3_ctx) {
+ IPA_UT_ERR("No IPA ctx\n");
+ return -EINVAL;
+ }
+
+ test_mhi_ctx = kzalloc(sizeof(struct ipa_test_mhi_context),
+ GFP_KERNEL);
+ if (!test_mhi_ctx) {
+ IPA_UT_ERR("failed allocated ctx\n");
+ return -ENOMEM;
+ }
+
+ test_mhi_ctx->gsi_mmio = ioremap_nocache(gsi_ctx->per.phys_addr,
+ gsi_ctx->per.size);
+ if (!test_mhi_ctx->gsi_mmio) {
+ IPA_UT_ERR("failed to remap GSI HW size=%lu\n",
+ gsi_ctx->per.size);
+ rc = -EFAULT;
+ goto fail_free_ctx;
+ }
+
+ rc = ipa_test_mhi_alloc_mmio_space();
+ if (rc) {
+ IPA_UT_ERR("failed to alloc mmio space");
+ goto fail_iounmap;
+ }
+
+ rc = ipa_mhi_test_setup_data_structures();
+ if (rc) {
+ IPA_UT_ERR("failed to setup data structures");
+ goto fail_free_mmio_spc;
+ }
+
+ *ppriv = test_mhi_ctx;
+ return 0;
+
+fail_free_mmio_spc:
+ ipa_test_mhi_free_mmio_space();
+fail_iounmap:
+ iounmap(test_mhi_ctx->gsi_mmio);
+fail_free_ctx:
+ kfree(test_mhi_ctx);
+ test_mhi_ctx = NULL;
+ return rc;
+}
+
+/**
+ * ipa_test_mhi_suite_teardown() - Suite teardown function
+ */
+static int ipa_test_mhi_suite_teardown(void *priv)
+{
+ IPA_UT_DBG("Start Teardown\n");
+
+ if (!test_mhi_ctx)
+ return 0;
+
+ ipa_mhi_test_destroy_data_structures();
+ ipa_test_mhi_free_mmio_space();
+ iounmap(test_mhi_ctx->gsi_mmio);
+ kfree(test_mhi_ctx);
+ test_mhi_ctx = NULL;
+
+ return 0;
+}
+
+/**
+ * ipa_mhi_test_initialize_driver() - MHI init and possibly start and connect
+ *
+ * To be run during tests
+ * 1. MHI init (Ready state)
+ * 2. Conditional MHI start and connect (M0 state)
+ */
+static int ipa_mhi_test_initialize_driver(bool skip_start_and_conn)
+{
+ int rc = 0;
+ struct ipa_mhi_init_params init_params;
+ struct ipa_mhi_start_params start_params;
+ struct ipa_mhi_connect_params prod_params;
+ struct ipa_mhi_connect_params cons_params;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ bool is_dma;
+ u64 phys_addr;
+
+ IPA_UT_LOG("Entry\n");
+
+ p_mmio = test_mhi_ctx->mmio_buf.base;
+
+ /* start IPA MHI */
+ memset(&init_params, 0, sizeof(init_params));
+ init_params.msi.addr_low = test_mhi_ctx->msi.phys_base;
+ init_params.msi.data = 0x10000000;
+ init_params.msi.mask = ~0x10000000;
+ /* MMIO not needed for GSI */
+ init_params.first_ch_idx = IPA_MHI_TEST_FIRST_CHANNEL_ID;
+ init_params.first_er_idx = IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+ init_params.assert_bit40 = false;
+ init_params.notify = ipa_mhi_test_cb;
+ init_params.priv = NULL;
+ init_params.test_mode = true;
+
+ rc = ipa_mhi_init(&init_params);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_init failed %d\n", rc);
+ return rc;
+ }
+
+ IPA_UT_LOG("Wait async ready event\n");
+ if (wait_for_completion_timeout(&mhi_test_ready_comp, 10 * HZ) == 0) {
+ IPA_UT_LOG("timeout waiting for READY event");
+ IPA_UT_TEST_FAIL_REPORT("failed waiting for state ready");
+ return -ETIME;
+ }
+
+ if (ipa_mhi_is_using_dma(&is_dma)) {
+ IPA_UT_LOG("is_dma checkign failed. Is MHI loaded?\n");
+ IPA_UT_TEST_FAIL_REPORT("failed checking using dma");
+ return -EPERM;
+ }
+
+ if (is_dma) {
+ IPA_UT_LOG("init ipa_dma\n");
+ rc = ipa_dma_init();
+ if (rc && rc != -EFAULT) {
+ IPA_UT_LOG("ipa_dma_init failed, %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("failed init dma");
+ return rc;
+ }
+ IPA_UT_LOG("enable ipa_dma\n");
+ rc = ipa_dma_enable();
+ if (rc && rc != -EPERM) {
+ IPA_UT_LOG("ipa_dma_enable failed, %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("failed enable dma");
+ return rc;
+ }
+ }
+
+ if (!skip_start_and_conn) {
+ memset(&start_params, 0, sizeof(start_params));
+ start_params.channel_context_array_addr = p_mmio->ccabap;
+ start_params.event_context_array_addr = p_mmio->ecabap;
+
+ IPA_UT_LOG("BEFORE mhi_start\n");
+ rc = ipa_mhi_start(&start_params);
+ if (rc) {
+ IPA_UT_LOG("mhi_start failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail start mhi");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER mhi_start\n");
+
+ phys_addr = p_mmio->ccabap + (IPA_MHI_TEST_FIRST_CHANNEL_ID *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ IPA_UT_LOG("ch: %d base: 0x%pK phys_addr 0x%llx chstate: %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ p_ch_ctx_array, phys_addr,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+ memset(&prod_params, 0, sizeof(prod_params));
+ prod_params.sys.client = IPA_CLIENT_MHI_PROD;
+ prod_params.sys.ipa_ep_cfg.mode.mode = IPA_DMA;
+ prod_params.sys.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS;
+ prod_params.sys.ipa_ep_cfg.seq.seq_type =
+ IPA_MHI_TEST_SEQ_TYPE_DMA;
+ prod_params.sys.ipa_ep_cfg.seq.set_dynamic = true;
+ prod_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID;
+ IPA_UT_LOG("BEFORE connect_pipe (PROD): client:%d ch_id:%u\n",
+ prod_params.sys.client, prod_params.channel_id);
+ rc = ipa_mhi_connect_pipe(&prod_params,
+ &test_mhi_ctx->prod_hdl);
+ if (rc) {
+ IPA_UT_LOG("mhi_connect_pipe failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail connect PROD pipe");
+ return rc;
+ }
+
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("MHI_PROD: chstate is not RUN chstate:%s\n",
+ ipa_mhi_get_state_str(
+ p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not run");
+ return -EFAULT;
+ }
+
+ phys_addr = p_mmio->ccabap +
+ ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ IPA_UT_LOG("ch: %d base: 0x%pK phys_addr 0x%llx chstate: %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ p_ch_ctx_array, phys_addr,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+ memset(&cons_params, 0, sizeof(cons_params));
+ cons_params.sys.client = IPA_CLIENT_MHI_CONS;
+ cons_params.sys.skip_ep_cfg = true;
+ cons_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID + 1;
+ IPA_UT_LOG("BEFORE connect_pipe (CONS): client:%d ch_id:%u\n",
+ cons_params.sys.client, cons_params.channel_id);
+ rc = ipa_mhi_connect_pipe(&cons_params,
+ &test_mhi_ctx->cons_hdl);
+ if (rc) {
+ IPA_UT_LOG("mhi_connect_pipe failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail connect CONS pipe");
+ return rc;
+ }
+
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("MHI_CONS: chstate is not RUN chstate:%s\n",
+ ipa_mhi_get_state_str(
+ p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not run");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. MHI destroy
+ * 2. re-configure the channels
+ */
+static int ipa_mhi_test_destroy(struct ipa_test_mhi_context *ctx)
+{
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ u64 phys_addr;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ int rc;
+
+ IPA_UT_LOG("Entry\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("Input err invalid ctx\n");
+ return -EINVAL;
+ }
+
+ p_mmio = ctx->mmio_buf.base;
+
+ phys_addr = p_mmio->ccabap +
+ ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = ctx->ch_ctx_array.base +
+ (phys_addr - ctx->ch_ctx_array.phys_base);
+ IPA_UT_LOG("channel id %d (CONS): chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+ phys_addr = p_mmio->ccabap +
+ ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = ctx->ch_ctx_array.base +
+ (phys_addr - ctx->ch_ctx_array.phys_base);
+ IPA_UT_LOG("channel id %d (PROD): chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+ IPA_UT_LOG("MHI Destroy\n");
+ ipa_mhi_destroy();
+ IPA_UT_LOG("Post MHI Destroy\n");
+
+ ctx->prod_hdl = 0;
+ ctx->cons_hdl = 0;
+
+ dma_free_coherent(ipa3_ctx->pdev, ctx->xfer_ring_bufs[1].size,
+ ctx->xfer_ring_bufs[1].base, ctx->xfer_ring_bufs[1].phys_base);
+ ctx->xfer_ring_bufs[1].base = NULL;
+
+ IPA_UT_LOG("config channel context for channel %d (MHI CONS)\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1);
+ rc = ipa_mhi_test_config_channel_context(
+ &ctx->mmio_buf,
+ ctx->xfer_ring_bufs,
+ ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1,
+ 0x100,
+ 0x80,
+ IPA_MHI_IN_CHAHNNEL);
+ if (rc) {
+ IPA_UT_LOG("config channel context failed %d, channel %d\n",
+ rc, IPA_MHI_TEST_FIRST_CHANNEL_ID + 1);
+ IPA_UT_TEST_FAIL_REPORT("fail config CONS channel ctx");
+ return -EFAULT;
+ }
+
+ dma_free_coherent(ipa3_ctx->pdev, ctx->xfer_ring_bufs[0].size,
+ ctx->xfer_ring_bufs[0].base, ctx->xfer_ring_bufs[0].phys_base);
+ ctx->xfer_ring_bufs[0].base = NULL;
+
+ IPA_UT_LOG("config channel context for channel %d (MHI PROD)\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID);
+ rc = ipa_mhi_test_config_channel_context(
+ &ctx->mmio_buf,
+ ctx->xfer_ring_bufs,
+ ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID,
+ 0x100,
+ 0x80,
+ IPA_MHI_OUT_CHAHNNEL);
+ if (rc) {
+ IPA_UT_LOG("config channel context failed %d, channel %d\n",
+ rc, IPA_MHI_TEST_FIRST_CHANNEL_ID);
+ IPA_UT_TEST_FAIL_REPORT("fail config PROD channel ctx");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. Destroy
+ * 2. Initialize (to Ready or M0 states)
+ */
+static int ipa_mhi_test_reset(struct ipa_test_mhi_context *ctx,
+ bool skip_start_and_conn)
+{
+ int rc;
+
+ IPA_UT_LOG("Entry\n");
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy fail");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(skip_start_and_conn);
+ if (rc) {
+ IPA_UT_LOG("driver init failed skip_start_and_con=%d rc=%d\n",
+ skip_start_and_conn, rc);
+ IPA_UT_TEST_FAIL_REPORT("init fail");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. disconnect cons channel
+ * 2. config cons channel
+ * 3. disconnect prod channel
+ * 4. config prod channel
+ * 5. connect prod
+ * 6. connect cons
+ */
+static int ipa_mhi_test_channel_reset(void)
+{
+ int rc;
+ struct ipa_mhi_connect_params prod_params;
+ struct ipa_mhi_connect_params cons_params;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ u64 phys_addr;
+
+ p_mmio = test_mhi_ctx->mmio_buf.base;
+
+ IPA_UT_LOG("Before pipe disconnect (CONS) client hdl=%u=\n",
+ test_mhi_ctx->cons_hdl);
+ rc = ipa_mhi_disconnect_pipe(test_mhi_ctx->cons_hdl);
+ if (rc) {
+ IPA_UT_LOG("disconnect_pipe failed (CONS) %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("CONS pipe disconnect fail");
+ return -EFAULT;
+ }
+ test_mhi_ctx->cons_hdl = 0;
+
+ phys_addr = p_mmio->ccabap +
+ ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+ IPA_UT_LOG("chstate is not disabled! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not disabled");
+ return -EFAULT;
+ }
+
+ dma_free_coherent(ipa3_ctx->pdev,
+ test_mhi_ctx->xfer_ring_bufs[1].size,
+ test_mhi_ctx->xfer_ring_bufs[1].base,
+ test_mhi_ctx->xfer_ring_bufs[1].phys_base);
+ test_mhi_ctx->xfer_ring_bufs[1].base = NULL;
+ rc = ipa_mhi_test_config_channel_context(
+ &test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1,
+ 0x100,
+ 0x80,
+ IPA_MHI_IN_CHAHNNEL);
+ if (rc) {
+ IPA_UT_LOG("config_channel_context IN failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail config CONS channel context");
+ return -EFAULT;
+ }
+ IPA_UT_LOG("Before pipe disconnect (CONS) client hdl=%u=\n",
+ test_mhi_ctx->prod_hdl);
+ rc = ipa_mhi_disconnect_pipe(test_mhi_ctx->prod_hdl);
+ if (rc) {
+ IPA_UT_LOG("disconnect_pipe failed (PROD) %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("PROD pipe disconnect fail");
+ return -EFAULT;
+ }
+ test_mhi_ctx->prod_hdl = 0;
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+ IPA_UT_LOG("chstate is not disabled! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not disabled");
+ return -EFAULT;
+ }
+
+ dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->xfer_ring_bufs[0].size,
+ test_mhi_ctx->xfer_ring_bufs[0].base,
+ test_mhi_ctx->xfer_ring_bufs[0].phys_base);
+ test_mhi_ctx->xfer_ring_bufs[0].base = NULL;
+ rc = ipa_mhi_test_config_channel_context(
+ &test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID,
+ 0x100,
+ 0x80,
+ IPA_MHI_OUT_CHAHNNEL);
+ if (rc) {
+ IPA_UT_LOG("config_channel_context OUT failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not disabled");
+ return -EFAULT;
+ }
+
+ memset(&prod_params, 0, sizeof(prod_params));
+ prod_params.sys.client = IPA_CLIENT_MHI_PROD;
+ prod_params.sys.ipa_ep_cfg.mode.mode = IPA_DMA;
+ prod_params.sys.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS;
+ prod_params.sys.ipa_ep_cfg.seq.seq_type = IPA_MHI_TEST_SEQ_TYPE_DMA;
+ prod_params.sys.ipa_ep_cfg.seq.set_dynamic = true;
+ prod_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID;
+ IPA_UT_LOG("BEFORE connect PROD\n");
+ rc = ipa_mhi_connect_pipe(&prod_params, &test_mhi_ctx->prod_hdl);
+ if (rc) {
+ IPA_UT_LOG("connect_pipe failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail connect PROD pipe");
+ return rc;
+ }
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not run! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not run");
+ return -EFAULT;
+ }
+
+ memset(&cons_params, 0, sizeof(cons_params));
+ cons_params.sys.client = IPA_CLIENT_MHI_CONS;
+ cons_params.sys.skip_ep_cfg = true;
+ cons_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID + 1;
+ IPA_UT_LOG("BEFORE connect CONS\n");
+ rc = ipa_mhi_connect_pipe(&cons_params, &test_mhi_ctx->cons_hdl);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_connect_pipe failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail connect CONS pipe");
+ return rc;
+ }
+
+ phys_addr = p_mmio->ccabap +
+ ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not run! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not run");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * Send data
+ */
+static int ipa_mhi_test_q_transfer_re(struct ipa_mem_buffer *mmio,
+ struct ipa_mem_buffer xfer_ring_bufs[],
+ struct ipa_mem_buffer ev_ring_bufs[],
+ u8 channel_id,
+ struct ipa_mem_buffer buf_array[],
+ int buf_array_size,
+ bool ieob,
+ bool ieot,
+ bool bei,
+ bool trigger_db)
+{
+ struct ipa_mhi_transfer_ring_element *curr_re;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_channels;
+ struct ipa_mhi_event_context_array *p_events;
+ u32 channel_idx;
+ u32 event_ring_index;
+ u32 wp_ofst;
+ u32 rp_ofst;
+ u32 next_wp_ofst;
+ int i;
+ u32 num_of_ed_to_queue;
+
+ IPA_UT_LOG("Entry\n");
+
+ p_mmio = (struct ipa_mhi_mmio_register_set *)mmio->base;
+ p_channels = (struct ipa_mhi_channel_context_array *)
+ ((unsigned long)p_mmio->crcbap);
+ p_events = (struct ipa_mhi_event_context_array *)
+ ((unsigned long)p_mmio->crdb);
+
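+ /*
+ * Number of event ring elements to reserve: one per buffer when IEOB is
+ * set, a single one when only IEOT is set, none otherwise.
+ */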
+ if (ieob)
+ num_of_ed_to_queue = buf_array_size;
+ else
+ num_of_ed_to_queue = ieot ? 1 : 0;
+
+ if (channel_id >=
+ (IPA_MHI_TEST_FIRST_CHANNEL_ID + IPA_MHI_TEST_NUM_CHANNELS) ||
+ channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) {
+ IPA_UT_LOG("Invalud Channel ID %d\n", channel_id);
+ return -EFAULT;
+ }
+
+ channel_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID;
+
+ if (!xfer_ring_bufs[channel_idx].base) {
+ IPA_UT_LOG("Channel is not allocated\n");
+ return -EFAULT;
+ }
+ if (p_channels[channel_idx].brsmode == IPA_MHI_BURST_MODE_DEFAULT ||
+ p_channels[channel_idx].brsmode == IPA_MHI_BURST_MODE_ENABLE)
+ num_of_ed_to_queue += 1; /* for OOB/DB mode event */
+
+ /* First queue EDs */
+ event_ring_index = p_channels[channel_idx].erindex -
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+
+ wp_ofst = (u32)(p_events[event_ring_index].wp -
+ p_events[event_ring_index].rbase);
+
+ if (p_events[event_ring_index].rlen & 0xFFFFFFFF00000000) {
+ IPA_UT_LOG("invalid ev rlen %llu\n",
+ p_events[event_ring_index].rlen);
+ return -EFAULT;
+ }
+
+ next_wp_ofst = (wp_ofst + num_of_ed_to_queue *
+ sizeof(struct ipa_mhi_event_ring_element)) %
+ (u32)p_events[event_ring_index].rlen;
+
+ /* set next WP */
+ p_events[event_ring_index].wp =
+ (u32)p_events[event_ring_index].rbase + next_wp_ofst;
+
+ /* write value to event ring doorbell */
+ IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n",
+ p_events[event_ring_index].wp,
+ &(gsi_ctx->per.phys_addr), GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
+ event_ring_index + IPA_MHI_GSI_ER_START, 0));
+ iowrite32(p_events[event_ring_index].wp,
+ test_mhi_ctx->gsi_mmio +
+ GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
+ event_ring_index + IPA_MHI_GSI_ER_START, 0));
+
+ for (i = 0; i < buf_array_size; i++) {
+ /* calculate virtual pointer for current WP and RP */
+ wp_ofst = (u32)(p_channels[channel_idx].wp -
+ p_channels[channel_idx].rbase);
+ rp_ofst = (u32)(p_channels[channel_idx].rp -
+ p_channels[channel_idx].rbase);
+ (void)rp_ofst;
+ curr_re = (struct ipa_mhi_transfer_ring_element *)
+ ((unsigned long)xfer_ring_bufs[channel_idx].base +
+ wp_ofst);
+ if (p_channels[channel_idx].rlen & 0xFFFFFFFF00000000) {
+ IPA_UT_LOG("invalid ch rlen %llu\n",
+ p_channels[channel_idx].rlen);
+ return -EFAULT;
+ }
+ next_wp_ofst = (wp_ofst +
+ sizeof(struct ipa_mhi_transfer_ring_element)) %
+ (u32)p_channels[channel_idx].rlen;
+
+ /* write current RE */
+ curr_re->type = IPA_MHI_RING_ELEMENT_TRANSFER;
+ curr_re->len = (u16)buf_array[i].size;
+ curr_re->ptr = (u32)buf_array[i].phys_base;
+ curr_re->word_C.bits.bei = bei;
+ curr_re->word_C.bits.ieob = ieob;
+ curr_re->word_C.bits.ieot = ieot;
+
+ /* set next WP */
+ p_channels[channel_idx].wp =
+ p_channels[channel_idx].rbase + next_wp_ofst;
+
+ if (i == (buf_array_size - 1)) {
+ /* last buffer */
+ curr_re->word_C.bits.chain = 0;
+ if (trigger_db) {
+ IPA_UT_LOG(
+ "DB to channel 0x%llx: base %pa ofst 0x%x\n"
+ , p_channels[channel_idx].wp
+ , &(gsi_ctx->per.phys_addr)
+ , GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(
+ channel_idx, 0));
+ iowrite32(p_channels[channel_idx].wp,
+ test_mhi_ctx->gsi_mmio +
+ GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(
+ channel_idx, 0));
+ }
+ } else {
+ curr_re->word_C.bits.chain = 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * Send data in loopback (from OUT to IN) and compare
+ */
+static int ipa_mhi_test_loopback_data_transfer(void)
+{
+ struct ipa_mem_buffer *p_mmio;
+ int i;
+ int rc;
+ static int val;
+ bool timeout = true;
+
+ IPA_UT_LOG("Entry\n");
+
+ p_mmio = &test_mhi_ctx->mmio_buf;
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+ val++;
+
+ memset(test_mhi_ctx->in_buffer.base, 0,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++)
+ memset(test_mhi_ctx->out_buffer.base + i, (val + i) & 0xFF, 1);
+
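+ /*
+ * The IN (receive) descriptor is queued before the OUT data is pushed so
+ * that a buffer is already posted when the looped-back data arrives.
+ */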
+ /* queue RE for IN side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(p_mmio,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+
+ if (rc) {
+ IPA_UT_LOG("q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+ return rc;
+ }
+
+ /* queue REs for OUT side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(p_mmio,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ &test_mhi_ctx->out_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+
+ if (rc) {
+ IPA_UT_LOG("q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail OUT q xfer re");
+ return rc;
+ }
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+ if (timeout) {
+ IPA_UT_LOG("transfer timeout. MSI = 0x%x\n",
+ *((u32 *)test_mhi_ctx->msi.base));
+ IPA_UT_TEST_FAIL_REPORT("xfter timeout");
+ return -EFAULT;
+ }
+
+ /* compare the two buffers */
+ if (memcmp(test_mhi_ctx->in_buffer.base, test_mhi_ctx->out_buffer.base,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+ IPA_UT_LOG("buffer are not equal\n");
+ IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * Do suspend and check that channel states become suspend when success is expected
+ */
+static int ipa_mhi_test_suspend(bool force, bool should_success)
+{
+ int rc;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ u64 phys_addr;
+
+ IPA_UT_LOG("Entry\n");
+
+ rc = ipa_mhi_suspend(force);
+ if (should_success && rc != 0) {
+ IPA_UT_LOG("ipa_mhi_suspend failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend failed");
+ return -EFAULT;
+ }
+
+ if (!should_success && rc != -EAGAIN) {
+ IPA_UT_LOG("ipa_mhi_suspenddid not return -EAGAIN fail %d\n",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend succeeded unexpectedly");
+ return -EFAULT;
+ }
+
+ p_mmio = test_mhi_ctx->mmio_buf.base;
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (should_success) {
+ if (p_ch_ctx_array->chstate !=
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND) {
+ IPA_UT_LOG("chstate is not suspend. ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not suspend");
+ return -EFAULT;
+ }
+ if (!force && p_ch_ctx_array->rp != p_ch_ctx_array->wp) {
+ IPA_UT_LOG("rp not updated ch %d rp 0x%llx wp 0x%llx\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ p_ch_ctx_array->rp, p_ch_ctx_array->wp);
+ IPA_UT_TEST_FAIL_REPORT("rp was not updated");
+ return -EFAULT;
+ }
+ } else {
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not run");
+ return -EFAULT;
+ }
+ }
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (should_success) {
+ if (p_ch_ctx_array->chstate !=
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND) {
+ IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not suspend");
+ return -EFAULT;
+ }
+ if (!force && p_ch_ctx_array->rp != p_ch_ctx_array->wp) {
+ IPA_UT_LOG("rp not updated ch %d rp 0x%llx wp 0x%llx\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ p_ch_ctx_array->rp, p_ch_ctx_array->wp);
+ IPA_UT_TEST_FAIL_REPORT("rp was not updated");
+ return -EFAULT;
+ }
+ } else {
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not run");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * Do resume and check channel state to be running
+ */
+static int ipa_test_mhi_resume(void)
+{
+ int rc;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ u64 phys_addr;
+
+ rc = ipa_mhi_resume();
+ if (rc) {
+ IPA_UT_LOG("resume failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("resume failed");
+ return -EFAULT;
+ }
+
+ p_mmio = test_mhi_ctx->mmio_buf.base;
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not run");
+ return -EFAULT;
+ }
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not run");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. suspend
+ * 2. queue RE for IN and OUT and send data
+ * 3. should get MSI timeout due to suspend
+ * 4. resume
+ * 5. should get the MSIs now
+ * 6. compare the IN and OUT buffers
+ */
+static int ipa_mhi_test_suspend_resume(void)
+{
+ int rc;
+ int i;
+ bool timeout = true;
+
+ IPA_UT_LOG("Entry\n");
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ rc = ipa_mhi_test_suspend(false, true);
+ if (rc) {
+ IPA_UT_LOG("suspend failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend failed");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+ memset(test_mhi_ctx->in_buffer.base, 0, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++)
+ memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+
+ /* queue RE for IN side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+ return rc;
+ }
+
+ /* queue REs for OUT side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ &test_mhi_ctx->out_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail OUT q xfer re");
+ return rc;
+ }
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+ if (!timeout) {
+ IPA_UT_LOG("Error: transfer success on suspend\n");
+ IPA_UT_TEST_FAIL_REPORT("xfer suceeded unexpectedly");
+ return -EFAULT;
+ }
+
+ IPA_UT_LOG("BEFORE resume\n");
+ rc = ipa_test_mhi_resume();
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_resume failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("resume fail");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER resume\n");
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+ if (timeout) {
+ IPA_UT_LOG("Error: transfer timeout\n");
+ IPA_UT_TEST_FAIL_REPORT("xfer timeout");
+ return -EFAULT;
+ }
+
+ /* compare the two buffers */
+ if (memcmp(test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->out_buffer.base,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+ IPA_UT_LOG("Error: buffers are not equal\n");
+ IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. enable aggregation
+ * 2. queue IN RE (ring element)
+ * 3. allocate skb with data
+ * 4. send it (this will create open aggr frame)
+ */
+static int ipa_mhi_test_create_aggr_open_frame(void)
+{
+ struct ipa_ep_cfg_aggr ep_aggr;
+ struct sk_buff *skb;
+ int rc;
+ int i;
+ u32 aggr_state_active;
+
+ IPA_UT_LOG("Entry\n");
+
+ memset(&ep_aggr, 0, sizeof(ep_aggr));
+ ep_aggr.aggr_en = IPA_ENABLE_AGGR;
+ ep_aggr.aggr = IPA_GENERIC;
+ ep_aggr.aggr_pkt_limit = 2;
+
+ rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr);
+ if (rc) {
+ IPA_UT_LOG("failed to configure aggr");
+ IPA_UT_TEST_FAIL_REPORT("failed to configure aggr");
+ return rc;
+ }
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+ /* queue RE for IN side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+ return rc;
+ }
+
+ skb = dev_alloc_skb(IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ if (!skb) {
+ IPA_UT_LOG("non mem for skb\n");
+ IPA_UT_TEST_FAIL_REPORT("fail alloc skb");
+ return -ENOMEM;
+ }
+ skb_put(skb, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) {
+ memset(skb->data + i, i & 0xFF, 1);
+ memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+ }
+
+ rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL);
+ if (rc) {
+ IPA_UT_LOG("ipa_tx_dp failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail");
+ return rc;
+ }
+
+ msleep(20);
+
+ aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+ IPA_UT_LOG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active);
+ if (aggr_state_active == 0) {
+ IPA_UT_LOG("No aggregation frame open!\n");
+ IPA_UT_TEST_FAIL_REPORT("No aggregation frame open");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. create open aggr by sending data
+ * 2. suspend - if forced it should succeed, otherwise it fails
+ * 3. if force - wait for wakeup event - it should arrive
+ * 4. if force - resume
+ * 5. force close the aggr.
+ * 6. wait for MSI - it should arrive
+ * 7. compare IN and OUT buffers
+ * 8. disable aggr.
+ */
+static int ipa_mhi_test_suspend_aggr_open(bool force)
+{
+ int rc;
+ struct ipa_ep_cfg_aggr ep_aggr;
+ bool timeout = true;
+
+ IPA_UT_LOG("Entry\n");
+
+ rc = ipa_mhi_test_create_aggr_open_frame();
+ if (rc) {
+ IPA_UT_LOG("failed create open aggr\n");
+ IPA_UT_TEST_FAIL_REPORT("fail create open aggr");
+ return rc;
+ }
+
+ if (force)
+ reinit_completion(&mhi_test_wakeup_comp);
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ /*
+ * if suspend is forced, then it should succeed.
+ * otherwise it should fail due to the open aggr frame.
+ */
+ rc = ipa_mhi_test_suspend(force, force);
+ if (rc) {
+ IPA_UT_LOG("suspend failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend fail");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ if (force) {
+ if (!wait_for_completion_timeout(&mhi_test_wakeup_comp, HZ)) {
+ IPA_UT_LOG("timeout waiting for wakeup event\n");
+ IPA_UT_TEST_FAIL_REPORT("timeout waitinf wakeup event");
+ return -ETIME;
+ }
+
+ IPA_UT_LOG("BEFORE resume\n");
+ rc = ipa_test_mhi_resume();
+ if (rc) {
+ IPA_UT_LOG("resume failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("resume failed");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER resume\n");
+ }
+
+ ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << test_mhi_ctx->cons_hdl));
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(false, timeout);
+ if (timeout) {
+ IPA_UT_LOG("fail: transfer not completed\n");
+ IPA_UT_TEST_FAIL_REPORT("timeout on transferring data");
+ return -EFAULT;
+ }
+
+ /* compare the two buffers */
+ if (memcmp(test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->out_buffer.base,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+ IPA_UT_LOG("fail: buffer are not equal\n");
+ IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+ return -EFAULT;
+ }
+
+ memset(&ep_aggr, 0, sizeof(ep_aggr));
+ rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr);
+ if (rc) {
+ IPA_UT_LOG("failed to configure aggr");
+ IPA_UT_TEST_FAIL_REPORT("fail to disable aggr");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. suspend
+ * 2. queue IN RE (ring element)
+ * 3. allocate skb with data
+ * 4. send it (this will create open aggr frame)
+ * 5. wait for wakeup event - it should arrive
+ * 6. resume
+ * 7. wait for MSI - it should arrive
+ * 8. compare IN and OUT buffers
+ */
+static int ipa_mhi_test_suspend_host_wakeup(void)
+{
+ int rc;
+ int i;
+ bool timeout = true;
+ struct sk_buff *skb;
+
+ reinit_completion(&mhi_test_wakeup_comp);
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ rc = ipa_mhi_test_suspend(false, true);
+ if (rc) {
+ IPA_UT_LOG("suspend failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend fail");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+ memset(test_mhi_ctx->in_buffer.base, 0, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ /* queue RE for IN side and trigger doorbell*/
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+ return rc;
+ }
+
+ skb = dev_alloc_skb(IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ if (!skb) {
+ IPA_UT_LOG("non mem for skb\n");
+ IPA_UT_TEST_FAIL_REPORT("no mem for skb");
+ return -ENOMEM;
+ }
+ skb_put(skb, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) {
+ memset(skb->data + i, i & 0xFF, 1);
+ memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+ }
+
+ rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL);
+ if (rc) {
+ IPA_UT_LOG("ipa_tx_dp failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail");
+ return rc;
+ }
+
+ if (wait_for_completion_timeout(&mhi_test_wakeup_comp, HZ) == 0) {
+ IPA_UT_LOG("timeout waiting for wakeup event\n");
+ IPA_UT_TEST_FAIL_REPORT("timeout waiting for wakeup event");
+ return -ETIME;
+ }
+
+ IPA_UT_LOG("BEFORE resume\n");
+ rc = ipa_test_mhi_resume();
+ if (rc) {
+ IPA_UT_LOG("resume failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("resume fail");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER resume\n");
+
+ /* check for MSI interrupt on one of the channels */
+ IPA_MHI_TEST_CHECK_MSI_INTR(false, timeout);
+ if (timeout) {
+ IPA_UT_LOG("fail: transfer timeout\n");
+ IPA_UT_TEST_FAIL_REPORT("timeout on xfer");
+ return -EFAULT;
+ }
+
+ /* compare the two buffers */
+ if (memcmp(test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->out_buffer.base,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+ IPA_UT_LOG("fail: buffer are not equal\n");
+ IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. queue OUT RE/buffer
+ * 2. wait for MSI on OUT
+ * 3. Repeat 1 and 2 until the MSI wait times out (ch full / HOLB)
+ */
+static int ipa_mhi_test_create_full_channel(int *submitted_packets)
+{
+ int i;
+ bool timeout = true;
+ int rc;
+
+ if (!submitted_packets) {
+ IPA_UT_LOG("Input error\n");
+ return -EINVAL;
+ }
+
+ *submitted_packets = 0;
+
+ for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++)
+ memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+
+ do {
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+ IPA_UT_LOG("submitting OUT buffer\n");
+ timeout = true;
+ /* queue REs for OUT side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ &test_mhi_ctx->out_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("fail OUT q re");
+ return rc;
+ }
+ (*submitted_packets)++;
+
+ IPA_UT_LOG("waiting for MSI\n");
+ for (i = 0; i < 10; i++) {
+ if (*((u32 *)test_mhi_ctx->msi.base) ==
+ (0x10000000 |
+ (IPA_MHI_TEST_FIRST_EVENT_RING_ID))) {
+ IPA_UT_LOG("got MSI\n");
+ timeout = false;
+ break;
+ }
+ msleep(20);
+ }
+ } while (!timeout);
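+ /* no MSI for the last RE means the channel is now full (or in HOLB);
+ * submitted_packets counts every OUT RE queued above
+ */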
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. queue OUT RE/buffer
+ * 2. wait for MSI on OUT
+ * 3. Repeat 1 and 2 until the MSI wait times out (ch full)
+ * 4. suspend - it should fail with -EAGAIN - M1 is rejected
+ * 5. foreach submitted pkt, do the next steps
+ * 6. queue IN RE/buffer
+ * 7. wait for MSI
+ * 8. compare IN and OUT buffers
+ */
+static int ipa_mhi_test_suspend_full_channel(bool force)
+{
+ int rc;
+ bool timeout;
+ int submitted_packets = 0;
+
+ rc = ipa_mhi_test_create_full_channel(&submitted_packets);
+ if (rc) {
+ IPA_UT_LOG("fail create full channel\n");
+ IPA_UT_TEST_FAIL_REPORT("fail create full channel");
+ return rc;
+ }
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ rc = ipa_mhi_test_suspend(force, false);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_suspend did not returned -EAGAIN. rc %d\n",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("test suspend fail");
+ return -EFAULT;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ while (submitted_packets) {
+ memset(test_mhi_ctx->in_buffer.base, 0,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+ timeout = true;
+ /* queue RE for IN side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q re");
+ return rc;
+ }
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+ if (timeout) {
+ IPA_UT_LOG("transfer failed - timeout\n");
+ IPA_UT_TEST_FAIL_REPORT("timeout on xfer");
+ return -EFAULT;
+ }
+
+ /* compare the two buffers */
+ if (memcmp(test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->out_buffer.base,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+ IPA_UT_LOG("buffer are not equal\n");
+ IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+ return -EFAULT;
+ }
+
+ submitted_packets--;
+ }
+
+ return 0;
+}
+
+/**
+ * To be called from test
+ * 1. suspend
+ * 2. reset to M0 state
+ */
+static int ipa_mhi_test_suspend_and_reset(struct ipa_test_mhi_context *ctx)
+{
+ int rc;
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ rc = ipa_mhi_test_suspend(false, true);
+ if (rc) {
+ IPA_UT_LOG("suspend failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend fail");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ rc = ipa_mhi_test_reset(ctx, false);
+ if (rc) {
+ IPA_UT_LOG("reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("reset fail");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. manually update wp
+ * 2. suspend - should succeed
+ * 3. restore wp value
+ */
+static int ipa_mhi_test_suspend_wp_update(void)
+{
+ int rc;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ u64 old_wp;
+ u64 phys_addr;
+
+ /* simulate a write by updating the wp */
+ p_mmio = test_mhi_ctx->mmio_buf.base;
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ old_wp = p_ch_ctx_array->wp;
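+ /* advance wp by 16 bytes (presumably one ring element) without ringing
+ * the doorbell, simulating a host write that suspend should still handle
+ */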
+ p_ch_ctx_array->wp += 16;
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ rc = ipa_mhi_test_suspend(false, false);
+ if (rc) {
+ IPA_UT_LOG("suspend failed rc %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend fail");
+ p_ch_ctx_array->wp = old_wp;
+ return rc;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ p_ch_ctx_array->wp = old_wp;
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. create open aggr by sending data
+ * 2. channel reset (disconnect/connect)
+ * 3. validate no aggr. open after reset
+ * 4. disable aggr.
+ */
+static int ipa_mhi_test_channel_reset_aggr_open(void)
+{
+ int rc;
+ u32 aggr_state_active;
+ struct ipa_ep_cfg_aggr ep_aggr;
+
+ IPA_UT_LOG("Entry\n");
+
+ rc = ipa_mhi_test_create_aggr_open_frame();
+ if (rc) {
+ IPA_UT_LOG("failed create open aggr rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail creare open aggr frame");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_channel_reset();
+ if (rc) {
+ IPA_UT_LOG("channel reset failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("channel reset fail");
+ return rc;
+ }
+
+ aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+ IPADBG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active);
+ if (aggr_state_active != 0) {
+ IPA_UT_LOG("aggregation frame open after reset!\n");
+ IPA_UT_LOG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active);
+ IPA_UT_TEST_FAIL_REPORT("open aggr after reset");
+ return -EFAULT;
+ }
+
+ memset(&ep_aggr, 0, sizeof(ep_aggr));
+ rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr);
+ if (rc) {
+ IPA_UT_LOG("failed to configure aggr");
+ IPA_UT_TEST_FAIL_REPORT("fail to disable aggr");
+ return rc;
+ }
+
+ return rc;
+}
+
+/**
+ * To be run during test
+ * 1. queue OUT RE/buffer
+ * 2. wait for MSI on OUT
+ * 3. Repeat 1 and 2 until the MSI wait times out (ch full)
+ * 4. channel reset
+ * disconnect and reconnect the prod and cons
+ * 5. queue IN RE/buffer and ring DB
+ * 6. wait for MSI - should get timeout as channels were reset
+ * 7. reset again
+ */
+static int ipa_mhi_test_channel_reset_ipa_holb(void)
+{
+ int rc;
+ int submitted_packets = 0;
+ bool timeout;
+
+ IPA_UT_LOG("Entry\n");
+
+ rc = ipa_mhi_test_create_full_channel(&submitted_packets);
+ if (rc) {
+ IPA_UT_LOG("fail create full channel rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail create full channel");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_channel_reset();
+ if (rc) {
+ IPA_UT_LOG("channel reset failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("channel reset fail");
+ return rc;
+ }
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+ timeout = true;
+ /* queue RE for IN side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q re");
+ return rc;
+ }
+ submitted_packets--;
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+ if (!timeout) {
+ IPA_UT_LOG("transfer succeed although we had reset\n");
+ IPA_UT_TEST_FAIL_REPORT("xfer succeed although we had reset");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_channel_reset();
+ if (rc) {
+ IPA_UT_LOG("channel reset failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("channel reset fail");
+ return rc;
+ }
+
+ return rc;
+}
+
+
+/**
+ * TEST: mhi reset in READY state
+ * 1. init to ready state (without start and connect)
+ * 2. reset (destroy and re-init)
+ * 3. destroy
+ */
+static int ipa_mhi_test_reset_ready_state(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(true);
+ if (rc) {
+ IPA_UT_LOG("init to Ready state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail to init to ready state");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_reset(ctx, true);
+ if (rc) {
+ IPA_UT_LOG("reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi reset in M0 state
+ * 1. init to M0 state (with start and connect)
+ * 2. reset (destroy and re-init)
+ * 3. destroy
+ */
+static int ipa_mhi_test_reset_m0_state(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT
+ ("fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_reset(ctx, false);
+ if (rc) {
+ IPA_UT_LOG("reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi in-loop reset in M0 state
+ * 1. init to M0 state (with start and connect)
+ * 2. reset (destroy and re-init) in-loop
+ * 3. destroy
+ */
+static int ipa_mhi_test_inloop_reset_m0_state(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT
+ ("fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_reset, rc, ctx, false);
+ if (rc) {
+ IPA_UT_LOG("in-loop reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "reset (destroy/re-init) in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data with reset
+ * 1. init to M0 state (with start and connect)
+ * 2. reset (destroy and re-init)
+ * 3. loopback data
+ * 4. reset (destroy and re-init)
+ * 5. loopback data again
+ * 6. destroy
+ */
+static int ipa_mhi_test_loopback_data_with_reset(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_reset(ctx, false);
+ if (rc) {
+ IPA_UT_LOG("reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_reset(ctx, false);
+ if (rc) {
+ IPA_UT_LOG("reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi reset in suspend state
+ * 1. init to M0 state (with start and connect)
+ * 2. suspend
+ * 3. reset (destroy and re-init)
+ * 4. destroy
+ */
+static int ipa_mhi_test_reset_on_suspend(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_suspend_and_reset(ctx);
+ if (rc) {
+ IPA_UT_LOG("suspend and reset failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend and then reset failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi in-loop reset in suspend state
+ * 1. init to M0 state (with start and connect)
+ * 2. suspend
+ * 3. reset (destroy and re-init)
+ * 4. Do 2 and 3 in loop
+ * 5. destroy
+ */
+static int ipa_mhi_test_inloop_reset_on_suspend(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_and_reset, rc, ctx);
+ if (rc) {
+ IPA_UT_LOG("in-loop reset in suspend failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail to in-loop reset while suspend");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data with reset on suspend
+ * 1. init to M0 state (with start and connect)
+ * 2. suspend
+ * 3. reset (destroy and re-init)
+ * 4. loopback data
+ * 5. suspend
+ * 6. reset (destroy and re-init)
+ * 7. destroy
+ */
+static int ipa_mhi_test_loopback_data_with_reset_on_suspend(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_suspend_and_reset(ctx);
+ if (rc) {
+ IPA_UT_LOG("suspend and reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail to suspend and then reset");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_suspend_and_reset(ctx);
+ if (rc) {
+ IPA_UT_LOG("suspend and reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail to suspend and then reset");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend/resume
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop suspend/resume
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_resume(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_resume, rc);
+ if (rc) {
+ IPA_UT_LOG("suspend resume failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("in loop suspend/resume failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend/resume with aggr open
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop suspend/resume with open aggr.
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_resume_aggr_open(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_aggr_open,
+ rc, false);
+ if (rc) {
+ IPA_UT_LOG("suspend resume with aggr open failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop suspend/resume with open aggr failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop force suspend/resume with aggr open
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop force suspend/resume with open aggr.
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_force_suspend_resume_aggr_open(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_aggr_open,
+ rc, true);
+ if (rc) {
+ IPA_UT_LOG("force suspend resume with aggr open failed rc=%d",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop force suspend/resume with open aggr failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend/host wakeup resume
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop suspend/resume with host wakeup
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_host_wakeup(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_host_wakeup, rc);
+ if (rc) {
+ IPA_UT_LOG("suspend host wakeup resume failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop suspend/resume with hsot wakeup failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop rejected suspend due to full channel
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop rejected suspend
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_reject_suspend_full_channel(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_full_channel,
+ rc, false);
+ if (rc) {
+ IPA_UT_LOG("full channel rejected suspend failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop rejected suspend due to full channel failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop rejected force suspend due to full channel
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop rejected force suspend
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_reject_force_suspend_full_channel(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_full_channel,
+ rc, true);
+ if (rc) {
+ IPA_UT_LOG("full channel rejected force suspend failed rc=%d",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop force rejected suspend as full ch failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend with manual wp update
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop suspend after wp update
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_resume_wp_update(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_wp_update, rc);
+ if (rc) {
+ IPA_UT_LOG("suspend after wp update failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop suspend after wp update failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop channel reset (disconnect/connect)
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop channel reset (disconnect/connect)
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_channel_reset(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset, rc);
+ if (rc) {
+ IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("in loop channel reset failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop channel reset with open aggr frame
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop channel reset (disconnect/connect) with open aggr
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_channel_reset_aggr_open(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset_aggr_open, rc);
+ if (rc) {
+ IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop channel reset with open aggr failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop channel reset with channel in HOLB
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop channel reset (disconnect/connect) with channel in HOLB
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_channel_reset_ipa_holb(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset_ipa_holb, rc);
+ if (rc) {
+ IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop channel reset with channel HOLB failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Suite definition block */
+IPA_UT_DEFINE_SUITE_START(mhi, "MHI for GSI",
+ ipa_test_mhi_suite_setup, ipa_test_mhi_suite_teardown)
+{
+ IPA_UT_ADD_TEST(reset_ready_state,
+ "reset test in Ready state",
+ ipa_mhi_test_reset_ready_state,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(reset_m0_state,
+ "reset test in M0 state",
+ ipa_mhi_test_reset_m0_state,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(inloop_reset_m0_state,
+ "several reset iterations in M0 state",
+ ipa_mhi_test_inloop_reset_m0_state,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(loopback_data_with_reset_on_m0,
+ "reset before and after loopback data in M0 state",
+ ipa_mhi_test_loopback_data_with_reset,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(reset_on_suspend,
+ "reset test in suspend state",
+ ipa_mhi_test_reset_on_suspend,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(inloop_reset_on_suspend,
+ "several reset iterations in suspend state",
+ ipa_mhi_test_inloop_reset_on_suspend,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(loopback_data_with_reset_on_suspend,
+ "reset before and after loopback data in suspend state",
+ ipa_mhi_test_loopback_data_with_reset_on_suspend,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(suspend_resume,
+ "several suspend/resume iterations",
+ ipa_mhi_test_in_loop_suspend_resume,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(suspend_resume_with_open_aggr,
+ "several suspend/resume iterations with open aggregation frame",
+ ipa_mhi_test_in_loop_suspend_resume_aggr_open,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(force_suspend_resume_with_open_aggr,
+ "several force suspend/resume iterations with open aggregation frame",
+ ipa_mhi_test_in_loop_force_suspend_resume_aggr_open,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(suspend_resume_with_host_wakeup,
+ "several suspend and host wakeup resume iterations",
+ ipa_mhi_test_in_loop_suspend_host_wakeup,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(reject_suspend_channel_full,
+ "several rejected suspend iterations due to full channel",
+ ipa_mhi_test_in_loop_reject_suspend_full_channel,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(reject_force_suspend_channel_full,
+ "several rejected force suspend iterations due to full channel",
+ ipa_mhi_test_in_loop_reject_force_suspend_full_channel,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(suspend_resume_manual_wp_update,
+ "several suspend/resume iterations with after simulating writing by wp manual update",
+ ipa_mhi_test_in_loop_suspend_resume_wp_update,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(channel_reset,
+ "several channel reset (disconnect/connect) iterations",
+ ipa_mhi_test_in_loop_channel_reset,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(channel_reset_aggr_open,
+ "several channel reset (disconnect/connect) iterations with open aggregation frame",
+ ipa_mhi_test_in_loop_channel_reset_aggr_open,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(channel_reset_ipa_holb,
+ "several channel reset (disconnect/connect) iterations with channel in HOLB state",
+ ipa_mhi_test_in_loop_channel_reset_ipa_holb,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+} IPA_UT_DEFINE_SUITE_END(mhi);
+
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
index 8816fc37c2e2..3bf9ac11f2d1 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_framework.c
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
@@ -84,8 +84,60 @@ static const struct file_operations ipa_ut_dbgfs_regression_test_fops = {
static struct ipa_ut_context *ipa_ut_ctx;
char *_IPA_UT_TEST_LOG_BUF_NAME;
-struct ipa_ut_tst_fail_report _IPA_UT_TEST_FAIL_REPORT_DATA;
+struct ipa_ut_tst_fail_report
+ _IPA_UT_TEST_FAIL_REPORT_DATA[_IPA_UT_TEST_FAIL_REPORT_SIZE];
+u32 _IPA_UT_TEST_FAIL_REPORT_IDX;
+/**
+ * ipa_ut_print_log_buf() - Dump given buffer via kernel error mechanism
+ * @buf: Buffer to print
+ *
+ * Tokenize the string according to new-line and then print
+ *
+ * Note: Assumes lock acquired
+ */
+static void ipa_ut_print_log_buf(char *buf)
+{
+ char *token;
+
+ if (!buf) {
+ IPA_UT_ERR("Input error - no buf\n");
+ return;
+ }
+
+ for (token = strsep(&buf, "\n"); token; token = strsep(&buf, "\n"))
+ pr_err("%s\n", token);
+}
+
+/**
+ * ipa_ut_dump_fail_report_stack() - dump the report info stack via kernel err
+ *
+ * Note: Assumes lock acquired
+ */
+static void ipa_ut_dump_fail_report_stack(void)
+{
+ int i;
+
+ IPA_UT_DBG("Entry\n");
+
+ if (_IPA_UT_TEST_FAIL_REPORT_IDX == 0) {
+ IPA_UT_DBG("no report info\n");
+ return;
+ }
+
+ for (i = 0 ; i < _IPA_UT_TEST_FAIL_REPORT_IDX; i++) {
+ if (i == 0)
+ pr_err("***** FAIL INFO STACK *****:\n");
+ else
+ pr_err("Called From:\n");
+
+ pr_err("\tFILE = %s\n\tFUNC = %s()\n\tLINE = %d\n",
+ _IPA_UT_TEST_FAIL_REPORT_DATA[i].file,
+ _IPA_UT_TEST_FAIL_REPORT_DATA[i].func,
+ _IPA_UT_TEST_FAIL_REPORT_DATA[i].line);
+ pr_err("\t%s\n", _IPA_UT_TEST_FAIL_REPORT_DATA[i].info);
+ }
+}
/**
* ipa_ut_show_suite_exec_summary() - Show tests run summary
@@ -231,14 +283,14 @@ static ssize_t ipa_ut_dbgfs_meta_test_write(struct file *file,
}
_IPA_UT_TEST_LOG_BUF_NAME[0] = '\0';
- _IPA_UT_TEST_FAIL_REPORT_DATA.valid = false;
+ _IPA_UT_TEST_FAIL_REPORT_IDX = 0;
pr_info("*** Test '%s': Running... ***\n",
suite->tests[i].name);
rc = suite->tests[i].run(suite->meta_data->priv);
if (rc) {
tst_fail = true;
suite->tests[i].res = IPA_UT_TEST_RES_FAIL;
- pr_info("%s", _IPA_UT_TEST_LOG_BUF_NAME);
+ ipa_ut_print_log_buf(_IPA_UT_TEST_LOG_BUF_NAME);
} else {
suite->tests[i].res = IPA_UT_TEST_RES_SUCCESS;
}
@@ -246,14 +298,8 @@ static ssize_t ipa_ut_dbgfs_meta_test_write(struct file *file,
pr_info(">>>>>>**** TEST '%s': %s ****<<<<<<\n",
suite->tests[i].name, tst_fail ? "FAIL" : "SUCCESS");
- if (tst_fail && _IPA_UT_TEST_FAIL_REPORT_DATA.valid) {
- pr_info("*** FAIL INFO:\n");
- pr_info("\tFILE = %s\n\tFUNC = %s()\n\tLINE = %d\n",
- _IPA_UT_TEST_FAIL_REPORT_DATA.file,
- _IPA_UT_TEST_FAIL_REPORT_DATA.func,
- _IPA_UT_TEST_FAIL_REPORT_DATA.line);
- pr_info("\t%s\n", _IPA_UT_TEST_FAIL_REPORT_DATA.info);
- }
+ if (tst_fail)
+ ipa_ut_dump_fail_report_stack();
pr_info("\n");
}
@@ -279,6 +325,7 @@ release_clock:
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT");
free_mem:
kfree(_IPA_UT_TEST_LOG_BUF_NAME);
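+ /* avoid a dangling pointer; IPA_UT_LOG then falls back to pr_err */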
+ _IPA_UT_TEST_LOG_BUF_NAME = NULL;
unlock_mutex:
mutex_unlock(&ipa_ut_ctx->lock);
return ((!rc && !tst_fail) ? count : -EFAULT);
@@ -451,10 +498,16 @@ static ssize_t ipa_ut_dbgfs_test_write(struct file *file,
goto free_mem;
}
+ suite = test->suite;
+ if (!suite || !suite->meta_data) {
+ IPA_UT_ERR("test %s with invalid suite\n", test->name);
+ rc = -EINVAL;
+ goto free_mem;
+ }
+
IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT");
- suite = test->suite;
- if (suite && suite->meta_data->setup) {
+ if (suite->meta_data->setup) {
IPA_UT_DBG("*** Suite '%s': Run setup ***\n",
suite->meta_data->name);
rc = suite->meta_data->setup(&suite->meta_data->priv);
@@ -470,27 +523,20 @@ static ssize_t ipa_ut_dbgfs_test_write(struct file *file,
}
IPA_UT_DBG("*** Test '%s': Running... ***\n", test->name);
- _IPA_UT_TEST_FAIL_REPORT_DATA.valid = false;
+ _IPA_UT_TEST_FAIL_REPORT_IDX = 0;
rc = test->run(suite->meta_data->priv);
if (rc)
tst_fail = true;
IPA_UT_DBG("*** Test %s - ***\n", tst_fail ? "FAIL" : "SUCCESS");
if (tst_fail) {
pr_info("=================>>>>>>>>>>>\n");
- pr_info("%s\n", _IPA_UT_TEST_LOG_BUF_NAME);
+ ipa_ut_print_log_buf(_IPA_UT_TEST_LOG_BUF_NAME);
pr_info("**** TEST %s FAILED ****\n", test->name);
- if (_IPA_UT_TEST_FAIL_REPORT_DATA.valid) {
- pr_info("*** FAIL INFO:\n");
- pr_info("\tFILE = %s\n\tFUNC = %s()\n\tLINE = %d\n",
- _IPA_UT_TEST_FAIL_REPORT_DATA.file,
- _IPA_UT_TEST_FAIL_REPORT_DATA.func,
- _IPA_UT_TEST_FAIL_REPORT_DATA.line);
- pr_info("\t%s\n", _IPA_UT_TEST_FAIL_REPORT_DATA.info);
- }
+ ipa_ut_dump_fail_report_stack();
pr_info("<<<<<<<<<<<=================\n");
}
- if (suite && suite->meta_data->teardown) {
+ if (suite->meta_data->teardown) {
IPA_UT_DBG("*** Suite '%s': Run Teardown ***\n",
suite->meta_data->name);
rc = suite->meta_data->teardown(suite->meta_data->priv);
@@ -509,6 +555,7 @@ release_clock:
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT");
free_mem:
kfree(_IPA_UT_TEST_LOG_BUF_NAME);
+ _IPA_UT_TEST_LOG_BUF_NAME = NULL;
unlock_mutex:
mutex_unlock(&ipa_ut_ctx->lock);
return ((!rc && !tst_fail) ? count : -EFAULT);
@@ -856,6 +903,7 @@ static int ipa_ut_framework_init(void)
goto fail_clean_dbgfs;
}
+ _IPA_UT_TEST_FAIL_REPORT_IDX = 0;
ipa_ut_ctx->inited = true;
IPA_UT_DBG("Done\n");
ret = 0;
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.h b/drivers/platform/msm/ipa/test/ipa_ut_framework.h
index 177be51bfe7d..e3884d6b14e3 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_framework.h
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.h
@@ -75,20 +75,31 @@ struct ipa_ut_tst_fail_report {
/**
* Report on test failure
- * To be used by tests.
+ * To be used by tests to report a point were a test fail.
+ * Failures are saved in a stack manner.
+ * Dumping the failure info will dump the fail reports
+ * from all the function in the calling stack
*/
#define IPA_UT_TEST_FAIL_REPORT(__info) \
do { \
extern struct ipa_ut_tst_fail_report \
- _IPA_UT_TEST_FAIL_REPORT_DATA; \
- _IPA_UT_TEST_FAIL_REPORT_DATA.valid = true; \
- _IPA_UT_TEST_FAIL_REPORT_DATA.file = __FILENAME__; \
- _IPA_UT_TEST_FAIL_REPORT_DATA.line = __LINE__; \
- _IPA_UT_TEST_FAIL_REPORT_DATA.func = __func__; \
+ _IPA_UT_TEST_FAIL_REPORT_DATA \
+ [_IPA_UT_TEST_FAIL_REPORT_SIZE]; \
+ extern u32 _IPA_UT_TEST_FAIL_REPORT_IDX; \
+ struct ipa_ut_tst_fail_report *entry; \
+ if (_IPA_UT_TEST_FAIL_REPORT_IDX >= \
+ _IPA_UT_TEST_FAIL_REPORT_SIZE) \
+ break; \
+ entry = &(_IPA_UT_TEST_FAIL_REPORT_DATA \
+ [_IPA_UT_TEST_FAIL_REPORT_IDX]); \
+ entry->file = __FILENAME__; \
+ entry->line = __LINE__; \
+ entry->func = __func__; \
if (__info) \
- _IPA_UT_TEST_FAIL_REPORT_DATA.info = __info; \
+ entry->info = __info; \
else \
- _IPA_UT_TEST_FAIL_REPORT_DATA.info = ""; \
+ entry->info = ""; \
+ _IPA_UT_TEST_FAIL_REPORT_IDX++; \
} while (0)
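+
+/*
+ * Example (hypothetical): a failing helper calls
+ *   IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+ * and its caller then adds
+ *   IPA_UT_TEST_FAIL_REPORT("loopback data failed");
+ * the dump prints the helper's entry under "FAIL INFO STACK" and the
+ * caller's entry under "Called From:", mirroring the call stack.
+ */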
/**
@@ -100,10 +111,17 @@ struct ipa_ut_tst_fail_report {
do { \
extern char *_IPA_UT_TEST_LOG_BUF_NAME; \
char __buf[512]; \
- IPA_UT_DBG(fmt, args); \
+ IPA_UT_DBG(fmt, ## args); \
+ if (!_IPA_UT_TEST_LOG_BUF_NAME) {\
+ pr_err(IPA_UT_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ break; \
+ } \
scnprintf(__buf, sizeof(__buf), \
- fmt, args); \
- strlcat(_IPA_UT_TEST_LOG_BUF_NAME, __buf, sizeof(__buf)); \
+ " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ strlcat(_IPA_UT_TEST_LOG_BUF_NAME, __buf, \
+ _IPA_UT_TEST_LOG_BUF_SIZE); \
} while (0)
/**
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_i.h b/drivers/platform/msm/ipa/test/ipa_ut_i.h
index 7cf5e53d0af1..973debfbd900 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_i.h
+++ b/drivers/platform/msm/ipa/test/ipa_ut_i.h
@@ -37,10 +37,12 @@
/* Test Log buffer name and size */
#define _IPA_UT_TEST_LOG_BUF_NAME ipa_ut_tst_log_buf
-#define _IPA_UT_TEST_LOG_BUF_SIZE 2048
+#define _IPA_UT_TEST_LOG_BUF_SIZE 8192
/* Global structure for test fail execution result information */
#define _IPA_UT_TEST_FAIL_REPORT_DATA ipa_ut_tst_fail_report_data
+#define _IPA_UT_TEST_FAIL_REPORT_SIZE 5
+#define _IPA_UT_TEST_FAIL_REPORT_IDX ipa_ut_tst_fail_report_data_index
/* Start/End definitions of the array of suites */
#define IPA_UT_DEFINE_ALL_SUITES_START \
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
index 615ba671ebaa..944800f8e4be 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
+++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
@@ -20,6 +20,7 @@
* Declare every suite here so that it will be found later below
* No importance for order.
*/
+IPA_UT_DECLARE_SUITE(mhi);
IPA_UT_DECLARE_SUITE(example);
@@ -29,6 +30,7 @@ IPA_UT_DECLARE_SUITE(example);
*/
IPA_UT_DEFINE_ALL_SUITES_START
{
+ IPA_UT_REGISTER_SUITE(mhi),
IPA_UT_REGISTER_SUITE(example),
} IPA_UT_DEFINE_ALL_SUITES_END;
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index 83cb87f94665..90a93064ca84 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -528,6 +528,9 @@ static int smb2_batt_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
rc = smblib_set_prop_system_temp_level(chg, val);
break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ rc = smblib_set_prop_batt_capacity(chg, val);
+ break;
default:
rc = -EINVAL;
}
@@ -541,6 +544,7 @@ static int smb2_batt_prop_is_writeable(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ case POWER_SUPPLY_PROP_CAPACITY:
return 1;
default:
break;
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index 18b02fbde5a6..68021f32fafa 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -134,7 +134,10 @@ int smblib_get_charge_param(struct smb_charger *chg,
return rc;
}
- *val_u = val_raw * param->step_u + param->min_u;
+ if (param->get_proc)
+ *val_u = param->get_proc(param, val_raw);
+ else
+ *val_u = val_raw * param->step_u + param->min_u;
smblib_dbg(chg, PR_REGISTER, "%s = %d (0x%02x)\n",
param->name, *val_u, val_raw);
@@ -216,13 +219,20 @@ int smblib_set_charge_param(struct smb_charger *chg,
int rc = 0;
u8 val_raw;
- if (val_u > param->max_u || val_u < param->min_u) {
- dev_err(chg->dev, "%s: %d is out of range [%d, %d]\n",
- param->name, val_u, param->min_u, param->max_u);
- return -EINVAL;
+ if (param->set_proc) {
+ rc = param->set_proc(param, val_u, &val_raw);
+ if (rc < 0)
+ return -EINVAL;
+ } else {
+ if (val_u > param->max_u || val_u < param->min_u) {
+ dev_err(chg->dev, "%s: %d is out of range [%d, %d]\n",
+ param->name, val_u, param->min_u, param->max_u);
+ return -EINVAL;
+ }
+
+ val_raw = (val_u - param->min_u) / param->step_u;
}
- val_raw = (val_u - param->min_u) / param->step_u;
rc = smblib_write(chg, param->reg, val_raw);
if (rc < 0) {
dev_err(chg->dev, "%s: Couldn't write 0x%02x to 0x%04x rc=%d\n",
@@ -749,6 +759,11 @@ int smblib_get_prop_batt_capacity(struct smb_charger *chg,
{
int rc = -EINVAL;
+ if (chg->fake_capacity >= 0) {
+ val->intval = chg->fake_capacity;
+ return 0;
+ }
+
if (chg->bms_psy)
rc = power_supply_get_property(chg->bms_psy,
POWER_SUPPLY_PROP_CAPACITY, val);
@@ -903,6 +918,16 @@ int smblib_set_prop_input_suspend(struct smb_charger *chg,
return rc;
}
+int smblib_set_prop_batt_capacity(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ chg->fake_capacity = val->intval;
+
+ power_supply_changed(chg->batt_psy);
+
+ return 0;
+}
+
int smblib_set_prop_system_temp_level(struct smb_charger *chg,
const union power_supply_propval *val)
{
@@ -1907,6 +1932,7 @@ int smblib_init(struct smb_charger *chg)
INIT_WORK(&chg->pl_detect_work, smblib_pl_detect_work);
INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
INIT_DELAYED_WORK(&chg->pl_taper_work, smblib_pl_taper_work);
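+ /* negative means no fake capacity has been forced via the CAPACITY prop */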
+ chg->fake_capacity = -EINVAL;
switch (chg->mode) {
case PARALLEL_MASTER:
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index 47839074b724..b56cd24adde1 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -56,6 +56,11 @@ struct smb_chg_param {
int min_u;
int max_u;
int step_u;
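+ /* optional conversion hooks for parameters whose raw <-> microunit
+ * mapping is not linear; when absent, the linear min_u/step_u
+ * conversion in smblib_get/set_charge_param is used
+ */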
+ int (*get_proc)(struct smb_chg_param *param,
+ u8 val_raw);
+ int (*set_proc)(struct smb_chg_param *param,
+ int val_u,
+ u8 *val_raw);
};
struct smb_params {
@@ -138,6 +143,8 @@ struct smb_charger {
int system_temp_level;
int thermal_levels;
int *thermal_mitigation;
+
+ int fake_capacity;
};
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -189,6 +196,8 @@ int smblib_get_prop_system_temp_level(struct smb_charger *chg,
int smblib_set_prop_input_suspend(struct smb_charger *chg,
const union power_supply_propval *val);
+int smblib_set_prop_batt_capacity(struct smb_charger *chg,
+ const union power_supply_propval *val);
int smblib_set_prop_system_temp_level(struct smb_charger *chg,
const union power_supply_propval *val);
diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c
index 329ee7210c1f..86cc33badbc0 100644
--- a/drivers/power/qcom-charger/smb138x-charger.c
+++ b/drivers/power/qcom-charger/smb138x-charger.c
@@ -311,9 +311,11 @@ static int smb138x_batt_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
rc = smblib_set_prop_input_suspend(chg, val);
break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ rc = smblib_set_prop_batt_capacity(chg, val);
+ break;
default:
- pr_err("batt power supply set prop %d not supported\n",
- prop);
+ pr_err("batt power supply set prop %d not supported\n", prop);
return -EINVAL;
}
@@ -325,6 +327,7 @@ static int smb138x_batt_prop_is_writeable(struct power_supply *psy,
{
switch (prop) {
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+ case POWER_SUPPLY_PROP_CAPACITY:
return 1;
default:
break;
diff --git a/drivers/regulator/cpr3-mmss-regulator.c b/drivers/regulator/cpr3-mmss-regulator.c
index 1e18ce73743d..fe5dbbeac15e 100644
--- a/drivers/regulator/cpr3-mmss-regulator.c
+++ b/drivers/regulator/cpr3-mmss-regulator.c
@@ -217,10 +217,10 @@ msmcobalt_v1_rev0_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
};
static const int msmcobalt_v2_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
- 516000,
- 628000,
- 752000,
- 924000,
+ 616000,
+ 740000,
+ 828000,
+ 1024000,
};
#define MSM8996_MMSS_FUSE_STEP_VOLT 10000
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index 9e400a9eee5c..6e8db03fe16e 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -5935,14 +5935,11 @@ static int cpr3_panic_callback(struct notifier_block *nfb,
struct cpr3_controller, panic_notifier);
struct cpr3_panic_regs_info *regs_info = ctrl->panic_regs_info;
struct cpr3_reg_info *reg;
- void __iomem *virt_addr;
int i = 0;
for (i = 0; i < regs_info->reg_count; i++) {
reg = &(regs_info->regs[i]);
- virt_addr = ioremap(reg->addr, 0x4);
- reg->value = readl_relaxed(virt_addr);
- iounmap(virt_addr);
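+ /* use the mapping created at init time; ioremap() may sleep, so it is
+ * presumably avoided in this panic-notifier path
+ */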
+ reg->value = readl_relaxed(reg->virt_addr);
pr_err("%s[0x%08x] = 0x%08x\n", reg->name, reg->addr,
reg->value);
}
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 3ddc1dc3c982..0907518722df 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -498,6 +498,7 @@ struct cpr3_aging_sensor_info {
* @name: Register name
* @addr: Register physical address
* @value: Register content
+ * @virt_addr: Register virtual address
*
* This data structure is used to dump some critical register contents
* when the device crashes due to a kernel panic.
@@ -506,6 +507,7 @@ struct cpr3_reg_info {
const char *name;
u32 addr;
u32 value;
+ void __iomem *virt_addr;
};
/**
diff --git a/drivers/regulator/cpr3-util.c b/drivers/regulator/cpr3-util.c
index 34b51ec8cab8..9d55e9af2e7c 100644
--- a/drivers/regulator/cpr3-util.c
+++ b/drivers/regulator/cpr3-util.c
@@ -1078,6 +1078,12 @@ static int cpr3_panic_notifier_init(struct cpr3_controller *ctrl)
rc);
return rc;
}
+ regs[i].virt_addr = devm_ioremap(ctrl->dev, regs[i].addr, 0x4);
+ if (!regs[i].virt_addr) {
+ pr_err("Unable to map panic register addr 0x%08x\n",
+ regs[i].addr);
+ return -EINVAL;
+ }
regs[i].value = 0xFFFFFFFF;
}
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 1c444d6d2607..083459f96ac4 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -51,6 +51,9 @@
* @speed_bin: Application processor speed bin fuse parameter value for
* the given chip
* @cpr_fusing_rev: CPR fusing revision fuse parameter value
+ * @force_highest_corner: Flag indicating that all corners must operate
+ * at the voltage of the highest corner. This is
+ * applicable to MSMCOBALT only.
*
* This struct holds the values for all of the fuses read from memory.
*/
@@ -61,6 +64,7 @@ struct cprh_msmcobalt_kbss_fuses {
u64 quot_offset[MSMCOBALT_KBSS_FUSE_CORNERS];
u64 speed_bin;
u64 cpr_fusing_rev;
+ u64 force_highest_corner;
};
/*
@@ -181,6 +185,12 @@ static const struct cpr3_fuse_param msmcobalt_kbss_speed_bin_param[] = {
{},
};
+static const struct cpr3_fuse_param
+msmcobalt_cpr_force_highest_corner_param[] = {
+ {100, 45, 45},
+ {},
+};
+
/*
* Open loop voltage fuse reference voltages in microvolts for MSMCOBALT v1
*/
@@ -301,6 +311,18 @@ static int cprh_msmcobalt_kbss_read_fuse_data(struct cpr3_regulator *vreg)
}
+ rc = cpr3_read_fuse_param(base,
+ msmcobalt_cpr_force_highest_corner_param,
+ &fuse->force_highest_corner);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read CPR force highest corner fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (fuse->force_highest_corner)
+ cpr3_info(vreg, "Fusing requires all operation at the highest corner\n");
+
vreg->fuse_combo = fuse->cpr_fusing_rev + 8 * fuse->speed_bin;
if (vreg->fuse_combo >= CPRH_MSMCOBALT_KBSS_FUSE_COMBO_COUNT) {
cpr3_err(vreg, "invalid CPR fuse combo = %d found\n",
@@ -485,6 +507,54 @@ done:
}
/**
+ * cprh_msmcobalt_partial_binning_override() - override the voltage and quotient
+ * settings for low corners based upon special partial binning
+ * fuse values
+ *
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Some parts are not able to operate at low voltages. The force highest
+ * corner fuse specifies if a given part must operate with voltages
+ * corresponding to the highest corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_msmcobalt_partial_binning_override(struct cpr3_regulator *vreg)
+{
+ struct cprh_msmcobalt_kbss_fuses *fuse = vreg->platform_fuses;
+ struct cpr3_corner *corner;
+ struct cpr4_sdelta *sdelta;
+ int i;
+ u32 proc_freq;
+
+ if (fuse->force_highest_corner) {
+ cpr3_info(vreg, "overriding CPR parameters for corners 0 to %d with quotients and voltages of corner %d\n",
+ vreg->corner_count - 2, vreg->corner_count - 1);
+ corner = &vreg->corner[vreg->corner_count - 1];
+ for (i = 0; i < vreg->corner_count - 1; i++) {
+ proc_freq = vreg->corner[i].proc_freq;
+ sdelta = vreg->corner[i].sdelta;
+ if (sdelta) {
+ if (sdelta->table)
+ devm_kfree(vreg->thread->ctrl->dev,
+ sdelta->table);
+ if (sdelta->boost_table)
+ devm_kfree(vreg->thread->ctrl->dev,
+ sdelta->boost_table);
+ devm_kfree(vreg->thread->ctrl->dev,
+ sdelta);
+ }
+ vreg->corner[i] = *corner;
+ vreg->corner[i].proc_freq = proc_freq;
+ }
+
+ return 0;
+ }
+
+ return 0;
+}
+
+/**
* cprh_kbss_parse_core_count_temp_adj_properties() - load device tree
* properties associated with per-corner-band and temperature
* voltage adjustments.
@@ -1201,6 +1271,13 @@ static int cprh_kbss_init_regulator(struct cpr3_regulator *vreg)
return -EINVAL;
}
+ rc = cprh_msmcobalt_partial_binning_override(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to override CPR parameters based on partial binning fuse values, rc=%d\n",
+ rc);
+ return rc;
+ }
+
rc = cprh_kbss_apm_crossover_as_corner(vreg);
if (rc) {
cpr3_err(vreg, "unable to introduce APM voltage crossover corner, rc=%d\n",
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index f3b4b6c08571..d6e372fc7922 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -1412,8 +1412,10 @@ static ssize_t ufsdbg_reset_controller_write(struct file *filp,
struct ufs_hba *hba = filp->f_mapping->host->i_private;
unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, flags);
/*
* simulating a dummy error in order to "convince"
* eh_work to actually reset the controller
@@ -1421,9 +1423,13 @@ static ssize_t ufsdbg_reset_controller_write(struct file *filp,
hba->saved_err |= INT_FATAL_ERRORS;
hba->silence_err_logs = true;
schedule_work(&hba->eh_work);
-
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->eh_work);
+
+ ufshcd_release(hba, false);
+ pm_runtime_put_sync(hba->dev);
+
return cnt;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 1fdfadf5e1b9..ce779d760c69 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2630,6 +2630,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
return SCSI_MLQUEUE_HOST_BUSY;
spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* if error handling is in progress, return host busy */
+ if (ufshcd_eh_in_progress(hba)) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out_unlock;
+ }
+
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
@@ -2647,13 +2654,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
cmd->scsi_done(cmd);
goto out_unlock;
}
-
- /* if error handling is in progress, don't issue commands */
- if (ufshcd_eh_in_progress(hba)) {
- set_host_byte(cmd, DID_ERROR);
- cmd->scsi_done(cmd);
- goto out_unlock;
- }
spin_unlock_irqrestore(hba->host->host_lock, flags);
hba->req_abort_count = 0;
@@ -4039,31 +4039,49 @@ out:
static int ufshcd_link_recovery(struct ufs_hba *hba)
{
- int ret;
+ int ret = 0;
unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+	/*
+	 * Check if there is any race with fatal error handling.
+	 * If so, wait for it to complete. Even though fatal error
+	 * handling may itself do a reset and restore, don't assume
+	 * anything about its outcome; we are only avoiding the race here.
+	 */
+ do {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ flush_work(&hba->eh_work);
+ } while (1);
- ret = ufshcd_vops_full_reset(hba);
- if (ret)
- dev_warn(hba->dev,
- "full reset returned %d, trying to recover the link\n",
- ret);
- ret = ufshcd_host_reset_and_restore(hba);
+	/*
+	 * We don't know whether the previous reset actually reset the host
+	 * controller, so force a reset here to be sure.
+	 */
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->force_host_reset = true;
+ schedule_work(&hba->eh_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (ret)
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- ufshcd_clear_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* wait for the reset work to finish */
+ do {
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ flush_work(&hba->eh_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ } while (1);
- if (ret)
- dev_err(hba->dev, "%s: link recovery failed, err %d",
- __func__, ret);
+ if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+ ufshcd_is_link_active(hba)))
+ ret = -ENOLINK;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
return ret;
}
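The lock/check/flush loop above now appears twice in this patch (here and again in ufshcd_eh_host_reset_handler() further down). A hedged sketch of the same pattern factored into a standalone helper — the helper name is hypothetical and, unlike the patch, it returns with the host lock dropped:

static void ufshcd_wait_for_eh_work(struct ufs_hba *hba)
{
	unsigned long flags;

	do {
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!(work_pending(&hba->eh_work) ||
		      hba->ufshcd_state == UFSHCD_STATE_RESET))
			break;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* flush with the lock dropped; eh_work takes the same lock */
		flush_work(&hba->eh_work);
	} while (1);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

In the patch itself the loop intentionally exits with the lock still held, so that ufshcd_state and force_host_reset can be updated atomically before eh_work is scheduled.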
@@ -4087,8 +4105,7 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
* If link recovery fails then return error so that caller
* don't retry the hibern8 enter again.
*/
- if (ufshcd_link_recovery(hba))
- ret = -ENOLINK;
+ ret = ufshcd_link_recovery(hba);
} else {
dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
ktime_to_us(ktime_get()));
@@ -5604,11 +5621,9 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
ufsdbg_set_err_state(hba);
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold_all(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state == UFSHCD_STATE_RESET)
goto out;
@@ -5644,7 +5659,8 @@ static void ufshcd_err_handler(struct work_struct *work)
}
}
- if ((hba->saved_err & INT_FATAL_ERRORS) || hba->saved_ce_err ||
+ if ((hba->saved_err & INT_FATAL_ERRORS)
+ || hba->saved_ce_err || hba->force_host_reset ||
((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
@@ -5732,6 +5748,7 @@ skip_pending_xfer_clear:
hba->saved_err = 0;
hba->saved_uic_err = 0;
hba->saved_ce_err = 0;
+ hba->force_host_reset = false;
}
skip_err_handling:
@@ -5743,12 +5760,9 @@ skip_err_handling:
}
hba->silence_err_logs = false;
- ufshcd_clear_eh_in_progress(hba);
out:
+ ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_scsi_unblock_requests(hba);
- ufshcd_release_all(hba);
- pm_runtime_put_sync(hba->dev);
}
static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
@@ -5849,8 +5863,11 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
/* handle fatal errors only when link is functional */
if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
- /* block commands from scsi mid-layer */
- __ufshcd_scsi_block_requests(hba);
+		/*
+		 * Set the error-handling-in-progress flag early so that
+		 * no new requests are issued from this point on.
+		 */
+ ufshcd_set_eh_in_progress(hba);
hba->ufshcd_state = UFSHCD_STATE_ERROR;
schedule_work(&hba->eh_work);
@@ -6354,6 +6371,11 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
int retries = MAX_HOST_RESET_RETRIES;
do {
+ err = ufshcd_vops_full_reset(hba);
+ if (err)
+ dev_warn(hba->dev, "%s: full reset returned %d\n",
+ __func__, err);
+
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
@@ -6383,13 +6405,12 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
*/
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- int err;
+ int err = SUCCESS;
unsigned long flags;
struct ufs_hba *hba;
hba = shost_priv(cmd->device->host);
- ufshcd_hold_all(hba);
/*
* Check if there is any race with fatal error handling.
* If so, wait for it to complete. Even though fatal error
@@ -6402,29 +6423,37 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
hba->ufshcd_state == UFSHCD_STATE_RESET))
break;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
flush_work(&hba->eh_work);
} while (1);
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+	/*
+	 * We don't know whether the previous reset actually reset the host
+	 * controller, so force a reset here to be sure.
+	 */
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->force_host_reset = true;
+ schedule_work(&hba->eh_work);
- ufshcd_update_error_stats(hba, UFS_ERR_EH);
- err = ufshcd_reset_and_restore(hba);
+ /* wait for the reset work to finish */
+ do {
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
+ flush_work(&hba->eh_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ } while (1);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!err) {
- err = SUCCESS;
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- } else {
+ if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+ ufshcd_is_link_active(hba))) {
err = FAILED;
hba->ufshcd_state = UFSHCD_STATE_ERROR;
}
- ufshcd_clear_eh_in_progress(hba);
+
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_release_all(hba);
return err;
}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index a4ee3726edb0..552d50081e3f 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -815,6 +815,7 @@ struct ufs_hba {
u32 saved_uic_err;
u32 saved_ce_err;
bool silence_err_logs;
+ bool force_host_reset;
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index ecfa6954f1e6..0e74093eeb2b 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -770,4 +770,10 @@ config WCD_DSP_GLINK
between MSM and WCD DSP over glink transport protocol. This driver
provides read and write interface via char device.
+config QCOM_SMCINVOKE
+ bool "Secure QSEE Support"
+ help
+	  Enable the SMCInvoke driver, which supports capability-based secure
+	  communication between QSEE and HLOS.
+
source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 913f5d55e53b..e9e65ea443dd 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -98,3 +98,4 @@ obj-$(CONFIG_MSM_RPM_STATS_LOG) += rpm_stats.o rpm_master_stat.o system_stats.o
obj-$(CONFIG_MSM_RPM_LOG) += rpm_log.o
obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
obj-$(CONFIG_WCD_DSP_GLINK) += wcd-dsp-glink.o
+obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 57e58a57fab7..f54d9c3f4f3d 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1148,6 +1148,7 @@ int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
{
struct glink_core_rx_intent *intent;
struct glink_core_rx_intent *intent_tmp;
+ struct glink_core_rx_intent *best_intent = NULL;
unsigned long flags;
if (GLINK_MAX_PKT_SIZE < size) {
@@ -1170,21 +1171,29 @@ int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
list_for_each_entry_safe(intent, intent_tmp, &ctx->rmt_rx_intent_list,
list) {
if (intent->intent_size >= size) {
- list_del(&intent->list);
- GLINK_DBG_CH(ctx,
- "%s: R[%u]:%zu Removed remote intent\n",
- __func__,
- intent->id,
- intent->intent_size);
- *riid_ptr = intent->id;
- *intent_size = intent->intent_size;
- *cookie = intent->cookie;
- kfree(intent);
- spin_unlock_irqrestore(
- &ctx->rmt_rx_intent_lst_lock_lhc2, flags);
- return 0;
+ if (!best_intent)
+ best_intent = intent;
+ else if (best_intent->intent_size > intent->intent_size)
+ best_intent = intent;
+ if (best_intent->intent_size == size)
+ break;
}
}
+ if (best_intent) {
+ list_del(&best_intent->list);
+ GLINK_DBG_CH(ctx,
+ "%s: R[%u]:%zu Removed remote intent\n",
+ __func__,
+ best_intent->id,
+ best_intent->intent_size);
+ *riid_ptr = best_intent->id;
+ *intent_size = best_intent->intent_size;
+ *cookie = best_intent->cookie;
+ kfree(best_intent);
+ spin_unlock_irqrestore(
+ &ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ return 0;
+ }
spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
return -EAGAIN;
}
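The selection above switches from first-fit to best-fit: rather than consuming the first remote intent large enough for the packet, it keeps scanning for the smallest intent that still fits and stops early on an exact match. The rule in isolation, as a hypothetical helper (the caller must hold rmt_rx_intent_lst_lock_lhc2, as in the patch):

static struct glink_core_rx_intent *
glink_find_best_fit_intent(struct list_head *intents, size_t size)
{
	struct glink_core_rx_intent *intent, *best = NULL;

	list_for_each_entry(intent, intents, list) {
		if (intent->intent_size < size)
			continue;	/* too small for this packet */
		if (!best || intent->intent_size < best->intent_size)
			best = intent;	/* tighter fit found */
		if (best->intent_size == size)
			break;		/* exact fit, stop searching */
	}

	return best;
}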
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index bfe2072ee554..aff9683b394f 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -38,6 +38,9 @@
#include <soc/qcom/icnss.h>
#include <soc/qcom/msm_qmi_interface.h>
#include <soc/qcom/secure_buffer.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/service-locator.h>
+#include <soc/qcom/service-notifier.h>
#include "wlan_firmware_service_v01.h"
@@ -45,12 +48,109 @@
#define WLFW_TIMEOUT_MS 3000
#define WLFW_SERVICE_INS_ID_V01 0
#define MAX_PROP_SIZE 32
-#define MAX_VOLTAGE_LEVEL 2
-#define VREG_ON 1
-#define VREG_OFF 0
-#define MPM2_MPM_WCSSAON_CONFIG_OFFSET 0x18
#define NUM_LOG_PAGES 4
+/*
+ * Registers: MPM2_PSHOLD
+ * Base Address: 0x10AC000
+ */
+#define MPM_WCSSAON_CONFIG_OFFSET 0x18
+#define MPM_WCSSAON_CONFIG_ARES_N BIT(0)
+#define MPM_WCSSAON_CONFIG_WLAN_DISABLE BIT(1)
+#define MPM_WCSSAON_CONFIG_FORCE_ACTIVE BIT(14)
+#define MPM_WCSSAON_CONFIG_FORCE_XO_ENABLE BIT(19)
+#define MPM_WCSSAON_CONFIG_DISCONNECT_CLR BIT(21)
+
+/*
+ * Registers: WCSS_SR_SHADOW_REGISTERS
+ * Base Address: 0x18820000
+ */
+#define SR_WCSSAON_SR_LSB_OFFSET 0x22070
+#define SR_WCSSAON_SR_LSB_RETENTION_STATUS BIT(20)
+
+#define SR_PMM_SR_MSB 0x2206C
+#define SR_PMM_SR_MSB_AHB_CLOCK_MASK GENMASK(26, 22)
+#define SR_PMM_SR_MSB_XO_CLOCK_MASK GENMASK(31, 27)
+
+/*
+ * Registers: WCSS_HM_A_WCSS_CLK_CTL_WCSS_CC_REG
+ * Base Address: 0x189D0000
+ */
+#define WCSS_WLAN1_GDSCR_OFFSET 0x1D3004
+#define WCSS_WLAN1_GDSCR_SW_COLLAPSE BIT(0)
+#define WCSS_WLAN1_GDSCR_HW_CONTROL BIT(1)
+#define WCSS_WLAN1_GDSCR_PWR_ON BIT(31)
+
+#define WCSS_RFACTRL_GDSCR_OFFSET 0x1D60C8
+#define WCSS_RFACTRL_GDSCR_SW_COLLAPSE BIT(0)
+#define WCSS_RFACTRL_GDSCR_HW_CONTROL BIT(1)
+#define WCSS_RFACTRL_GDSCR_PWR_ON BIT(31)
+
+#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET 0x1D1004
+#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_SW_COLLAPSE BIT(0)
+#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_HW_CONTROL BIT(1)
+#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_PWR_ON BIT(31)
+
+/*
+ * Registers: WCSS_HM_A_WIFI_APB_3_A_WCMN_MAC_WCMN_REG
+ * Base Address: 0x18AF0000
+ */
+#define WCMN_PMM_WLAN1_CFG_REG1_OFFSET 0x2F0804
+#define WCMN_PMM_WLAN1_CFG_REG1_RFIF_ADC_PORDN_N BIT(9)
+#define WCMN_PMM_WLAN1_CFG_REG1_ADC_DIGITAL_CLAMP BIT(10)
+
+/*
+ * Registers: WCSS_HM_A_PMM_PMM
+ * Base Address: 0x18880000
+ */
+#define PMM_COMMON_IDLEREQ_CSR_OFFSET 0x80120
+#define PMM_COMMON_IDLEREQ_CSR_SW_WNOC_IDLEREQ_SET BIT(16)
+#define PMM_COMMON_IDLEREQ_CSR_WNOC_IDLEACK BIT(26)
+#define PMM_COMMON_IDLEREQ_CSR_WNOC_IDLE BIT(27)
+
+#define PMM_RFACTRL_IDLEREQ_CSR_OFFSET 0x80164
+#define PMM_RFACTRL_IDLEREQ_CSR_SW_RFACTRL_IDLEREQ_SET BIT(16)
+#define PMM_RFACTRL_IDLEREQ_CSR_RFACTRL_IDLETACK BIT(26)
+
+#define PMM_WSI_CMD_OFFSET 0x800E0
+#define PMM_WSI_CMD_USE_WLAN1_WSI BIT(0)
+#define PMM_WSI_CMD_SW_USE_PMM_WSI BIT(2)
+#define PMM_WSI_CMD_SW_BUS_SYNC BIT(3)
+#define PMM_WSI_CMD_SW_RF_RESET BIT(4)
+#define PMM_WSI_CMD_SW_REG_READ BIT(5)
+#define PMM_WSI_CMD_SW_XO_DIS BIT(8)
+#define PMM_WSI_CMD_SW_FORCE_IDLE BIT(9)
+#define PMM_WSI_CMD_PMM_WSI_SM GENMASK(24, 16)
+#define PMM_WSI_CMD_RF_CMD_IP BIT(31)
+
+#define PMM_REG_RW_ADDR_OFFSET 0x800F0
+#define PMM_REG_RW_ADDR_SW_REG_RW_ADDR GENMASK(15, 0)
+
+#define PMM_REG_READ_DATA_OFFSET 0x800F8
+
+#define PMM_RF_VAULT_REG_ADDR_OFFSET 0x800FC
+#define PMM_RF_VAULT_REG_ADDR_RF_VAULT_REG_ADDR GENMASK(15, 0)
+
+#define PMM_RF_VAULT_REG_DATA_OFFSET 0x80100
+#define PMM_RF_VAULT_REG_DATA_RF_VAULT_REG_DATA GENMASK(31, 0)
+
+#define PMM_XO_DIS_ADDR_OFFSET 0x800E8
+#define PMM_XO_DIS_ADDR_XO_DIS_ADDR GENMASK(15, 0)
+
+#define PMM_XO_DIS_DATA_OFFSET 0x800EC
+#define PMM_XO_DIS_DATA_XO_DIS_DATA GENMASK(31, 0)
+
+#define PMM_RF_RESET_ADDR_OFFSET 0x80104
+#define PMM_RF_RESET_ADDR_RF_RESET_ADDR GENMASK(15, 0)
+
+#define PMM_RF_RESET_DATA_OFFSET 0x80108
+#define PMM_RF_RESET_DATA_RF_RESET_DATA GENMASK(31, 0)
+
+#define ICNSS_HW_REG_RETRY 10
+
+#define ICNSS_SERVICE_LOCATION_CLIENT_NAME "ICNSS-WLAN"
+#define ICNSS_WLAN_SERVICE_NAME "wlan/fw"
+
#define icnss_ipc_log_string(_x...) do { \
if (icnss_ipc_log_context) \
ipc_log_string(icnss_ipc_log_context, _x); \
@@ -99,11 +199,18 @@
#endif
enum icnss_debug_quirks {
- HW_ALWAY_ON,
+ HW_ALWAYS_ON,
HW_DEBUG_ENABLE,
+ SKIP_QMI,
+ HW_ONLY_TOP_LEVEL_RESET,
+ RECOVERY_DISABLE,
+ SSR_ONLY,
+ PDR_ONLY,
};
-#define ICNSS_QUIRKS_DEFAULT 0
+#define ICNSS_QUIRKS_DEFAULT ( \
+ BIT(SSR_ONLY) \
+ )
unsigned long quirks = ICNSS_QUIRKS_DEFAULT;
module_param(quirks, ulong, 0600);
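The quirk names above are bit positions, so the module parameter is a plain bitmask; the new default is BIT(SSR_ONLY) = 0x20. A sketch of how a debug configuration could be composed — the boot-argument form assumes the driver is built in, so the parameter is icnss.quirks:

/* Bit positions follow the enum icnss_debug_quirks declaration order. */
unsigned long debug_quirks = BIT(SSR_ONLY) |			/* 0x20 */
			     BIT(HW_ONLY_TOP_LEVEL_RESET);	/* 0x08 */
/*
 * debug_quirks == 0x28; e.g. boot with icnss.quirks=0x28, or write the
 * value to /sys/module/icnss/parameters/quirks at runtime (perm 0600).
 */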
@@ -116,6 +223,7 @@ enum icnss_driver_event_type {
ICNSS_DRIVER_EVENT_FW_READY_IND,
ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+ ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
ICNSS_DRIVER_EVENT_MAX,
};
@@ -135,6 +243,9 @@ enum icnss_driver_state {
ICNSS_DRIVER_PROBED,
ICNSS_FW_TEST_MODE,
ICNSS_SUSPEND,
+ ICNSS_SSR_ENABLED,
+ ICNSS_PDR_ENABLED,
+ ICNSS_PD_RESTART,
};
struct ce_irq_list {
@@ -145,11 +256,35 @@ struct ce_irq_list {
struct icnss_vreg_info {
struct regulator *reg;
const char *name;
- u32 nominal_min;
- u32 max_voltage;
- bool state;
+ u32 min_v;
+ u32 max_v;
+ u32 load_ua;
+ unsigned long settle_delay;
+ bool required;
+};
+
+struct icnss_clk_info {
+ struct clk *handle;
+ const char *name;
+ u32 freq;
+ bool required;
+};
+
+static struct icnss_vreg_info icnss_vreg_info[] = {
+ {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, true},
+ {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
+ {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+ {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
};
+#define ICNSS_VREG_INFO_SIZE ARRAY_SIZE(icnss_vreg_info)
+
+static struct icnss_clk_info icnss_clk_info[] = {
+ {NULL, "cxo_ref_clk_pin", 0, false},
+};
+
+#define ICNSS_CLK_INFO_SIZE ARRAY_SIZE(icnss_clk_info)
+
struct icnss_stats {
struct {
uint32_t posted;
@@ -188,11 +323,12 @@ struct icnss_stats {
uint32_t ini_req_err;
};
-static struct icnss_data {
+static struct icnss_priv {
struct platform_device *pdev;
struct icnss_driver_ops *ops;
struct ce_irq_list ce_irq_list[ICNSS_MAX_IRQ_REGISTRATIONS];
- struct icnss_vreg_info vreg_info;
+ struct icnss_vreg_info vreg_info[ICNSS_VREG_INFO_SIZE];
+ struct icnss_clk_info clk_info[ICNSS_CLK_INFO_SIZE];
u32 ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
phys_addr_t mem_base_pa;
void __iomem *mem_base_va;
@@ -224,12 +360,83 @@ static struct icnss_data {
u32 rf_pin_result;
struct icnss_mem_region_info
icnss_mem_region[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
- bool skip_qmi;
struct dentry *root_dentry;
spinlock_t on_off_lock;
struct icnss_stats stats;
+ struct work_struct service_notifier_work;
+ void **service_notifier;
+ struct notifier_block service_notifier_nb;
+ int total_domains;
+ struct notifier_block get_service_nb;
+ void *modem_notify_handler;
+ struct notifier_block modem_ssr_nb;
} *penv;
+static void icnss_hw_write_reg(void *base, u32 offset, u32 val)
+{
+ writel_relaxed(val, base + offset);
+ wmb(); /* Ensure data is written to hardware register */
+}
+
+static u32 icnss_hw_read_reg(void *base, u32 offset)
+{
+ u32 rdata = readl_relaxed(base + offset);
+
+ icnss_pr_dbg(" READ: offset: 0x%06x 0x%08x\n", offset, rdata);
+
+ return rdata;
+}
+
+static void icnss_hw_write_reg_field(void *base, u32 offset, u32 mask, u32 val)
+{
+ u32 shift = find_first_bit((void *)&mask, 32);
+ u32 rdata = readl_relaxed(base + offset);
+
+ val = (rdata & ~mask) | (val << shift);
+
+ icnss_pr_dbg("WRITE: offset: 0x%06x 0x%08x -> 0x%08x\n",
+ offset, rdata, val);
+
+ icnss_hw_write_reg(base, offset, val);
+}
+
+static int icnss_hw_poll_reg_field(void *base, u32 offset, u32 mask, u32 val,
+ unsigned long usecs, int retry)
+{
+ u32 shift;
+ u32 rdata;
+ int r = 0;
+
+ shift = find_first_bit((void *)&mask, 32);
+
+ val = val << shift;
+
+ rdata = readl_relaxed(base + offset);
+
+ icnss_pr_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
+ offset, val, rdata, mask);
+
+ while ((rdata & mask) != val) {
+ if (retry != 0 && r >= retry) {
+ icnss_pr_err(" POLL FAILED: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
+ offset, val, rdata, mask);
+
+ return -EIO;
+ }
+
+ r++;
+ udelay(usecs);
+ rdata = readl_relaxed(base + offset);
+
+ if (retry)
+ icnss_pr_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
+ offset, val, rdata, mask);
+
+ }
+
+ return 0;
+}
+
static char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
{
switch (type) {
@@ -243,6 +450,8 @@ static char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
return "REGISTER_DRIVER";
case ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
return "UNREGISTER_DRIVER";
+ case ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN:
+ return "PD_SERVICE_DOWN";
case ICNSS_DRIVER_EVENT_MAX:
return "EVENT_MAX";
}
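The register helpers above derive the field shift from the mask with find_first_bit(), so callers pass field-relative values rather than pre-shifted ones, and icnss_hw_poll_reg_field() treats a retry count of 0 as "poll forever". A short usage sketch built from names already in this file; the comments show the resulting read-modify-write:

/* Request force-XO-enable without disturbing the other MPM config bits. */
icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
			 MPM_WCSSAON_CONFIG_FORCE_XO_ENABLE, 1);
/* equivalent to: cfg = (cfg & ~BIT(19)) | (1 << 19); */

/* Wait for ARES_N to deassert, 10 us per poll, bounded by the retry count. */
icnss_hw_poll_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
			MPM_WCSSAON_CONFIG_ARES_N, 0, 10, ICNSS_HW_REG_RETRY);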
@@ -312,7 +521,7 @@ static int icnss_qmi_pin_connect_result_ind(void *msg, unsigned int msg_len)
ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
if (ret < 0) {
- icnss_pr_err("Failed to decode message: %d, msg_len: %u!\n",
+ icnss_pr_err("Failed to decode message: %d, msg_len: %u\n",
ret, msg_len);
goto out;
}
@@ -334,184 +543,631 @@ out:
return ret;
}
-static int icnss_vreg_on(struct icnss_vreg_info *vreg_info)
+static int icnss_vreg_on(struct icnss_priv *priv)
{
int ret = 0;
+ struct icnss_vreg_info *vreg_info;
+ int i;
- if (!vreg_info->reg) {
- icnss_pr_err("regulator is not initialized\n");
- return -ENOENT;
- }
+ for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
+ vreg_info = &priv->vreg_info[i];
- if (!vreg_info->max_voltage || !vreg_info->nominal_min) {
- icnss_pr_err("%s invalid constraints specified\n",
- vreg_info->name);
- return -EINVAL;
- }
+ if (!vreg_info->reg)
+ continue;
- ret = regulator_set_voltage(vreg_info->reg,
- vreg_info->nominal_min, vreg_info->max_voltage);
- if (ret < 0) {
- icnss_pr_err("regulator_set_voltage failed for (%s). min_uV=%d,max_uV=%d,ret=%d\n",
- vreg_info->name, vreg_info->nominal_min,
- vreg_info->max_voltage, ret);
- return ret;
+ icnss_pr_dbg("Regulator %s being enabled\n", vreg_info->name);
+
+ ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+ vreg_info->max_v);
+
+ if (ret) {
+ icnss_pr_err("Regulator %s, can't set voltage: min_v: %u, max_v: %u, ret: %d\n",
+ vreg_info->name, vreg_info->min_v,
+ vreg_info->max_v, ret);
+ break;
+ }
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg,
+ vreg_info->load_ua);
+
+ if (ret < 0) {
+ icnss_pr_err("Regulator %s, can't set load: %u, ret: %d\n",
+ vreg_info->name,
+ vreg_info->load_ua, ret);
+ break;
+ }
+ }
+
+ ret = regulator_enable(vreg_info->reg);
+ if (ret) {
+ icnss_pr_err("Regulator %s, can't enable: %d\n",
+ vreg_info->name, ret);
+ break;
+ }
+
+ if (vreg_info->settle_delay)
+ udelay(vreg_info->settle_delay);
}
- ret = regulator_enable(vreg_info->reg);
- if (ret < 0) {
- icnss_pr_err("Fail to enable regulator (%s) ret=%d\n",
- vreg_info->name, ret);
+ if (!ret)
+ return 0;
+
+ for (; i >= 0; i--) {
+ vreg_info = &priv->vreg_info[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ regulator_disable(vreg_info->reg);
+
+ regulator_set_load(vreg_info->reg, 0);
+
+ regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
}
return ret;
}
-static int icnss_vreg_off(struct icnss_vreg_info *vreg_info)
+static int icnss_vreg_off(struct icnss_priv *priv)
{
int ret = 0;
- int min_uV = 0;
+ struct icnss_vreg_info *vreg_info;
+ int i;
- if (!vreg_info->reg) {
- icnss_pr_err("Regulator is not initialized\n");
- return -ENOENT;
- }
+ for (i = ICNSS_VREG_INFO_SIZE - 1; i >= 0; i--) {
+ vreg_info = &priv->vreg_info[i];
- ret = regulator_disable(vreg_info->reg);
- if (ret < 0) {
- icnss_pr_err("Fail to disable regulator (%s) ret=%d\n",
- vreg_info->name, ret);
- return ret;
- }
+ if (!vreg_info->reg)
+ continue;
- ret = regulator_set_voltage(vreg_info->reg,
- min_uV, vreg_info->max_voltage);
- if (ret < 0) {
- icnss_pr_err("regulator_set_voltage failed for (%s). min_uV=%d,max_uV=%d,ret=%d\n",
- vreg_info->name, min_uV,
- vreg_info->max_voltage, ret);
+ icnss_pr_dbg("Regulator %s being disabled\n", vreg_info->name);
+
+ ret = regulator_disable(vreg_info->reg);
+ if (ret)
+ icnss_pr_err("Regulator %s, can't disable: %d\n",
+ vreg_info->name, ret);
+
+ ret = regulator_set_load(vreg_info->reg, 0);
+ if (ret < 0)
+ icnss_pr_err("Regulator %s, can't set load: %d\n",
+ vreg_info->name, ret);
+
+ ret = regulator_set_voltage(vreg_info->reg, 0,
+ vreg_info->max_v);
+
+ if (ret)
+ icnss_pr_err("Regulator %s, can't set voltage: %d\n",
+ vreg_info->name, ret);
}
+
return ret;
}
-static int icnss_vreg_set(bool state)
+static int icnss_clk_init(struct icnss_priv *priv)
{
+ struct icnss_clk_info *clk_info;
+ int i;
int ret = 0;
- struct icnss_vreg_info *vreg_info = &penv->vreg_info;
- if (vreg_info->state == state) {
- icnss_pr_dbg("Already %s state is %s\n", vreg_info->name,
- state ? "enabled" : "disabled");
- return ret;
+ for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+ clk_info = &priv->clk_info[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ icnss_pr_dbg("Clock %s being enabled\n", clk_info->name);
+
+ if (clk_info->freq) {
+ ret = clk_set_rate(clk_info->handle, clk_info->freq);
+
+ if (ret) {
+ icnss_pr_err("Clock %s, can't set frequency: %u, ret: %d\n",
+ clk_info->name, clk_info->freq,
+ ret);
+ break;
+ }
+ }
+
+ ret = clk_prepare_enable(clk_info->handle);
+
+ if (ret) {
+ icnss_pr_err("Clock %s, can't enable: %d\n",
+ clk_info->name, ret);
+ break;
+ }
}
- if (state)
- ret = icnss_vreg_on(vreg_info);
- else
- ret = icnss_vreg_off(vreg_info);
+ if (ret == 0)
+ return 0;
- if (ret < 0)
- goto out;
- else
- ret = 0;
+ for (; i >= 0; i--) {
+ clk_info = &priv->clk_info[i];
- icnss_pr_dbg("Regulator %s is now %s\n", vreg_info->name,
- state ? "enabled" : "disabled");
+ if (!clk_info->handle)
+ continue;
+
+ clk_disable_unprepare(clk_info->handle);
+ }
- vreg_info->state = state;
-out:
return ret;
}
-static void icnss_hw_release_reset(struct icnss_data *pdata)
+static int icnss_clk_deinit(struct icnss_priv *priv)
{
- uint32_t rdata = 0;
+ struct icnss_clk_info *clk_info;
+ int i;
- icnss_pr_dbg("HW Release reset: state: 0x%lx\n", pdata->state);
+ for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+ clk_info = &priv->clk_info[i];
+
+ if (!clk_info->handle)
+ continue;
- if (penv->mpm_config_va) {
- writel_relaxed(0x1,
- penv->mpm_config_va +
- MPM2_MPM_WCSSAON_CONFIG_OFFSET);
- while (rdata != 0x1)
- rdata = readl_relaxed(penv->mpm_config_va +
- MPM2_MPM_WCSSAON_CONFIG_OFFSET);
+ icnss_pr_dbg("Clock %s being disabled\n", clk_info->name);
+
+ clk_disable_unprepare(clk_info->handle);
}
+
+ return 0;
}
-static void icnss_hw_reset(struct icnss_data *pdata)
+static void icnss_hw_top_level_release_reset(struct icnss_priv *priv)
{
- uint32_t rdata = 0;
+ icnss_pr_dbg("RESET: HW Release reset: state: 0x%lx\n", priv->state);
+
+ icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_ARES_N, 1);
- icnss_pr_dbg("HW reset: state: 0x%lx\n", pdata->state);
+ icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_WLAN_DISABLE, 0x0);
+
+ icnss_hw_poll_reg_field(priv->mpm_config_va,
+ MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_ARES_N, 1, 10,
+ ICNSS_HW_REG_RETRY);
+}
+
+static void icnss_hw_top_level_reset(struct icnss_priv *priv)
+{
+ icnss_pr_dbg("RESET: HW top level reset: state: 0x%lx\n", priv->state);
- if (penv->mpm_config_va) {
- writel_relaxed(0x0,
- penv->mpm_config_va +
- MPM2_MPM_WCSSAON_CONFIG_OFFSET);
- while (rdata != 0x0)
- rdata = readl_relaxed(penv->mpm_config_va +
- MPM2_MPM_WCSSAON_CONFIG_OFFSET);
+ icnss_hw_write_reg_field(priv->mpm_config_va,
+ MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_ARES_N, 0);
+
+ icnss_hw_poll_reg_field(priv->mpm_config_va,
+ MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_ARES_N, 0, 10,
+ ICNSS_HW_REG_RETRY);
+}
+
+int icnss_hw_reset_wlan_ss_power_down(struct icnss_priv *priv)
+{
+ u32 rdata;
+
+ icnss_pr_dbg("RESET: WLAN SS power down, state: 0x%lx\n", priv->state);
+
+ rdata = icnss_hw_read_reg(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET);
+
+ if ((rdata & WCSS_WLAN1_GDSCR_PWR_ON) == 0)
+ return 0;
+
+ icnss_hw_write_reg_field(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET,
+ WCSS_WLAN1_GDSCR_HW_CONTROL, 0);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET,
+ WCSS_WLAN1_GDSCR_SW_COLLAPSE, 1);
+
+ icnss_hw_poll_reg_field(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET,
+ WCSS_WLAN1_GDSCR_PWR_ON, 0, 10,
+ ICNSS_HW_REG_RETRY);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ WCMN_PMM_WLAN1_CFG_REG1_OFFSET,
+		cpr3_info(vreg, "Fuse values require operation at the highest corner for all corners\n");
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ WCMN_PMM_WLAN1_CFG_REG1_OFFSET,
+ WCMN_PMM_WLAN1_CFG_REG1_RFIF_ADC_PORDN_N, 0);
+
+ return 0;
+}
+
+int icnss_hw_reset_common_ss_power_down(struct icnss_priv *priv)
+{
+ u32 rdata;
+
+ icnss_pr_dbg("RESET: Common SS power down, state: 0x%lx\n",
+ priv->state);
+
+ rdata = icnss_hw_read_reg(priv->mem_base_va,
+ WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET);
+
+ if ((rdata & WCSS_CLK_CTL_WCSS_CSS_GDSCR_PWR_ON) == 0)
+ return 0;
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ PMM_COMMON_IDLEREQ_CSR_OFFSET,
+ PMM_COMMON_IDLEREQ_CSR_SW_WNOC_IDLEREQ_SET,
+ 1);
+
+ icnss_hw_poll_reg_field(priv->mem_base_va,
+ PMM_COMMON_IDLEREQ_CSR_OFFSET,
+ PMM_COMMON_IDLEREQ_CSR_WNOC_IDLEACK,
+ 1, 20, ICNSS_HW_REG_RETRY);
+
+ icnss_hw_poll_reg_field(priv->mem_base_va,
+ PMM_COMMON_IDLEREQ_CSR_OFFSET,
+ PMM_COMMON_IDLEREQ_CSR_WNOC_IDLE,
+ 1, 10, ICNSS_HW_REG_RETRY);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET,
+ WCSS_CLK_CTL_WCSS_CSS_GDSCR_HW_CONTROL, 0);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET,
+ WCSS_CLK_CTL_WCSS_CSS_GDSCR_SW_COLLAPSE, 1);
+
+ icnss_hw_poll_reg_field(priv->mem_base_va,
+ WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET,
+ WCSS_CLK_CTL_WCSS_CSS_GDSCR_PWR_ON, 0, 10,
+ ICNSS_HW_REG_RETRY);
+
+ return 0;
+
+}
+
+int icnss_hw_reset_wlan_rfactrl_power_down(struct icnss_priv *priv)
+{
+ u32 rdata;
+
+ icnss_pr_dbg("RESET: RFACTRL power down, state: 0x%lx\n", priv->state);
+
+ rdata = icnss_hw_read_reg(priv->mem_base_va, WCSS_RFACTRL_GDSCR_OFFSET);
+
+ if ((rdata & WCSS_RFACTRL_GDSCR_PWR_ON) == 0)
+ return 0;
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ PMM_RFACTRL_IDLEREQ_CSR_OFFSET,
+ PMM_RFACTRL_IDLEREQ_CSR_SW_RFACTRL_IDLEREQ_SET,
+ 1);
+
+ icnss_hw_poll_reg_field(priv->mem_base_va,
+ PMM_RFACTRL_IDLEREQ_CSR_OFFSET,
+ PMM_RFACTRL_IDLEREQ_CSR_RFACTRL_IDLETACK,
+ 1, 10, ICNSS_HW_REG_RETRY);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, WCSS_RFACTRL_GDSCR_OFFSET,
+ WCSS_RFACTRL_GDSCR_HW_CONTROL, 0);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, WCSS_RFACTRL_GDSCR_OFFSET,
+ WCSS_RFACTRL_GDSCR_SW_COLLAPSE, 1);
+
+ return 0;
+}
+
+void icnss_hw_wsi_cmd_error_recovery(struct icnss_priv *priv)
+{
+ icnss_pr_dbg("RESET: WSI CMD Error recovery, state: 0x%lx\n",
+ priv->state);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_FORCE_IDLE, 1);
+
+ icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_PMM_WSI_SM, 1, 100, 0);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_FORCE_IDLE, 0);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_BUS_SYNC, 1);
+
+ icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_RF_CMD_IP, 0, 100, 0);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_BUS_SYNC, 0);
+}
+
+u32 icnss_hw_rf_register_read_command(struct icnss_priv *priv, u32 addr)
+{
+ u32 rdata = 0;
+ int ret;
+ int i;
+
+ icnss_pr_dbg("RF register read command, addr: 0x%04x, state: 0x%lx\n",
+ addr, priv->state);
+
+ for (i = 0; i < ICNSS_HW_REG_RETRY; i++) {
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_USE_WLAN1_WSI, 1);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_USE_PMM_WSI, 1);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ PMM_REG_RW_ADDR_OFFSET,
+ PMM_REG_RW_ADDR_SW_REG_RW_ADDR,
+ addr & 0xFFFF);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_REG_READ, 1);
+
+ ret = icnss_hw_poll_reg_field(priv->mem_base_va,
+ PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_RF_CMD_IP, 0, 10,
+ ICNSS_HW_REG_RETRY);
+ if (ret == 0)
+ break;
+
+ icnss_hw_wsi_cmd_error_recovery(priv);
}
+
+
+ rdata = icnss_hw_read_reg(priv->mem_base_va, PMM_REG_READ_DATA_OFFSET);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_USE_PMM_WSI, 0);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_REG_READ, 0);
+
+ icnss_pr_dbg("RF register read command, data: 0x%08x, state: 0x%lx\n",
+ rdata, priv->state);
+
+ return rdata;
+}
+
+int icnss_hw_reset_rf_reset_cmd(struct icnss_priv *priv)
+{
+ u32 rdata;
+ int ret;
+
+ icnss_pr_dbg("RESET: RF reset command, state: 0x%lx\n", priv->state);
+
+ rdata = icnss_hw_rf_register_read_command(priv, 0x5080);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_USE_WLAN1_WSI, 1);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_USE_PMM_WSI, 1);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ PMM_RF_VAULT_REG_ADDR_OFFSET,
+ PMM_RF_VAULT_REG_ADDR_RF_VAULT_REG_ADDR,
+ 0x5082);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ PMM_RF_VAULT_REG_DATA_OFFSET,
+ PMM_RF_VAULT_REG_DATA_RF_VAULT_REG_DATA,
+ 0x12AB8FAD);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_RF_RESET_ADDR_OFFSET,
+ PMM_RF_RESET_ADDR_RF_RESET_ADDR, 0x5080);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_RF_RESET_DATA_OFFSET,
+ PMM_RF_RESET_DATA_RF_RESET_DATA,
+ rdata & 0xBFFF);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_RF_RESET, 1);
+
+ ret = icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_RF_CMD_IP, 0, 10,
+ ICNSS_HW_REG_RETRY);
+
+ if (ret) {
+ icnss_pr_err("RESET: RF reset command failed, state: 0x%lx\n",
+ priv->state);
+ icnss_hw_wsi_cmd_error_recovery(priv);
+ }
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_USE_PMM_WSI, 0);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_RF_RESET, 0);
+
+ return 0;
+}
+
+int icnss_hw_reset_xo_disable_cmd(struct icnss_priv *priv)
+{
+ int ret;
+
+ icnss_pr_dbg("RESET: XO disable command, state: 0x%lx\n", priv->state);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_USE_WLAN1_WSI, 1);
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_USE_PMM_WSI, 1);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ PMM_RF_VAULT_REG_ADDR_OFFSET,
+ PMM_RF_VAULT_REG_ADDR_RF_VAULT_REG_ADDR,
+ 0x5082);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ PMM_RF_VAULT_REG_DATA_OFFSET,
+ PMM_RF_VAULT_REG_DATA_RF_VAULT_REG_DATA,
+ 0x12AB8FAD);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_XO_DIS_ADDR_OFFSET,
+ PMM_XO_DIS_ADDR_XO_DIS_ADDR, 0x5081);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_XO_DIS_DATA_OFFSET,
+ PMM_XO_DIS_DATA_XO_DIS_DATA, 1);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_XO_DIS, 1);
+
+ ret = icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_RF_CMD_IP, 0, 10,
+ ICNSS_HW_REG_RETRY);
+ if (ret) {
+ icnss_pr_err("RESET: XO disable command failed, state: 0x%lx\n",
+ priv->state);
+ icnss_hw_wsi_cmd_error_recovery(priv);
+ }
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_USE_PMM_WSI, 0);
+
+ icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
+ PMM_WSI_CMD_SW_XO_DIS, 0);
+
+ return 0;
+}
+
+int icnss_hw_reset(struct icnss_priv *priv)
+{
+ u32 rdata;
+ u32 rdata1;
+ int i;
+
+ if (test_bit(HW_ONLY_TOP_LEVEL_RESET, &quirks))
+ goto top_level_reset;
+
+ icnss_pr_dbg("RESET: START, state: 0x%lx\n", priv->state);
+
+ icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 1);
+
+ icnss_hw_poll_reg_field(priv->mem_base_va, SR_WCSSAON_SR_LSB_OFFSET,
+ SR_WCSSAON_SR_LSB_RETENTION_STATUS, 1, 10,
+ ICNSS_HW_REG_RETRY);
+
+ for (i = 0; i < ICNSS_HW_REG_RETRY; i++) {
+ rdata = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
+ udelay(10);
+ rdata1 = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
+
+ icnss_pr_dbg("RESET: XO: 0x%05lx/0x%05lx, AHB: 0x%05lx/0x%05lx\n",
+ rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK,
+ rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK,
+ rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK,
+ rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK);
+
+ if ((rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK) !=
+ (rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK) &&
+ (rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK) !=
+ (rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK))
+ break;
+
+ icnss_hw_write_reg_field(priv->mpm_config_va,
+ MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_FORCE_XO_ENABLE,
+ 0x1);
+ usleep_range(2000, 3000);
+ }
+
+ if (i >= ICNSS_HW_REG_RETRY)
+ ICNSS_ASSERT(false);
+
+ icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0x1);
+
+ icnss_hw_reset_wlan_ss_power_down(priv);
+
+ icnss_hw_reset_common_ss_power_down(priv);
+
+ icnss_hw_reset_wlan_rfactrl_power_down(priv);
+
+ icnss_hw_reset_rf_reset_cmd(priv);
+
+ icnss_hw_reset_xo_disable_cmd(priv);
+
+ icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 0);
+
+ icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0);
+
+ icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_WLAN_DISABLE, 1);
+
+ icnss_hw_poll_reg_field(priv->mem_base_va, SR_WCSSAON_SR_LSB_OFFSET,
+ BIT(26), 1, 200, ICNSS_HW_REG_RETRY);
+
+top_level_reset:
+ icnss_hw_top_level_reset(priv);
+
+ icnss_pr_dbg("RESET: DONE, state: 0x%lx\n", priv->state);
+
+ return 0;
}
-static int icnss_hw_power_on(struct icnss_data *pdata)
+static int icnss_hw_power_on(struct icnss_priv *priv)
{
int ret = 0;
unsigned long flags;
- icnss_pr_dbg("Power on: state: 0x%lx\n", pdata->state);
+ icnss_pr_dbg("Power on: state: 0x%lx\n", priv->state);
- spin_lock_irqsave(&pdata->on_off_lock, flags);
- if (test_bit(ICNSS_POWER_ON, &pdata->state)) {
- spin_unlock_irqrestore(&pdata->on_off_lock, flags);
+ spin_lock_irqsave(&priv->on_off_lock, flags);
+ if (test_bit(ICNSS_POWER_ON, &priv->state)) {
+ spin_unlock_irqrestore(&priv->on_off_lock, flags);
return ret;
}
- set_bit(ICNSS_POWER_ON, &pdata->state);
- spin_unlock_irqrestore(&pdata->on_off_lock, flags);
+ set_bit(ICNSS_POWER_ON, &priv->state);
+ spin_unlock_irqrestore(&priv->on_off_lock, flags);
- ret = icnss_vreg_set(VREG_ON);
+ ret = icnss_vreg_on(priv);
if (ret)
goto out;
- icnss_hw_release_reset(pdata);
+ ret = icnss_clk_init(priv);
+ if (ret)
+ goto out;
+
+ icnss_hw_top_level_release_reset(priv);
return ret;
out:
- clear_bit(ICNSS_POWER_ON, &pdata->state);
+ clear_bit(ICNSS_POWER_ON, &priv->state);
return ret;
}
-static int icnss_hw_power_off(struct icnss_data *pdata)
+static int icnss_hw_power_off(struct icnss_priv *priv)
{
int ret = 0;
unsigned long flags;
- icnss_pr_dbg("Power off: 0x%lx\n", pdata->state);
+ if (test_bit(HW_ALWAYS_ON, &quirks))
+ return 0;
+
+ icnss_pr_dbg("Power off: 0x%lx\n", priv->state);
- spin_lock_irqsave(&pdata->on_off_lock, flags);
- if (!test_bit(ICNSS_POWER_ON, &pdata->state)) {
- spin_unlock_irqrestore(&pdata->on_off_lock, flags);
+ spin_lock_irqsave(&priv->on_off_lock, flags);
+ if (!test_bit(ICNSS_POWER_ON, &priv->state)) {
+ spin_unlock_irqrestore(&priv->on_off_lock, flags);
return ret;
}
- clear_bit(ICNSS_POWER_ON, &pdata->state);
- spin_unlock_irqrestore(&pdata->on_off_lock, flags);
+ clear_bit(ICNSS_POWER_ON, &priv->state);
+ spin_unlock_irqrestore(&priv->on_off_lock, flags);
- icnss_hw_reset(pdata);
+ icnss_hw_reset(priv);
- ret = icnss_vreg_set(VREG_OFF);
+ icnss_clk_deinit(priv);
+
+ ret = icnss_vreg_off(priv);
if (ret)
goto out;
return ret;
out:
- set_bit(ICNSS_POWER_ON, &pdata->state);
+ set_bit(ICNSS_POWER_ON, &priv->state);
return ret;
}
int icnss_power_on(struct device *dev)
{
- struct icnss_data *priv = dev_get_drvdata(dev);
+ struct icnss_priv *priv = dev_get_drvdata(dev);
if (!priv) {
icnss_pr_err("Invalid drvdata: dev %p, data %p\n",
@@ -525,7 +1181,7 @@ EXPORT_SYMBOL(icnss_power_on);
int icnss_power_off(struct device *dev)
{
- struct icnss_data *priv = dev_get_drvdata(dev);
+ struct icnss_priv *priv = dev_get_drvdata(dev);
if (!priv) {
icnss_pr_err("Invalid drvdata: dev %p, data %p\n",
@@ -537,7 +1193,7 @@ int icnss_power_off(struct device *dev)
}
EXPORT_SYMBOL(icnss_power_off);
-int icnss_map_msa_permissions(struct icnss_data *priv, u32 index)
+int icnss_map_msa_permissions(struct icnss_priv *priv, u32 index)
{
int ret = 0;
phys_addr_t addr;
@@ -575,7 +1231,7 @@ out:
}
-int icnss_unmap_msa_permissions(struct icnss_data *priv, u32 index)
+int icnss_unmap_msa_permissions(struct icnss_priv *priv, u32 index)
{
int ret = 0;
phys_addr_t addr;
@@ -611,7 +1267,7 @@ out:
return ret;
}
-static int icnss_setup_msa_permissions(struct icnss_data *priv)
+static int icnss_setup_msa_permissions(struct icnss_priv *priv)
{
int ret = 0;
@@ -630,7 +1286,7 @@ err_map_msa:
return ret;
}
-static void icnss_remove_msa_permissions(struct icnss_data *priv)
+static void icnss_remove_msa_permissions(struct icnss_priv *priv)
{
icnss_unmap_msa_permissions(priv, 0);
icnss_unmap_msa_permissions(priv, 1);
@@ -686,7 +1342,7 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
resp.mem_region_info_len);
if (resp.mem_region_info_len > 2) {
- icnss_pr_err("Invalid memory region length received%d\n",
+ icnss_pr_err("Invalid memory region length received: %d\n",
resp.mem_region_info_len);
ret = -EINVAL;
penv->stats.msa_info_err++;
@@ -701,8 +1357,8 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
resp.mem_region_info[i].size;
penv->icnss_mem_region[i].secure_flag =
resp.mem_region_info[i].secure_flag;
- icnss_pr_dbg("Memory Region: %d Addr: 0x%x Size: %d Flag: %d\n",
- i, (unsigned int)penv->icnss_mem_region[i].reg_addr,
+ icnss_pr_dbg("Memory Region: %d Addr: 0x%llx Size: 0x%x Flag: 0x%08x\n",
+ i, penv->icnss_mem_region[i].reg_addr,
penv->icnss_mem_region[i].size,
penv->icnss_mem_region[i].secure_flag);
}
@@ -1192,12 +1848,67 @@ static int icnss_driver_event_server_exit(void *data)
qmi_handle_destroy(penv->wlfw_clnt);
- penv->state = 0;
+ clear_bit(ICNSS_WLFW_QMI_CONNECTED, &penv->state);
penv->wlfw_clnt = NULL;
return 0;
}
+static int icnss_call_driver_probe(struct icnss_priv *priv)
+{
+ int ret;
+
+ if (!priv->ops || !priv->ops->probe)
+ return 0;
+
+ icnss_hw_power_on(priv);
+
+ ret = priv->ops->probe(&priv->pdev->dev);
+ if (ret < 0) {
+ icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n",
+ ret, priv->state);
+ goto out;
+ }
+
+ set_bit(ICNSS_DRIVER_PROBED, &priv->state);
+
+ return 0;
+
+out:
+ icnss_hw_power_off(priv);
+ return ret;
+}
+
+static int icnss_call_driver_reinit(struct icnss_priv *priv)
+{
+ int ret = 0;
+
+ if (!priv->ops || !priv->ops->reinit)
+ goto out;
+
+ icnss_hw_power_on(priv);
+
+ ret = priv->ops->reinit(&priv->pdev->dev);
+ if (ret < 0) {
+ icnss_pr_err("Driver reinit failed: %d, state: 0x%lx\n",
+ ret, priv->state);
+ ICNSS_ASSERT(false);
+ goto out_power_off;
+ }
+
+out:
+ clear_bit(ICNSS_PD_RESTART, &priv->state);
+
+ return 0;
+
+out_power_off:
+ icnss_hw_power_off(priv);
+
+ clear_bit(ICNSS_PD_RESTART, &priv->state);
+ return ret;
+}
+
+
static int icnss_driver_event_fw_ready_ind(void *data)
{
int ret = 0;
@@ -1209,34 +1920,20 @@ static int icnss_driver_event_fw_ready_ind(void *data)
icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state);
+ icnss_hw_power_off(penv);
+
if (!penv->pdev) {
icnss_pr_err("Device is not ready\n");
ret = -ENODEV;
goto out;
}
- /*
- * WAR required after FW ready without which CCPM init fails in firmware
- * when WLAN enable is sent to firmware
- */
- icnss_hw_reset(penv);
- usleep_range(100, 102);
- icnss_hw_release_reset(penv);
-
- if (!penv->ops || !penv->ops->probe)
- goto out;
-
- ret = penv->ops->probe(&penv->pdev->dev);
- if (ret < 0) {
- icnss_pr_err("Driver probe failed: %d\n", ret);
- goto out;
- }
-
- set_bit(ICNSS_DRIVER_PROBED, &penv->state);
+ if (test_bit(ICNSS_PD_RESTART, &penv->state))
+ ret = icnss_call_driver_reinit(penv);
+ else
+ ret = icnss_call_driver_probe(penv);
- return 0;
out:
- icnss_hw_power_off(penv);
return ret;
}
@@ -1251,11 +1948,11 @@ static int icnss_driver_event_register_driver(void *data)
penv->ops = data;
- if (penv->skip_qmi)
+ if (test_bit(SKIP_QMI, &quirks))
set_bit(ICNSS_FW_READY, &penv->state);
if (!test_bit(ICNSS_FW_READY, &penv->state)) {
- icnss_pr_dbg("FW is not ready yet, state: 0x%lx!\n",
+ icnss_pr_dbg("FW is not ready yet, state: 0x%lx\n",
penv->state);
goto out;
}
@@ -1302,6 +1999,32 @@ out:
return 0;
}
+static int icnss_qmi_pd_event_service_down(struct icnss_priv *priv, void *data)
+{
+ int ret = 0;
+
+ if (test_bit(ICNSS_PD_RESTART, &priv->state))
+ goto out;
+
+ set_bit(ICNSS_PD_RESTART, &priv->state);
+ clear_bit(ICNSS_FW_READY, &priv->state);
+
+ if (!priv->ops || !priv->ops->shutdown)
+ goto out;
+
+ priv->ops->shutdown(&priv->pdev->dev);
+
+out:
+ icnss_remove_msa_permissions(priv);
+
+ ret = icnss_hw_power_off(priv);
+
+ icnss_pr_dbg("Shutdown completed: %d, state: 0x%lx\n",
+ ret, priv->state);
+
+ return ret;
+}
+
static void icnss_driver_event_work(struct work_struct *work)
{
struct icnss_driver_event *event;
@@ -1337,6 +2060,9 @@ static void icnss_driver_event_work(struct work_struct *work)
case ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
ret = icnss_driver_event_unregister_driver(event->data);
break;
+ case ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN:
+ icnss_qmi_pd_event_service_down(penv, event->data);
+ break;
default:
icnss_pr_err("Invalid Event type: %d", event->type);
kfree(event);
@@ -1388,6 +2114,239 @@ static struct notifier_block wlfw_clnt_nb = {
.notifier_call = icnss_qmi_wlfw_clnt_svc_event_notify,
};
+static int icnss_modem_notifier_nb(struct notifier_block *this,
+ unsigned long code,
+ void *ss_handle)
+{
+ icnss_pr_dbg("Modem-Notify: event %lu\n", code);
+
+ if (code == SUBSYS_AFTER_POWERUP) {
+ icnss_pr_dbg("Modem-Notify: Powerup\n");
+ } else if (code == SUBSYS_BEFORE_SHUTDOWN) {
+ icnss_pr_info("Modem-Notify: Before shutdown\n");
+ icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
+ true, NULL);
+ } else if (code == SUBSYS_AFTER_SHUTDOWN) {
+ icnss_pr_info("Modem-Notify: After Shutdown\n");
+ } else {
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int icnss_modem_ssr_register_notifier(struct icnss_priv *priv)
+{
+ int ret = 0;
+
+ priv->modem_ssr_nb.notifier_call = icnss_modem_notifier_nb;
+
+ priv->modem_notify_handler =
+ subsys_notif_register_notifier("modem", &priv->modem_ssr_nb);
+
+ if (IS_ERR(priv->modem_notify_handler)) {
+ ret = PTR_ERR(priv->modem_notify_handler);
+ icnss_pr_err("Modem register notifier failed: %d\n", ret);
+ }
+
+ set_bit(ICNSS_SSR_ENABLED, &priv->state);
+
+ return ret;
+}
+
+static int icnss_modem_ssr_unregister_notifier(struct icnss_priv *priv)
+{
+ if (!test_and_clear_bit(ICNSS_SSR_ENABLED, &priv->state))
+ return 0;
+
+ subsys_notif_unregister_notifier(priv->modem_notify_handler,
+ &priv->modem_ssr_nb);
+ priv->modem_notify_handler = NULL;
+
+ return 0;
+}
+
+static int icnss_pdr_unregister_notifier(struct icnss_priv *priv)
+{
+ int i;
+
+ if (!test_and_clear_bit(ICNSS_PDR_ENABLED, &priv->state))
+ return 0;
+
+ for (i = 0; i < priv->total_domains; i++)
+ service_notif_unregister_notifier(priv->service_notifier[i],
+ &priv->service_notifier_nb);
+
+ kfree(priv->service_notifier);
+
+ priv->service_notifier = NULL;
+
+ return 0;
+}
+
+static int icnss_service_notifier_notify(struct notifier_block *nb,
+ unsigned long notification, void *data)
+{
+ struct icnss_priv *priv = container_of(nb, struct icnss_priv,
+ service_notifier_nb);
+
+ switch (notification) {
+ case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
+ icnss_pr_info("Service down, state: 0x%lx\n", priv->state);
+ icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
+ true, NULL);
+ icnss_pr_dbg("Service down completed, state: 0x%lx\n",
+ priv->state);
+ break;
+ case SERVREG_NOTIF_SERVICE_STATE_UP_V01:
+ icnss_pr_dbg("Service up, state: 0x%lx\n", priv->state);
+ break;
+ default:
+ icnss_pr_dbg("Service state Unknown, notification: 0x%lx, state: 0x%lx\n",
+ notification, priv->state);
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int icnss_get_service_location_notify(struct notifier_block *nb,
+ unsigned long opcode, void *data)
+{
+ struct icnss_priv *priv = container_of(nb, struct icnss_priv,
+ get_service_nb);
+ struct pd_qmi_client_data *pd = data;
+ int curr_state;
+ int ret;
+ int i;
+ void **handle;
+
+ icnss_pr_dbg("Get service notify opcode: %lu, state: 0x%lx\n", opcode,
+ priv->state);
+
+ if (opcode != LOCATOR_UP)
+ return NOTIFY_DONE;
+
+ if (pd->total_domains == 0) {
+ icnss_pr_err("Did not find any domains\n");
+ ret = -ENOENT;
+ goto out;
+ }
+
+ handle = kcalloc(pd->total_domains, sizeof(void *), GFP_KERNEL);
+
+ if (!handle) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ priv->service_notifier_nb.notifier_call = icnss_service_notifier_notify;
+
+ for (i = 0; i < pd->total_domains; i++) {
+ icnss_pr_dbg("%d: domain_name: %s, instance_id: %d\n", i,
+ pd->domain_list[i].name,
+ pd->domain_list[i].instance_id);
+
+ handle[i] =
+ service_notif_register_notifier(pd->domain_list[i].name,
+ pd->domain_list[i].instance_id,
+ &priv->service_notifier_nb, &curr_state);
+
+ if (IS_ERR(handle[i])) {
+ icnss_pr_err("%d: Unable to register notifier for %s(0x%x)\n",
+				     i, pd->domain_list[i].name,
+				     pd->domain_list[i].instance_id);
+ ret = PTR_ERR(handle[i]);
+ goto free_handle;
+ }
+ }
+
+ priv->service_notifier = handle;
+ priv->total_domains = pd->total_domains;
+
+ set_bit(ICNSS_PDR_ENABLED, &priv->state);
+
+ icnss_modem_ssr_unregister_notifier(priv);
+
+ icnss_pr_dbg("PD restart enabled, state: 0x%lx\n", priv->state);
+
+ return NOTIFY_OK;
+
+free_handle:
+ for (i = 0; i < pd->total_domains; i++) {
+ if (handle[i])
+ service_notif_unregister_notifier(handle[i],
+ &priv->service_notifier_nb);
+ }
+ kfree(handle);
+
+out:
+ icnss_pr_err("PD restart not enabled: %d, state: 0x%lx\n", ret,
+ priv->state);
+
+ return NOTIFY_OK;
+}
+
+
+static int icnss_pd_restart_enable(struct icnss_priv *priv)
+{
+ int ret;
+
+ if (test_bit(SSR_ONLY, &quirks)) {
+ icnss_pr_dbg("PDR disabled through module parameter\n");
+ return 0;
+ }
+
+ icnss_pr_dbg("Get service location, state: 0x%lx\n", priv->state);
+
+ priv->get_service_nb.notifier_call = icnss_get_service_location_notify;
+ ret = get_service_location(ICNSS_SERVICE_LOCATION_CLIENT_NAME,
+ ICNSS_WLAN_SERVICE_NAME,
+ &priv->get_service_nb);
+ if (ret) {
+ icnss_pr_err("Get service location failed: %d\n", ret);
+ goto out;
+ }
+
+ return 0;
+out:
+ icnss_pr_err("PD restart not enabled: %d\n", ret);
+ return ret;
+
+}
+
+
+static int icnss_enable_recovery(struct icnss_priv *priv)
+{
+ int ret;
+
+ if (test_bit(RECOVERY_DISABLE, &quirks)) {
+ icnss_pr_dbg("Recovery disabled through module parameter\n");
+ return 0;
+ }
+
+ if (test_bit(PDR_ONLY, &quirks)) {
+ icnss_pr_dbg("SSR disabled through module parameter\n");
+ goto enable_pdr;
+ }
+
+ icnss_modem_ssr_register_notifier(priv);
+ if (test_bit(SSR_ONLY, &quirks)) {
+ icnss_pr_dbg("PDR disabled through module parameter\n");
+ return 0;
+ }
+
+enable_pdr:
+ ret = icnss_pd_restart_enable(priv);
+
+ if (ret)
+ return ret;
+
+ icnss_modem_ssr_unregister_notifier(priv);
+
+ return 0;
+}
+
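icnss_enable_recovery() selects between modem SSR notifications and per-domain PD restart using the quirks bits; once PD restart registers successfully, the SSR notifier is dropped. The goto-based flow above is equivalent to this straight-line sketch (not part of the patch):

static int icnss_enable_recovery_sketch(struct icnss_priv *priv)
{
	int ret;

	if (test_bit(RECOVERY_DISABLE, &quirks))
		return 0;			/* recovery fully disabled */

	if (!test_bit(PDR_ONLY, &quirks)) {
		icnss_modem_ssr_register_notifier(priv);	/* SSR first */
		if (test_bit(SSR_ONLY, &quirks))
			return 0;		/* stop at SSR */
	}

	ret = icnss_pd_restart_enable(priv);	/* try to enable PDR */
	if (ret)
		return ret;			/* keep SSR, if registered */

	icnss_modem_ssr_unregister_notifier(priv);	/* PDR supersedes SSR */
	return 0;
}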
int icnss_register_driver(struct icnss_driver_ops *ops)
{
int ret = 0;
@@ -1683,7 +2642,7 @@ skip:
if (ret)
icnss_pr_err("Failed to send mode, ret = %d\n", ret);
out:
- if (penv->skip_qmi)
+ if (test_bit(SKIP_QMI, &quirks))
ret = 0;
return ret;
@@ -1732,7 +2691,7 @@ EXPORT_SYMBOL(icnss_get_irq);
struct dma_iommu_mapping *icnss_smmu_get_mapping(struct device *dev)
{
- struct icnss_data *priv = dev_get_drvdata(dev);
+ struct icnss_priv *priv = dev_get_drvdata(dev);
if (!priv) {
icnss_pr_err("Invalid drvdata: dev %p, data %p\n",
@@ -1747,7 +2706,7 @@ EXPORT_SYMBOL(icnss_smmu_get_mapping);
int icnss_smmu_map(struct device *dev,
phys_addr_t paddr, uint32_t *iova_addr, size_t size)
{
- struct icnss_data *priv = dev_get_drvdata(dev);
+ struct icnss_priv *priv = dev_get_drvdata(dev);
unsigned long iova;
size_t len;
int ret = 0;
@@ -1759,7 +2718,7 @@ int icnss_smmu_map(struct device *dev,
}
if (!iova_addr) {
- icnss_pr_err("iova_addr is NULL, paddr %pa, size %zu",
+ icnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
&paddr, size);
return -EINVAL;
}
@@ -1768,7 +2727,7 @@ int icnss_smmu_map(struct device *dev,
iova = roundup(penv->smmu_iova_ipa_start, PAGE_SIZE);
if (iova >= priv->smmu_iova_ipa_start + priv->smmu_iova_ipa_len) {
- icnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu",
+ icnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
iova,
&priv->smmu_iova_ipa_start,
priv->smmu_iova_ipa_len);
@@ -1779,7 +2738,7 @@ int icnss_smmu_map(struct device *dev,
rounddown(paddr, PAGE_SIZE), len,
IOMMU_READ | IOMMU_WRITE);
if (ret) {
- icnss_pr_err("PA to IOVA mapping failed, ret %d!", ret);
+ icnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
return ret;
}
@@ -1790,7 +2749,7 @@ int icnss_smmu_map(struct device *dev,
}
EXPORT_SYMBOL(icnss_smmu_map);
-static int icnss_bw_vote(struct icnss_data *priv, int index)
+static int icnss_bw_vote(struct icnss_priv *priv, int index)
{
int ret = 0;
@@ -1798,13 +2757,13 @@ static int icnss_bw_vote(struct icnss_data *priv, int index)
index, priv->state);
ret = msm_bus_scale_client_update_request(priv->bus_client, index);
if (ret)
- icnss_pr_err("Fail to vote %d: ret %d, state 0x%lx!\n",
+ icnss_pr_err("Fail to vote %d: ret %d, state 0x%lx\n",
index, ret, priv->state);
return ret;
}
-static int icnss_bw_init(struct icnss_data *priv)
+static int icnss_bw_init(struct icnss_priv *priv)
{
int ret = 0;
@@ -1832,7 +2791,7 @@ out:
return ret;
}
-static void icnss_bw_deinit(struct icnss_data *priv)
+static void icnss_bw_deinit(struct icnss_priv *priv)
{
if (!priv)
return;
@@ -1846,7 +2805,7 @@ static void icnss_bw_deinit(struct icnss_data *priv)
msm_bus_cl_clear_pdata(priv->bus_scale_table);
}
-static int icnss_smmu_init(struct device *dev)
+static int icnss_smmu_init(struct icnss_priv *priv)
{
struct dma_iommu_mapping *mapping;
int disable_htw = 1;
@@ -1857,8 +2816,8 @@ static int icnss_smmu_init(struct device *dev)
icnss_pr_dbg("Initializing SMMU\n");
mapping = arm_iommu_create_mapping(&platform_bus_type,
- penv->smmu_iova_start,
- penv->smmu_iova_len);
+ priv->smmu_iova_start,
+ priv->smmu_iova_len);
if (IS_ERR(mapping)) {
icnss_pr_err("Create mapping failed, err = %d\n", ret);
ret = PTR_ERR(mapping);
@@ -1891,13 +2850,13 @@ static int icnss_smmu_init(struct device *dev)
goto set_attr_fail;
}
- ret = arm_iommu_attach_device(dev, mapping);
+ ret = arm_iommu_attach_device(&priv->pdev->dev, mapping);
if (ret < 0) {
icnss_pr_err("Attach device failed, err = %d\n", ret);
goto attach_fail;
}
- penv->smmu_mapping = mapping;
+ priv->smmu_mapping = mapping;
return ret;
@@ -1908,88 +2867,132 @@ map_fail:
return ret;
}
-static void icnss_smmu_remove(struct device *dev)
+static void icnss_smmu_deinit(struct icnss_priv *priv)
{
- arm_iommu_detach_device(dev);
- arm_iommu_release_mapping(penv->smmu_mapping);
+ if (!priv->smmu_mapping)
+ return;
+
+ arm_iommu_detach_device(&priv->pdev->dev);
+ arm_iommu_release_mapping(priv->smmu_mapping);
- penv->smmu_mapping = NULL;
+ priv->smmu_mapping = NULL;
}
-static int icnss_dt_parse_vreg_info(struct device *dev,
- struct icnss_vreg_info *vreg_info,
- const char *vreg_name)
+static int icnss_get_vreg_info(struct device *dev,
+ struct icnss_vreg_info *vreg_info)
{
int ret = 0;
- u32 voltage_levels[MAX_VOLTAGE_LEVEL];
char prop_name[MAX_PROP_SIZE];
- struct device_node *np = dev->of_node;
+ struct regulator *reg;
+ const __be32 *prop;
+ int len = 0;
+ int i;
- snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
- if (!of_parse_phandle(np, prop_name, 0)) {
- icnss_pr_err("No vreg data found for %s\n", vreg_name);
- ret = -EINVAL;
- return ret;
+ reg = devm_regulator_get_optional(dev, vreg_info->name);
+
+	if (IS_ERR(reg) && PTR_ERR(reg) == -EPROBE_DEFER) {
+ icnss_pr_err("EPROBE_DEFER for regulator: %s\n",
+ vreg_info->name);
+ ret = PTR_ERR(reg);
+ goto out;
}
- vreg_info->name = vreg_name;
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+
+ if (vreg_info->required) {
+
+ icnss_pr_err("Regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto out;
+ } else {
+ icnss_pr_dbg("Optional regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto done;
+ }
- snprintf(prop_name, MAX_PROP_SIZE,
- "qcom,%s-voltage-level", vreg_name);
- ret = of_property_read_u32_array(np, prop_name, voltage_levels,
- ARRAY_SIZE(voltage_levels));
- if (ret) {
- icnss_pr_err("Error reading %s property\n", prop_name);
- return ret;
}
- vreg_info->nominal_min = voltage_levels[0];
- vreg_info->max_voltage = voltage_levels[1];
+ vreg_info->reg = reg;
- return ret;
-}
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-config", vreg_info->name);
-static int icnss_get_resources(struct device *dev)
-{
- int ret = 0;
- struct icnss_vreg_info *vreg_info;
+ prop = of_get_property(dev->of_node, prop_name, &len);
- vreg_info = &penv->vreg_info;
- if (vreg_info->reg) {
- icnss_pr_err("%s regulator is already initialized\n",
- vreg_info->name);
- return ret;
+ icnss_pr_dbg("Got regulator config, prop: %s, len: %d\n",
+ prop_name, len);
+
+ if (!prop || len < (2 * sizeof(__be32))) {
+ icnss_pr_dbg("Property %s %s\n", prop_name,
+ prop ? "invalid format" : "doesn't exist");
+ goto done;
}
- vreg_info->reg = devm_regulator_get(dev, vreg_info->name);
- if (IS_ERR(vreg_info->reg)) {
- ret = PTR_ERR(vreg_info->reg);
- if (ret == -EPROBE_DEFER) {
- icnss_pr_err("%s probe deferred!\n", vreg_info->name);
- } else {
- icnss_pr_err("Get %s failed!\n", vreg_info->name);
+ for (i = 0; (i * sizeof(__be32)) < len; i++) {
+ switch (i) {
+ case 0:
+ vreg_info->min_v = be32_to_cpup(&prop[0]);
+ break;
+ case 1:
+ vreg_info->max_v = be32_to_cpup(&prop[1]);
+ break;
+ case 2:
+ vreg_info->load_ua = be32_to_cpup(&prop[2]);
+ break;
+ case 3:
+ vreg_info->settle_delay = be32_to_cpup(&prop[3]);
+ break;
+ default:
+ icnss_pr_dbg("Property %s, ignoring value at %d\n",
+ prop_name, i);
+ break;
}
}
+
+done:
+ icnss_pr_dbg("Regulator: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n",
+ vreg_info->name, vreg_info->min_v, vreg_info->max_v,
+ vreg_info->load_ua, vreg_info->settle_delay);
+
+ return 0;
+
+out:
return ret;
}
-static int icnss_release_resources(void)
+static int icnss_get_clk_info(struct device *dev,
+ struct icnss_clk_info *clk_info)
{
+ struct clk *handle;
int ret = 0;
- struct icnss_vreg_info *vreg_info = &penv->vreg_info;
- if (!vreg_info->reg) {
- icnss_pr_err("Regulator is not initialized\n");
- return -ENOENT;
+ handle = devm_clk_get(dev, clk_info->name);
+
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ if (clk_info->required) {
+ icnss_pr_err("Clock %s isn't available: %d\n",
+ clk_info->name, ret);
+ goto out;
+ } else {
+ icnss_pr_dbg("Ignoring clock %s: %d\n", clk_info->name,
+ ret);
+ ret = 0;
+ goto out;
+ }
}
- devm_regulator_put(vreg_info->reg);
+ icnss_pr_dbg("Clock: %s, freq: %u\n", clk_info->name, clk_info->freq);
+
+ clk_info->handle = handle;
+out:
return ret;
}
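The parsing helpers above are driven by static descriptor tables that the probe path copies into the private data and walks (see the memcpy of icnss_vreg_info/icnss_clk_info further down in this patch). The tables themselves are defined outside this hunk; a hypothetical sketch of their shape, using only fields referenced here:

/* Hypothetical table contents for illustration; the real arrays live earlier
 * in icnss.c and may list different supplies/clocks. */
static struct icnss_vreg_info icnss_vreg_info[] = {
	{ .name = "vdd-io", .required = false },
};

static struct icnss_clk_info icnss_clk_info[] = {
	{ .name = "cxo_ref_clk_pin", .required = false },
};

#define ICNSS_VREG_INFO_SIZE	ARRAY_SIZE(icnss_vreg_info)
#define ICNSS_CLK_INFO_SIZE	ARRAY_SIZE(icnss_clk_info)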
static int icnss_test_mode_show(struct seq_file *s, void *data)
{
- struct icnss_data *priv = s->private;
+ struct icnss_priv *priv = s->private;
seq_puts(s, "0 : Test mode disable\n");
seq_puts(s, "1 : WLAN Firmware test\n");
@@ -2019,7 +3022,7 @@ out:
return 0;
}
-static int icnss_test_mode_fw_test_off(struct icnss_data *priv)
+static int icnss_test_mode_fw_test_off(struct icnss_priv *priv)
{
int ret;
@@ -2053,7 +3056,7 @@ static int icnss_test_mode_fw_test_off(struct icnss_data *priv)
out:
return ret;
}
-static int icnss_test_mode_fw_test(struct icnss_data *priv,
+static int icnss_test_mode_fw_test(struct icnss_priv *priv,
enum icnss_driver_mode mode)
{
int ret;
@@ -2102,7 +3105,7 @@ out:
static ssize_t icnss_test_mode_write(struct file *fp, const char __user *buf,
size_t count, loff_t *off)
{
- struct icnss_data *priv =
+ struct icnss_priv *priv =
((struct seq_file *)fp->private_data)->private;
int ret;
u32 val;
@@ -2152,7 +3155,7 @@ static const struct file_operations icnss_test_mode_fops = {
static ssize_t icnss_stats_write(struct file *fp, const char __user *buf,
size_t count, loff_t *off)
{
- struct icnss_data *priv =
+ struct icnss_priv *priv =
((struct seq_file *)fp->private_data)->private;
int ret;
u32 val;
@@ -2167,7 +3170,7 @@ static ssize_t icnss_stats_write(struct file *fp, const char __user *buf,
return count;
}
-static int icnss_stats_show_state(struct seq_file *s, struct icnss_data *priv)
+static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
{
int i;
int skip = 0;
@@ -2198,8 +3201,14 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_data *priv)
case ICNSS_FW_TEST_MODE:
seq_puts(s, "FW TEST MODE");
continue;
- case ICNSS_SUSPEND:
- seq_puts(s, "DRIVER SUSPENDED");
+ case ICNSS_SSR_ENABLED:
+ seq_puts(s, "SSR ENABLED");
+ continue;
+ case ICNSS_PDR_ENABLED:
+ seq_puts(s, "PDR ENABLED");
+ continue;
+ case ICNSS_PD_RESTART:
+ seq_puts(s, "PD RESTART");
continue;
}
@@ -2211,7 +3220,7 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_data *priv)
}
static int icnss_stats_show_capability(struct seq_file *s,
- struct icnss_data *priv)
+ struct icnss_priv *priv)
{
if (test_bit(ICNSS_FW_READY, &priv->state)) {
seq_puts(s, "\n<---------------- FW Capability ----------------->\n");
@@ -2229,7 +3238,7 @@ static int icnss_stats_show_capability(struct seq_file *s,
return 0;
}
-static int icnss_stats_show_events(struct seq_file *s, struct icnss_data *priv)
+static int icnss_stats_show_events(struct seq_file *s, struct icnss_priv *priv)
{
int i;
@@ -2244,7 +3253,7 @@ static int icnss_stats_show_events(struct seq_file *s, struct icnss_data *priv)
return 0;
}
-static int icnss_stats_show_irqs(struct seq_file *s, struct icnss_data *priv)
+static int icnss_stats_show_irqs(struct seq_file *s, struct icnss_priv *priv)
{
int i;
@@ -2266,7 +3275,7 @@ static int icnss_stats_show(struct seq_file *s, void *data)
#define ICNSS_STATS_DUMP(_s, _priv, _x) \
seq_printf(_s, "%24s: %u\n", #_x, _priv->stats._x)
- struct icnss_data *priv = s->private;
+ struct icnss_priv *priv = s->private;
ICNSS_STATS_DUMP(s, priv, ind_register_req);
ICNSS_STATS_DUMP(s, priv, ind_register_resp);
@@ -2318,7 +3327,7 @@ static const struct file_operations icnss_stats_fops = {
.llseek = seq_lseek,
};
-static int icnss_debugfs_create(struct icnss_data *priv)
+static int icnss_debugfs_create(struct icnss_priv *priv)
{
int ret = 0;
struct dentry *root_dentry;
@@ -2343,7 +3352,7 @@ out:
return ret;
}
-static void icnss_debugfs_destroy(struct icnss_data *priv)
+static void icnss_debugfs_destroy(struct icnss_priv *priv)
{
debugfs_remove_recursive(priv->root_dentry);
}
@@ -2354,107 +3363,115 @@ static int icnss_probe(struct platform_device *pdev)
struct resource *res;
int i;
struct device *dev = &pdev->dev;
+ struct icnss_priv *priv;
if (penv) {
- icnss_pr_err("penv is already initialized\n");
+ icnss_pr_err("Driver is already initialized\n");
return -EEXIST;
}
- penv = devm_kzalloc(&pdev->dev, sizeof(*penv), GFP_KERNEL);
- if (!penv)
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- dev_set_drvdata(dev, penv);
+ dev_set_drvdata(dev, priv);
- penv->pdev = pdev;
+ priv->pdev = pdev;
- ret = icnss_dt_parse_vreg_info(dev, &penv->vreg_info, "vdd-io");
- if (ret < 0) {
- icnss_pr_err("Failed to parse vdd io data: %d\n", ret);
- goto out;
+ memcpy(priv->vreg_info, icnss_vreg_info, sizeof(icnss_vreg_info));
+ for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
+ ret = icnss_get_vreg_info(dev, &priv->vreg_info[i]);
+
+ if (ret)
+ goto out;
}
- ret = icnss_get_resources(dev);
- if (ret < 0) {
- icnss_pr_err("Regulator setup failed (%d)\n", ret);
- goto out;
+ memcpy(priv->clk_info, icnss_clk_info, sizeof(icnss_clk_info));
+ for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+ ret = icnss_get_clk_info(dev, &priv->clk_info[i]);
+ if (ret)
+ goto out;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
if (!res) {
- icnss_pr_err("Memory base not found\n");
+ icnss_pr_err("Memory base not found in DT\n");
ret = -EINVAL;
- goto release_regulator;
+ goto out;
}
- penv->mem_base_pa = res->start;
- penv->mem_base_va = ioremap(penv->mem_base_pa, resource_size(res));
- if (!penv->mem_base_va) {
- icnss_pr_err("mem_base ioremap failed\n");
+
+ priv->mem_base_pa = res->start;
+ priv->mem_base_va = devm_ioremap(dev, priv->mem_base_pa,
+ resource_size(res));
+ if (!priv->mem_base_va) {
+ icnss_pr_err("Memory base ioremap failed: phy addr: %pa\n",
+ &priv->mem_base_pa);
ret = -EINVAL;
- goto release_regulator;
+ goto out;
}
+ icnss_pr_dbg("MEM_BASE pa: %pa, va: 0x%p\n", &priv->mem_base_pa,
+ priv->mem_base_va);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mpm_config");
if (!res) {
- icnss_pr_err("mpm_config not found\n");
+ icnss_pr_err("MPM Config not found\n");
ret = -EINVAL;
- goto unmap_mem_base;
+ goto out;
}
- penv->mpm_config_pa = res->start;
- penv->mpm_config_va = ioremap(penv->mpm_config_pa, resource_size(res));
- if (!penv->mpm_config_va) {
- icnss_pr_err("mpm_config ioremap failed, phy addr: %pa\n",
- &penv->mpm_config_pa);
+ priv->mpm_config_pa = res->start;
+ priv->mpm_config_va = devm_ioremap(dev, priv->mpm_config_pa,
+ resource_size(res));
+ if (!priv->mpm_config_va) {
+ icnss_pr_err("MPM Config ioremap failed, phy addr: %pa\n",
+ &priv->mpm_config_pa);
ret = -EINVAL;
- goto unmap_mem_base;
+ goto out;
}
- icnss_pr_dbg("mpm_config_pa: %pa, mpm_config_va: %p\n",
- &penv->mpm_config_pa, penv->mpm_config_va);
+
+ icnss_pr_dbg("MPM_CONFIG pa: %pa, va: 0x%p\n", &priv->mpm_config_pa,
+ priv->mpm_config_va);
for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
- res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i);
if (!res) {
icnss_pr_err("Fail to get IRQ-%d\n", i);
ret = -ENODEV;
- goto unmap_mpm_config;
+ goto out;
} else {
- penv->ce_irqs[i] = res->start;
+ priv->ce_irqs[i] = res->start;
}
}
- if (of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory",
- &penv->msa_mem_size) == 0) {
- if (penv->msa_mem_size) {
- penv->msa_va = dma_alloc_coherent(&pdev->dev,
- penv->msa_mem_size,
- &penv->msa_pa,
- GFP_KERNEL);
- if (!penv->msa_va) {
- icnss_pr_err("DMA alloc failed for MSA\n");
- ret = -EINVAL;
- goto unmap_mpm_config;
- }
+ ret = of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory",
+ &priv->msa_mem_size);
- icnss_pr_dbg("MSA va: %p, MSA pa: %pa\n", penv->msa_va,
- &penv->msa_pa);
- }
- } else {
- icnss_pr_err("Fail to get MSA Memory Size\n");
- ret = -ENODEV;
- goto unmap_mpm_config;
+ if (ret || priv->msa_mem_size == 0) {
+ icnss_pr_err("Fail to get MSA Memory Size: %u, ret: %d\n",
+ priv->msa_mem_size, ret);
+ goto out;
}
+ priv->msa_va = dmam_alloc_coherent(&pdev->dev, priv->msa_mem_size,
+ &priv->msa_pa, GFP_KERNEL);
+ if (!priv->msa_va) {
+ icnss_pr_err("DMA alloc failed for MSA\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%p\n", &priv->msa_pa,
+ priv->msa_va);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smmu_iova_base");
if (!res) {
icnss_pr_err("SMMU IOVA base not found\n");
} else {
- penv->smmu_iova_start = res->start;
- penv->smmu_iova_len = resource_size(res);
+ priv->smmu_iova_start = res->start;
+ priv->smmu_iova_len = resource_size(res);
icnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: %zu\n",
- &penv->smmu_iova_start,
- penv->smmu_iova_len);
+ &priv->smmu_iova_start,
+ priv->smmu_iova_len);
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM,
@@ -2462,42 +3479,39 @@ static int icnss_probe(struct platform_device *pdev)
if (!res) {
icnss_pr_err("SMMU IOVA IPA not found\n");
} else {
- penv->smmu_iova_ipa_start = res->start;
- penv->smmu_iova_ipa_len = resource_size(res);
+ priv->smmu_iova_ipa_start = res->start;
+ priv->smmu_iova_ipa_len = resource_size(res);
icnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: %zu\n",
- &penv->smmu_iova_ipa_start,
- penv->smmu_iova_ipa_len);
+ &priv->smmu_iova_ipa_start,
+ priv->smmu_iova_ipa_len);
}
- ret = icnss_smmu_init(&pdev->dev);
+ ret = icnss_smmu_init(priv);
if (ret < 0) {
icnss_pr_err("SMMU init failed, err = %d, start: %pad, len: %zx\n",
- ret, &penv->smmu_iova_start,
- penv->smmu_iova_len);
- goto err_smmu_init;
+ ret, &priv->smmu_iova_start,
+ priv->smmu_iova_len);
+ goto out;
}
- ret = icnss_bw_init(penv);
+ ret = icnss_bw_init(priv);
if (ret)
- goto err_bw_init;
+ goto out_smmu_deinit;
}
- penv->skip_qmi = of_property_read_bool(dev->of_node,
- "qcom,skip-qmi");
-
- spin_lock_init(&penv->event_lock);
- spin_lock_init(&penv->on_off_lock);
+ spin_lock_init(&priv->event_lock);
+ spin_lock_init(&priv->on_off_lock);
- penv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
- if (!penv->event_wq) {
+ priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
+ if (!priv->event_wq) {
icnss_pr_err("Workqueue creation failed\n");
ret = -EFAULT;
- goto err_alloc_workqueue;
+ goto out_bw_deinit;
}
- INIT_WORK(&penv->event_work, icnss_driver_event_work);
- INIT_WORK(&penv->qmi_recv_msg_work, icnss_qmi_wlfw_clnt_notify_work);
- INIT_LIST_HEAD(&penv->event_list);
+ INIT_WORK(&priv->event_work, icnss_driver_event_work);
+ INIT_WORK(&priv->qmi_recv_msg_work, icnss_qmi_wlfw_clnt_notify_work);
+ INIT_LIST_HEAD(&priv->event_list);
ret = qmi_svc_event_notifier_register(WLFW_SERVICE_ID_V01,
WLFW_SERVICE_VERS_V01,
@@ -2505,39 +3519,28 @@ static int icnss_probe(struct platform_device *pdev)
&wlfw_clnt_nb);
if (ret < 0) {
icnss_pr_err("Notifier register failed: %d\n", ret);
- goto err_qmi;
+ goto out_destroy_wq;
}
- icnss_debugfs_create(penv);
+ icnss_enable_recovery(priv);
+
+ icnss_debugfs_create(priv);
+
+ penv = priv;
icnss_pr_info("Platform driver probed successfully\n");
- return ret;
+ return 0;
-err_qmi:
- if (penv->event_wq)
- destroy_workqueue(penv->event_wq);
-err_alloc_workqueue:
- icnss_bw_deinit(penv);
-err_bw_init:
- if (penv->smmu_mapping)
- icnss_smmu_remove(&pdev->dev);
-err_smmu_init:
- if (penv->msa_va)
- dma_free_coherent(&pdev->dev, penv->msa_mem_size,
- penv->msa_va, penv->msa_pa);
-unmap_mpm_config:
- if (penv->mpm_config_va)
- iounmap(penv->mpm_config_va);
-unmap_mem_base:
- if (penv->mem_base_va)
- iounmap(penv->mem_base_va);
-release_regulator:
- icnss_release_resources();
+out_destroy_wq:
+ destroy_workqueue(priv->event_wq);
+out_bw_deinit:
+ icnss_bw_deinit(priv);
+out_smmu_deinit:
+ icnss_smmu_deinit(priv);
out:
dev_set_drvdata(dev, NULL);
- devm_kfree(&pdev->dev, penv);
- penv = NULL;
+
return ret;
}
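The reworked probe relies on device-managed allocations (devm_kzalloc, devm_ioremap, dmam_alloc_coherent), which is why the error path above only has to unwind the workqueue, bus votes and SMMU mapping. A minimal sketch of that pattern, assuming the usual platform-device and DMA-mapping headers:

/* Illustrative only: devm/dmam resources are released automatically when
 * probe fails or the device is removed, so no iounmap()/dma_free_coherent()
 * is needed in the error path. */
static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	dma_addr_t pa;
	void *va;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	va = dmam_alloc_coherent(&pdev->dev, SZ_4K, &pa, GFP_KERNEL);
	if (!va)
		return -ENOMEM;	/* base is unmapped automatically */

	return 0;
}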
@@ -2547,6 +3550,10 @@ static int icnss_remove(struct platform_device *pdev)
icnss_debugfs_destroy(penv);
+ icnss_modem_ssr_unregister_notifier(penv);
+
+ icnss_pdr_unregister_notifier(penv);
+
qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01,
WLFW_SERVICE_VERS_V01,
WLFW_SERVICE_INS_ID_V01,
@@ -2556,18 +3563,8 @@ static int icnss_remove(struct platform_device *pdev)
icnss_bw_deinit(penv);
- if (penv->msa_va)
- dma_free_coherent(&pdev->dev, penv->msa_mem_size,
- penv->msa_va, penv->msa_pa);
- if (penv->mpm_config_va)
- iounmap(penv->mpm_config_va);
- if (penv->mem_base_va)
- iounmap(penv->mem_base_va);
-
icnss_hw_power_off(penv);
- icnss_release_resources();
-
dev_set_drvdata(&pdev->dev, NULL);
return 0;
diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c
index fdff6f2140a3..5eaf2db32d21 100644
--- a/drivers/soc/qcom/rpm-smd.c
+++ b/drivers/soc/qcom/rpm-smd.c
@@ -116,6 +116,7 @@ static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
static bool standalone;
static int probe_status = -EPROBE_DEFER;
static int msm_rpm_read_smd_data(char *buf);
+static void msm_rpm_process_ack(uint32_t msg_id, int errno);
int msm_rpm_register_notifier(struct notifier_block *nb)
{
@@ -615,6 +616,7 @@ struct msm_rpm_wait_data {
bool ack_recd;
int errno;
struct completion ack;
+ bool delete_on_ack;
};
DEFINE_SPINLOCK(msm_rpm_list_lock);
@@ -793,23 +795,45 @@ static int msm_rpm_read_sleep_ack(void)
{
int ret;
char buf[MAX_ERR_BUFFER_SIZE] = {0};
+ uint32_t msg_id;
if (glink_enabled)
ret = msm_rpm_glink_rx_poll(glink_data->glink_handle);
else {
ret = msm_rpm_read_smd_data(buf);
- if (!ret)
+ if (!ret) {
+ /*
+ * Mimic Glink behavior to ensure that the
+ * data is read and the msg is removed from
+ * the wait list. We should have gotten here
+ * only when there are no drivers waiting on
+ * ACKs. msm_rpm_get_entry_from_msg_id()
+ * returns non-NULL only then.
+ */
+ msg_id = msm_rpm_get_msg_id_from_ack(buf);
+ msm_rpm_process_ack(msg_id, 0);
ret = smd_is_pkt_avail(msm_rpm_data.ch_info);
+ }
}
return ret;
}
+static void msm_rpm_flush_noack_messages(void)
+{
+ while (!list_empty(&msm_rpm_wait_list)) {
+ if (!msm_rpm_read_sleep_ack())
+ break;
+ }
+}
+
static int msm_rpm_flush_requests(bool print)
{
struct rb_node *t;
int ret;
int count = 0;
+ msm_rpm_flush_noack_messages();
+
for (t = rb_first(&tr_root); t; t = rb_next(t)) {
struct slp_buf *s = rb_entry(t, struct slp_buf, node);
@@ -1078,14 +1102,18 @@ static void msm_rpm_notify(void *data, unsigned event)
bool msm_rpm_waiting_for_ack(void)
{
- bool ret;
+ bool ret = false;
unsigned long flags;
+ struct msm_rpm_wait_data *elem = NULL;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
- ret = list_empty(&msm_rpm_wait_list);
+ elem = list_first_entry_or_null(&msm_rpm_wait_list,
+ struct msm_rpm_wait_data, list);
+ if (elem)
+ ret = !elem->delete_on_ack;
spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
- return !ret;
+ return ret;
}
static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
@@ -1124,7 +1152,7 @@ static uint32_t msm_rpm_get_next_msg_id(void)
return id;
}
-static int msm_rpm_add_wait_list(uint32_t msg_id)
+static int msm_rpm_add_wait_list(uint32_t msg_id, bool delete_on_ack)
{
unsigned long flags;
struct msm_rpm_wait_data *data =
@@ -1137,8 +1165,12 @@ static int msm_rpm_add_wait_list(uint32_t msg_id)
data->ack_recd = false;
data->msg_id = msg_id;
data->errno = INIT_ERROR;
+ data->delete_on_ack = delete_on_ack;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
- list_add(&data->list, &msm_rpm_wait_list);
+ if (delete_on_ack)
+ list_add_tail(&data->list, &msm_rpm_wait_list);
+ else
+ list_add(&data->list, &msm_rpm_wait_list);
spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
return 0;
@@ -1156,21 +1188,24 @@ static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
static void msm_rpm_process_ack(uint32_t msg_id, int errno)
{
- struct list_head *ptr;
+ struct list_head *ptr, *next;
struct msm_rpm_wait_data *elem = NULL;
unsigned long flags;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
- list_for_each(ptr, &msm_rpm_wait_list) {
+ list_for_each_safe(ptr, next, &msm_rpm_wait_list) {
elem = list_entry(ptr, struct msm_rpm_wait_data, list);
- if (elem && (elem->msg_id == msg_id)) {
+ if (elem->msg_id == msg_id) {
elem->errno = errno;
elem->ack_recd = true;
complete(&elem->ack);
+ if (elem->delete_on_ack) {
+ list_del(&elem->list);
+ kfree(elem);
+ }
break;
}
- elem = NULL;
}
/* Special case where the sleep driver doesn't
* wait for ACKs. This would decrease the latency involved with
@@ -1533,8 +1568,7 @@ static int msm_rpm_send_data(struct msm_rpm_request *cdata,
return ret;
}
- if (!noack)
- msm_rpm_add_wait_list(msg_id);
+ msm_rpm_add_wait_list(msg_id, noack);
ret = msm_rpm_send_buffer(&cdata->buf[0], msg_size, noirq);
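The new delete_on_ack flag splits the wait list into two kinds of entries: requests a caller blocks on (inserted at the head and freed by the waiter) and fire-and-forget noack requests (appended to the tail and freed directly by msm_rpm_process_ack()). A self-contained sketch of that list policy, with hypothetical names:

#include <linux/list.h>
#include <linux/types.h>

struct demo_req {
	struct list_head node;
	bool delete_on_ack;
};

static LIST_HEAD(demo_wait_list);

/* Waited requests go to the head, fire-and-forget ones to the tail, so the
 * head entry alone tells us whether anyone is actually blocked on an ACK. */
static void demo_add(struct demo_req *req, bool delete_on_ack)
{
	req->delete_on_ack = delete_on_ack;
	if (delete_on_ack)
		list_add_tail(&req->node, &demo_wait_list);
	else
		list_add(&req->node, &demo_wait_list);
}

static bool demo_waiting_for_ack(void)
{
	struct demo_req *head = list_first_entry_or_null(&demo_wait_list,
							 struct demo_req, node);

	return head && !head->delete_on_ack;
}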
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
new file mode 100644
index 000000000000..a1344f0780b0
--- /dev/null
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -0,0 +1,500 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/smcinvoke.h>
+#include <soc/qcom/scm.h>
+#include <asm/cacheflush.h>
+#include "smcinvoke_object.h"
+
+#define SMCINVOKE_TZ_PARAM_ID 0x224
+#define SMCINVOKE_TZ_CMD 0x32000600
+#define SMCINVOKE_FILE "smcinvoke"
+#define SMCINVOKE_TZ_ROOT_OBJ 1
+#define SMCINVOKE_TZ_MIN_BUF_SIZE 4096
+#define SMCINVOKE_ARGS_ALIGN_SIZE (sizeof(uint64_t))
+#define SMCINVOKE_TZ_OBJ_NULL 0
+
+#define FOR_ARGS(ndxvar, counts, section) \
+ for (ndxvar = object_counts_index_##section(counts); \
+ ndxvar < (object_counts_index_##section(counts) \
+ + object_counts_num_##section(counts)); \
+ ++ndxvar)
+
+static long smcinvoke_ioctl(struct file *, unsigned, unsigned long);
+static int smcinvoke_open(struct inode *, struct file *);
+static int smcinvoke_release(struct inode *, struct file *);
+
+static const struct file_operations smcinvoke_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = smcinvoke_ioctl,
+ .compat_ioctl = smcinvoke_ioctl,
+ .open = smcinvoke_open,
+ .release = smcinvoke_release,
+};
+
+static struct miscdevice smcinvoke_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "smcinvoke",
+ .fops = &smcinvoke_fops
+};
+
+struct smcinvoke_buf_hdr {
+ uint32_t offset;
+ uint32_t size;
+};
+
+union smcinvoke_tz_args {
+ struct smcinvoke_buf_hdr b;
+ uint32_t tzhandle;
+};
+struct smcinvoke_msg_hdr {
+ uint32_t tzhandle;
+ uint32_t op;
+ uint32_t counts;
+};
+
+struct smcinvoke_tzobj_context {
+ uint32_t tzhandle;
+};
+
+/*
+ * size_add saturates at SIZE_MAX. If integer overflow is detected,
+ * this function returns SIZE_MAX; otherwise the normal a+b is returned.
+ */
+static inline size_t size_add(size_t a, size_t b)
+{
+ return (b > (SIZE_MAX - a)) ? SIZE_MAX : a + b;
+}
+
+/*
+ * pad_size is used along with size_align to define a buffer-overflow-safe
+ * version of ALIGN()
+ */
+static inline size_t pad_size(size_t a, size_t b)
+{
+ return (~a + 1) % b;
+}
+
+/*
+ * size_align saturates at SIZE_MAX. If integer overflow is detected, this
+ * function returns SIZE_MAX; otherwise the next aligned size is returned.
+ */
+static inline size_t size_align(size_t a, size_t b)
+{
+ return size_add(a, pad_size(a, b));
+}
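A small worked example of how these saturating helpers are meant to be used when sizing the invoke message, illustrative only:

/* Illustrative only: if any step overflows, the result sticks at SIZE_MAX and
 * the later "offset > buf_size" style checks reject the request safely. */
static size_t demo_msg_size(size_t hdr, size_t payload)
{
	size_t total = size_add(hdr, payload);

	/* e.g. size_align(131, 8) == 136; size_add(x, SIZE_MAX) == SIZE_MAX */
	return size_align(total, SMCINVOKE_ARGS_ALIGN_SIZE);
}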
+
+/*
+ * This function retrieves the file pointer corresponding to the FD provided.
+ * It stores the retrieved file pointer until the IOCTL call is concluded.
+ * Once the call completes, all stored file pointers are released. File
+ * pointers are stored to prevent other threads from releasing the FD while
+ * the IOCTL is in progress.
+ */
+static int get_tzhandle_from_fd(int64_t fd, struct file **filp,
+ uint32_t *tzhandle)
+{
+ int ret = -EBADF;
+ struct file *tmp_filp = NULL;
+ struct smcinvoke_tzobj_context *tzobj = NULL;
+
+ if (fd == SMCINVOKE_USERSPACE_OBJ_NULL) {
+ *tzhandle = SMCINVOKE_TZ_OBJ_NULL;
+ ret = 0;
+ goto out;
+ } else if (fd < SMCINVOKE_USERSPACE_OBJ_NULL) {
+ goto out;
+ }
+
+ tmp_filp = fget(fd);
+ if (!tmp_filp)
+ goto out;
+
+ /* Verify if filp is smcinvoke device's file pointer */
+ if (!tmp_filp->f_op || !tmp_filp->private_data ||
+ (tmp_filp->f_op != &smcinvoke_fops)) {
+ fput(tmp_filp);
+ goto out;
+ }
+
+ tzobj = tmp_filp->private_data;
+ *tzhandle = tzobj->tzhandle;
+ *filp = tmp_filp;
+ ret = 0;
+out:
+ return ret;
+}
+
+static int get_fd_from_tzhandle(uint32_t tzhandle, int64_t *fd)
+{
+ int unused_fd = -1, ret = -1;
+ struct file *f = NULL;
+ struct smcinvoke_tzobj_context *cxt = NULL;
+
+ if (tzhandle == SMCINVOKE_TZ_OBJ_NULL) {
+ *fd = SMCINVOKE_USERSPACE_OBJ_NULL;
+ ret = 0;
+ goto out;
+ }
+
+ cxt = kzalloc(sizeof(*cxt), GFP_KERNEL);
+ if (!cxt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ unused_fd = get_unused_fd_flags(O_RDWR);
+ if (unused_fd < 0)
+ goto out;
+
+ f = anon_inode_getfile(SMCINVOKE_FILE, &smcinvoke_fops, cxt, O_RDWR);
+ if (IS_ERR(f))
+ goto out;
+
+ *fd = unused_fd;
+ fd_install(*fd, f);
+ ((struct smcinvoke_tzobj_context *)
+ (f->private_data))->tzhandle = tzhandle;
+ return 0;
+out:
+ if (unused_fd >= 0)
+ put_unused_fd(unused_fd);
+ kfree(cxt);
+
+ return ret;
+}
+
+static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len,
+ const uint8_t *out_buf, size_t out_buf_len,
+ int32_t *smcinvoke_result)
+{
+ int ret = 0;
+ struct scm_desc desc = {0};
+ size_t inbuf_flush_size = (1UL << get_order(in_buf_len)) * PAGE_SIZE;
+ size_t outbuf_flush_size = (1UL << get_order(out_buf_len)) * PAGE_SIZE;
+
+ desc.arginfo = SMCINVOKE_TZ_PARAM_ID;
+ desc.args[0] = (uint64_t)virt_to_phys(in_buf);
+ desc.args[1] = in_buf_len;
+ desc.args[2] = (uint64_t)virt_to_phys(out_buf);
+ desc.args[3] = out_buf_len;
+
+ dmac_flush_range(in_buf, in_buf + inbuf_flush_size);
+ dmac_flush_range(out_buf, out_buf + outbuf_flush_size);
+
+ ret = scm_call2(SMCINVOKE_TZ_CMD, &desc);
+ *smcinvoke_result = (int32_t)desc.ret[1];
+ if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0]) {
+ pr_err("SCM call failed with ret val = %d %d %d %d\n",
+ ret, (int)desc.ret[0],
+ (int)desc.ret[1], (int)desc.ret[2]);
+ ret = ret | desc.ret[0] | desc.ret[1] | desc.ret[2];
+ }
+ dmac_inv_range(in_buf, in_buf + inbuf_flush_size);
+ dmac_inv_range(out_buf, out_buf + outbuf_flush_size);
+ return ret;
+}
+
+static int marshal_out(void *buf, uint32_t buf_size,
+ struct smcinvoke_cmd_req *req,
+ union smcinvoke_arg *args_buf)
+{
+ int ret = -EINVAL, i = 0;
+ union smcinvoke_tz_args *tz_args = NULL;
+ size_t offset = sizeof(struct smcinvoke_msg_hdr) +
+ object_counts_total(req->counts) *
+ sizeof(union smcinvoke_tz_args);
+
+ if (offset > buf_size)
+ goto out;
+
+ tz_args = (union smcinvoke_tz_args *)
+ (buf + sizeof(struct smcinvoke_msg_hdr));
+
+ tz_args += object_counts_num_BI(req->counts);
+
+ FOR_ARGS(i, req->counts, BO) {
+ args_buf[i].b.size = tz_args->b.size;
+ if ((buf_size - tz_args->b.offset < tz_args->b.size) ||
+ tz_args->b.offset > buf_size) {
+ pr_err("%s: buffer overflow detected\n", __func__);
+ goto out;
+ }
+ if (copy_to_user((void __user *)(args_buf[i].b.addr),
+ (uint8_t *)(buf) + tz_args->b.offset,
+ tz_args->b.size)) {
+ pr_err("Error %d copying ctxt to user\n", ret);
+ goto out;
+ }
+ tz_args++;
+ }
+ tz_args += object_counts_num_OI(req->counts);
+
+ FOR_ARGS(i, req->counts, OO) {
+ /*
+ * create a new FD and assign to output object's
+ * context
+ */
+ ret = get_fd_from_tzhandle(tz_args->tzhandle,
+ &(args_buf[i].o.fd));
+ if (ret)
+ goto out;
+ tz_args++;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+/*
+ * SMC expects arguments in the following format
+ * ---------------------------------------------------------------------------
+ * | cxt | op | counts | ptr|size |ptr|size...|ORef|ORef|...| rest of payload |
+ * ---------------------------------------------------------------------------
+ * cxt: target, op: operation, counts: total arguments
+ * offset: offset is from the beginning of the buffer, i.e. cxt
+ * size: size is an 8-byte-aligned value
+ */
+static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req,
+ const union smcinvoke_arg *args_buf)
+{
+ uint32_t i = 0;
+
+ size_t total_size = sizeof(struct smcinvoke_msg_hdr) +
+ object_counts_total(req->counts) *
+ sizeof(union smcinvoke_tz_args);
+
+ /* Computed total_size should be 8-byte aligned from the start of buf */
+ total_size = ALIGN(total_size, SMCINVOKE_ARGS_ALIGN_SIZE);
+
+ /* each buffer has to be 8-byte aligned */
+ while (i < object_counts_num_buffers(req->counts))
+ total_size = size_add(total_size,
+ size_align(args_buf[i++].b.size, SMCINVOKE_ARGS_ALIGN_SIZE));
+
+ /* Since we're using get_free_pages, no need for explicit PAGE align */
+ return total_size;
+}
+
+static int marshal_in(const struct smcinvoke_cmd_req *req,
+ const union smcinvoke_arg *args_buf, uint32_t tzhandle,
+ uint8_t *buf, size_t buf_size, struct file **arr_filp)
+{
+ int ret = -EINVAL, i = 0;
+ union smcinvoke_tz_args *tz_args = NULL;
+ struct smcinvoke_msg_hdr msg_hdr = {tzhandle, req->op, req->counts};
+ uint32_t offset = sizeof(struct smcinvoke_msg_hdr) +
+ sizeof(union smcinvoke_tz_args) *
+ object_counts_total(req->counts);
+
+ if (buf_size < offset)
+ goto out;
+
+ *(struct smcinvoke_msg_hdr *)buf = msg_hdr;
+ tz_args = (union smcinvoke_tz_args *)
+ (buf + sizeof(struct smcinvoke_msg_hdr));
+
+ FOR_ARGS(i, req->counts, BI) {
+ offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+ if ((offset > buf_size) ||
+ (args_buf[i].b.size > (buf_size - offset)))
+ goto out;
+
+ tz_args->b.offset = offset;
+ tz_args->b.size = args_buf[i].b.size;
+ tz_args++;
+
+ if (copy_from_user(buf+offset,
+ (void __user *)(args_buf[i].b.addr),
+ args_buf[i].b.size))
+ goto out;
+
+ offset += args_buf[i].b.size;
+ }
+ FOR_ARGS(i, req->counts, BO) {
+ offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+ if ((offset > buf_size) ||
+ (args_buf[i].b.size > (buf_size - offset)))
+ goto out;
+
+ tz_args->b.offset = offset;
+ tz_args->b.size = args_buf[i].b.size;
+ tz_args++;
+
+ offset += args_buf[i].b.size;
+ }
+ FOR_ARGS(i, req->counts, OI) {
+ if (get_tzhandle_from_fd(args_buf[i].o.fd,
+ &arr_filp[i], &(tz_args->tzhandle)))
+ goto out;
+ tz_args++;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+long smcinvoke_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
+{
+ int ret = -1, i = 0, nr_args = 0;
+ struct smcinvoke_cmd_req req = {0};
+ void *in_msg = NULL;
+ size_t inmsg_size = 0;
+ void *out_msg = NULL;
+ union smcinvoke_arg *args_buf = NULL;
+ struct file *filp_to_release[object_counts_max_OO] = {NULL};
+ struct smcinvoke_tzobj_context *tzobj = filp->private_data;
+
+ switch (cmd) {
+ case SMCINVOKE_IOCTL_INVOKE_REQ:
+ if (_IOC_SIZE(cmd) != sizeof(req)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ nr_args = object_counts_num_buffers(req.counts) +
+ object_counts_num_objects(req.counts);
+
+ if (!nr_args || req.argsize != sizeof(union smcinvoke_arg)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ args_buf = kzalloc(nr_args * req.argsize, GFP_KERNEL);
+ if (!args_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = copy_from_user(args_buf, (void __user *)(req.args),
+ nr_args * req.argsize);
+
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ inmsg_size = compute_in_msg_size(&req, args_buf);
+ in_msg = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(inmsg_size));
+ if (!in_msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ out_msg = (void *)__get_free_page(GFP_KERNEL);
+ if (!out_msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = marshal_in(&req, args_buf, tzobj->tzhandle, in_msg,
+ inmsg_size, filp_to_release);
+ if (ret)
+ goto out;
+
+ ret = prepare_send_scm_msg(in_msg, inmsg_size, out_msg,
+ SMCINVOKE_TZ_MIN_BUF_SIZE, &req.result);
+ if (ret)
+ goto out;
+
+ ret = marshal_out(in_msg, inmsg_size, &req, args_buf);
+
+ ret |= copy_to_user((void __user *)(req.args), args_buf,
+ nr_args * req.argsize);
+ ret |= copy_to_user((void __user *)arg, &req, sizeof(req));
+ if (ret)
+ goto out;
+
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+out:
+ free_page((long)out_msg);
+ free_pages((long)in_msg, get_order(inmsg_size));
+ kfree(args_buf);
+ for (i = 0; i < object_counts_max_OO; i++) {
+ if (filp_to_release[i])
+ fput(filp_to_release[i]);
+ }
+
+ return ret;
+}
+
+static int smcinvoke_open(struct inode *nodp, struct file *filp)
+{
+ struct smcinvoke_tzobj_context *tzcxt = NULL;
+
+ tzcxt = kzalloc(sizeof(*tzcxt), GFP_KERNEL);
+ if (!tzcxt)
+ return -ENOMEM;
+
+ tzcxt->tzhandle = SMCINVOKE_TZ_ROOT_OBJ;
+ filp->private_data = tzcxt;
+
+ return 0;
+}
+
+
+static int smcinvoke_release(struct inode *nodp, struct file *filp)
+{
+ int ret = 0, smcinvoke_result = 0;
+ uint8_t *in_buf = NULL;
+ uint8_t *out_buf = NULL;
+ struct smcinvoke_msg_hdr hdr = {0};
+ struct smcinvoke_tzobj_context *tzobj = filp->private_data;
+ uint32_t tzhandle = tzobj->tzhandle;
+
+ /* The root object is special in the sense that it is indestructible */
+ if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ)
+ goto out;
+
+ in_buf = (uint8_t *)__get_free_page(GFP_KERNEL);
+ out_buf = (uint8_t *)__get_free_page(GFP_KERNEL);
+ if (!in_buf || !out_buf)
+ goto out;
+
+ hdr.tzhandle = tzhandle;
+ hdr.op = object_op_RELEASE;
+ hdr.counts = 0;
+ *(struct smcinvoke_msg_hdr *)in_buf = hdr;
+
+ ret = prepare_send_scm_msg(in_buf, SMCINVOKE_TZ_MIN_BUF_SIZE,
+ out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE, &smcinvoke_result);
+out:
+ kfree(filp->private_data);
+ free_page((long)in_buf);
+ free_page((long)out_buf);
+
+ return ret;
+}
+
+static int __init smcinvoke_init(void)
+{
+ return misc_register(&smcinvoke_miscdev);
+}
+
+device_initcall(smcinvoke_init);
+MODULE_LICENSE("GPL v2");
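For reference, a hypothetical userspace invocation would open the misc device (inheriting the root TZ object) and issue SMCINVOKE_IOCTL_INVOKE_REQ with the argument counts packed into req.counts. The uapi header is not part of this diff, so the field and ioctl names below are assumptions inferred from how the driver dereferences the request:

/* Hypothetical userspace sketch; struct/ioctl names assumed from the driver
 * side of this patch, the real definitions are in <linux/smcinvoke.h>. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/smcinvoke.h>

int demo_invoke(uint32_t op, void *in, uint32_t in_len)
{
	struct smcinvoke_cmd_req req;
	union smcinvoke_arg arg;
	int fd, ret;

	fd = open("/dev/smcinvoke", O_RDWR);	/* fd holds the root TZ object */
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	memset(&arg, 0, sizeof(arg));

	arg.b.addr = (uint64_t)(uintptr_t)in;	/* one input buffer (BI) */
	arg.b.size = in_len;

	req.op      = op;
	req.counts  = 1;			/* num_BI = 1, everything else 0 */
	req.argsize = sizeof(union smcinvoke_arg);
	req.args    = (uint64_t)(uintptr_t)&arg;

	ret = ioctl(fd, SMCINVOKE_IOCTL_INVOKE_REQ, &req);
	return ret ? ret : req.result;
}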
diff --git a/drivers/soc/qcom/smcinvoke_object.h b/drivers/soc/qcom/smcinvoke_object.h
new file mode 100644
index 000000000000..138a1cc05717
--- /dev/null
+++ b/drivers/soc/qcom/smcinvoke_object.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __SMCINVOKE_OBJECT_H
+#define __SMCINVOKE_OBJECT_H
+
+#include <linux/types.h>
+
+#define object_op_METHOD_MASK ((uint32_t)0x0000FFFFu)
+#define object_op_RELEASE (object_op_METHOD_MASK - 0)
+#define object_op_RETAIN (object_op_METHOD_MASK - 1)
+
+#define object_counts_max_BI 0xF
+#define object_counts_max_BO 0xF
+#define object_counts_max_OI 0xF
+#define object_counts_max_OO 0xF
+
+/* unpack counts */
+
+#define object_counts_num_BI(k) ((size_t) (((k) >> 0) & object_counts_max_BI))
+#define object_counts_num_BO(k) ((size_t) (((k) >> 4) & object_counts_max_BO))
+#define object_counts_num_OI(k) ((size_t) (((k) >> 8) & object_counts_max_OI))
+#define object_counts_num_OO(k) ((size_t) (((k) >> 12) & object_counts_max_OO))
+#define object_counts_num_buffers(k) \
+ (object_counts_num_BI(k) + object_counts_num_BO(k))
+
+#define object_counts_num_objects(k) \
+ (object_counts_num_OI(k) + object_counts_num_OO(k))
+
+/* Indices into args[] */
+
+#define object_counts_index_BI(k) 0
+#define object_counts_index_BO(k) \
+ (object_counts_index_BI(k) + object_counts_num_BI(k))
+#define object_counts_index_OI(k) \
+ (object_counts_index_BO(k) + object_counts_num_BO(k))
+#define object_counts_index_OO(k) \
+ (object_counts_index_OI(k) + object_counts_num_OI(k))
+#define object_counts_total(k) \
+ (object_counts_index_OO(k) + object_counts_num_OO(k))
+
+
+#endif /* __SMCINVOKE_OBJECT_H */
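A worked example of the packing: the counts word stores the four argument counts in nibbles (BI in bits 3..0, BO in 7..4, OI in 11..8, OO in 15..12), and the index macros turn them into offsets into the flat args[] array.

/* counts = 0x1021  ->  1 BI, 2 BO, 0 OI, 1 OO
 *
 *   object_counts_num_BI(0x1021)   == 1
 *   object_counts_num_BO(0x1021)   == 2
 *   object_counts_num_OI(0x1021)   == 0
 *   object_counts_num_OO(0x1021)   == 1
 *
 *   object_counts_index_BI(0x1021) == 0    args[0]    : input buffer
 *   object_counts_index_BO(0x1021) == 1    args[1..2] : output buffers
 *   object_counts_index_OI(0x1021) == 3    (no input objects)
 *   object_counts_index_OO(0x1021) == 3    args[3]    : output object
 *   object_counts_total(0x1021)    == 4    entries in args[]
 */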
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 56ca6835fc12..6a1a87ead6e4 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -918,10 +918,9 @@ static void check_pbl_done(struct pil_tz_data *d)
err_value = __raw_readl(d->err_status);
pr_debug("PBL_DONE received from %s!\n", d->subsys_desc.name);
- if (!err_value)
- __raw_writel(BIT(d->bits_arr[PBL_DONE]), d->irq_clear);
- else
+ if (err_value)
pr_err("PBL error status register: 0x%08x\n", err_value);
+ __raw_writel(BIT(d->bits_arr[PBL_DONE]), d->irq_clear);
}
static void check_err_ready(struct pil_tz_data *d)
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 5cc655908fda..3df80c73b74a 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2513,6 +2513,7 @@ void usb_hc_died (struct usb_hcd *hcd)
}
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
/* Make sure that the other roothub is also deallocated. */
+ usb_atomic_notify_dead_bus(&hcd->self);
}
EXPORT_SYMBOL_GPL (usb_hc_died);
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7728c91dfa2e..af91b1e7146c 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -17,6 +17,7 @@
#include "usb.h"
static BLOCKING_NOTIFIER_HEAD(usb_notifier_list);
+static ATOMIC_NOTIFIER_HEAD(usb_atomic_notifier_list);
/**
* usb_register_notify - register a notifier callback whenever a usb change happens
@@ -67,3 +68,33 @@ void usb_notify_remove_bus(struct usb_bus *ubus)
{
blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus);
}
+
+/**
+ * usb_register_atomic_notify - register an atomic notifier callback whenever an
+ * HC dies
+ * @nb: pointer to the atomic notifier block for the callback events.
+ *
+ */
+void usb_register_atomic_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&usb_atomic_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(usb_register_atomic_notify);
+
+/**
+ * usb_unregister_atomic_notify - unregister an atomic notifier callback
+ * @nb: pointer to the notifier block for the callback events.
+ *
+ */
+void usb_unregister_atomic_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&usb_atomic_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(usb_unregister_atomic_notify);
+
+
+void usb_atomic_notify_dead_bus(struct usb_bus *ubus)
+{
+ atomic_notifier_call_chain(&usb_atomic_notifier_list, USB_BUS_DIED,
+ ubus);
+}
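A client that needs to react to a controller death (for example, to trigger hardware recovery) would register on this chain; note that usb_hc_died() runs with spinlocks held, so the callback executes in atomic context. A minimal, hypothetical consumer sketch:

/* Hypothetical consumer of the new dead-bus atomic notifier chain. */
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/usb.h>

static int demo_usb_bus_died(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct usb_bus *ubus = data;

	if (event == USB_BUS_DIED)	/* event code added by this series */
		pr_err("USB bus %d died, scheduling recovery\n", ubus->busnum);

	return NOTIFY_OK;	/* atomic context: no sleeping here */
}

static struct notifier_block demo_usb_nb = {
	.notifier_call = demo_usb_bus_died,
};

/* in driver init / exit:
 *   usb_register_atomic_notify(&demo_usb_nb);
 *   usb_unregister_atomic_notify(&demo_usb_nb);
 */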
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 05b5e17abf92..ccb35af525e2 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -175,6 +175,7 @@ extern void usb_notify_add_device(struct usb_device *udev);
extern void usb_notify_remove_device(struct usb_device *udev);
extern void usb_notify_add_bus(struct usb_bus *ubus);
extern void usb_notify_remove_bus(struct usb_bus *ubus);
+extern void usb_atomic_notify_dead_bus(struct usb_bus *ubus);
extern void usb_hub_adjust_deviceremovable(struct usb_device *hdev,
struct usb_hub_descriptor *desc);
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index a629723d19cb..e32348d17b26 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -593,6 +593,31 @@ static void ipa_work_handler(struct work_struct *w)
d_port->sm_state = STATE_CONNECT_IN_PROGRESS;
log_event_dbg("%s: ST_INIT_EVT_CONN_IN_PROG",
__func__);
+ } else if (event == EVT_HOST_READY) {
+ /*
+ * When in a composition such as RNDIS + ADB,
+ * RNDIS host sends a GEN_CURRENT_PACKET_FILTER msg
+ * to enable/disable flow control, e.g. during RNDIS
+ * adaptor disable/enable from device manager.
+ * In the case of the msg to disable flow control,
+ * connect IPA channels and enable data path.
+ * EVT_HOST_READY is posted to the state machine
+ * in the handler for this msg.
+ */
+ usb_gadget_autopm_get(d_port->gadget);
+ log_event_dbg("%s: get = %d", __func__,
+ atomic_read(&gad_dev->power.usage_count));
+ /* allocate buffers used with each TRB */
+ ret = gsi_alloc_trb_buffer(gsi);
+ if (ret) {
+ log_event_err("%s: gsi_alloc_trb_failed\n",
+ __func__);
+ break;
+ }
+ ipa_connect_channels(d_port);
+ ipa_data_path_enable(d_port);
+ d_port->sm_state = STATE_CONNECTED;
+ log_event_dbg("%s: ST_INIT_EVT_HOST_READY", __func__);
}
break;
case STATE_CONNECT_IN_PROGRESS:
@@ -1702,7 +1727,10 @@ static int gsi_get_alt(struct usb_function *f, unsigned intf)
{
struct f_gsi *gsi = func_to_gsi(f);
- if (intf == gsi->ctrl_id)
+ /* RNDIS, RMNET and DPL only support alt 0*/
+ if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RNDIS ||
+ gsi->prot_id == IPA_USB_RMNET ||
+ gsi->prot_id == IPA_USB_DIAG)
return 0;
else if (intf == gsi->data_id)
return gsi->data_interface_up;
diff --git a/drivers/video/fbdev/msm/mdp3.c b/drivers/video/fbdev/msm/mdp3.c
index fd22928353b4..7454bba68117 100644
--- a/drivers/video/fbdev/msm/mdp3.c
+++ b/drivers/video/fbdev/msm/mdp3.c
@@ -1125,7 +1125,7 @@ static int mdp3_res_init(void)
mdp3_res->ion_client = msm_ion_client_create(mdp3_res->pdev->name);
if (IS_ERR_OR_NULL(mdp3_res->ion_client)) {
- pr_err("msm_ion_client_create() return error (%p)\n",
+ pr_err("msm_ion_client_create() return error (%pK)\n",
mdp3_res->ion_client);
mdp3_res->ion_client = NULL;
return -EINVAL;
@@ -1556,7 +1556,7 @@ void mdp3_unmap_iommu(struct ion_client *client, struct ion_handle *handle)
mutex_lock(&mdp3_res->iommu_lock);
meta = mdp3_iommu_meta_lookup(table);
if (!meta) {
- WARN(1, "%s: buffer was never mapped for %p\n", __func__,
+ WARN(1, "%s: buffer was never mapped for %pK\n", __func__,
handle);
mutex_unlock(&mdp3_res->iommu_lock);
return;
@@ -1582,7 +1582,7 @@ static void mdp3_iommu_meta_add(struct mdp3_iommu_meta *meta)
} else if (meta->table > entry->table) {
p = &(*p)->rb_right;
} else {
- pr_err("%s: handle %p already exists\n", __func__,
+ pr_err("%s: handle %pK already exists\n", __func__,
entry->handle);
BUG();
}
@@ -1645,7 +1645,7 @@ static int mdp3_iommu_map_iommu(struct mdp3_iommu_meta *meta,
ret = iommu_map_range(domain, meta->iova_addr + padding,
table->sgl, size, prot);
if (ret) {
- pr_err("%s: could not map %pa in domain %p\n",
+ pr_err("%s: could not map %pa in domain %pK\n",
__func__, &meta->iova_addr, domain);
unmap_size = padding;
goto out2;
@@ -1768,12 +1768,12 @@ int mdp3_self_map_iommu(struct ion_client *client, struct ion_handle *handle,
}
} else {
if (iommu_meta->flags != iommu_flags) {
- pr_err("%s: hndl %p already mapped with diff flag\n",
+ pr_err("%s: hndl %pK already mapped with diff flag\n",
__func__, handle);
ret = -EINVAL;
goto out_unlock;
} else if (iommu_meta->mapped_size != iova_length) {
- pr_err("%s: hndl %p already mapped with diff len\n",
+ pr_err("%s: hndl %pK already mapped with diff len\n",
__func__, handle);
ret = -EINVAL;
goto out_unlock;
@@ -1807,7 +1807,7 @@ int mdp3_put_img(struct mdp3_img_data *data, int client)
fdput(data->srcp_f);
memset(&data->srcp_f, 0, sizeof(struct fd));
} else if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
- pr_debug("ion hdl = %p buf=0x%pa\n", data->srcp_dma_buf,
+ pr_debug("ion hdl = %pK buf=0x%pa\n", data->srcp_dma_buf,
&data->addr);
if (!iclient) {
pr_err("invalid ion client\n");
@@ -1910,7 +1910,7 @@ done:
data->addr += img->offset;
data->len -= img->offset;
- pr_debug("mem=%d ihdl=%p buf=0x%pa len=0x%lx\n",
+ pr_debug("mem=%d ihdl=%pK buf=0x%pa len=0x%lx\n",
img->memory_id, data->srcp_dma_buf,
&data->addr, data->len);
@@ -2134,7 +2134,7 @@ static int mdp3_alloc(struct msm_fb_data_type *mfd)
return -ERANGE;
}
- pr_debug("alloc 0x%zxB @ (%pa phys) (0x%p virt) (%pa iova) for fb%d\n",
+ pr_debug("alloc 0x%zxB @ (%pa phys) (0x%pK virt) (%pa iova) for fb%d\n",
size, &phys, virt, &mfd->iova, mfd->index);
mfd->fbi->fix.smem_start = phys;
diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c
index d4c83d6e33f0..8b382ea6fd95 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.c
+++ b/drivers/video/fbdev/msm/mdp3_dma.c
@@ -721,7 +721,7 @@ retry_dma_done:
retry_vsync:
rc = wait_for_completion_timeout(&dma->vsync_comp,
KOFF_TIMEOUT);
- pr_err("%s VID DMA Buff Addr %p\n", __func__, buf);
+ pr_err("%s VID DMA Buff Addr %pK\n", __func__, buf);
if (rc <= 0 && --retry_count) {
int vsync = MDP3_REG_READ(MDP3_REG_INTR_STATUS) &
(1 << MDP3_INTR_LCDC_START_OF_FRAME);
diff --git a/drivers/video/fbdev/msm/mdp3_ppp_hwio.c b/drivers/video/fbdev/msm/mdp3_ppp_hwio.c
index 5ba3fbdb6238..0c830afbb923 100644
--- a/drivers/video/fbdev/msm/mdp3_ppp_hwio.c
+++ b/drivers/video/fbdev/msm/mdp3_ppp_hwio.c
@@ -1308,7 +1308,7 @@ int config_ppp_op_mode(struct ppp_blit_op *blit_op)
pr_debug("ROI(x %d,y %d,w %d, h %d) ",
blit_op->src.roi.x, blit_op->src.roi.y,
blit_op->src.roi.width, blit_op->src.roi.height);
- pr_debug("Addr_P0 %p, Stride S0 %d Addr_P1 %p, Stride S1 %d\n",
+ pr_debug("Addr_P0 %pK, Stride S0 %d Addr_P1 %pK, Stride S1 %d\n",
blit_op->src.p0, blit_op->src.stride0,
blit_op->src.p1, blit_op->src.stride1);
@@ -1320,7 +1320,7 @@ int config_ppp_op_mode(struct ppp_blit_op *blit_op)
pr_debug("ROI(x %d,y %d, w %d, h %d) ",
blit_op->bg.roi.x, blit_op->bg.roi.y,
blit_op->bg.roi.width, blit_op->bg.roi.height);
- pr_debug("Addr %p, Stride S0 %d Addr_P1 %p, Stride S1 %d\n",
+ pr_debug("Addr %pK, Stride S0 %d Addr_P1 %pK, Stride S1 %d\n",
blit_op->bg.p0, blit_op->bg.stride0,
blit_op->bg.p1, blit_op->bg.stride1);
}
@@ -1331,7 +1331,7 @@ int config_ppp_op_mode(struct ppp_blit_op *blit_op)
pr_debug("ROI(x %d,y %d, w %d, h %d) ",
blit_op->dst.roi.x, blit_op->dst.roi.y,
blit_op->dst.roi.width, blit_op->dst.roi.height);
- pr_debug("Addr %p, Stride S0 %d Addr_P1 %p, Stride S1 %d\n",
+ pr_debug("Addr %pK, Stride S0 %d Addr_P1 %pK, Stride S1 %d\n",
blit_op->dst.p0, blit_op->src.stride0,
blit_op->dst.p1, blit_op->dst.stride1);
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c
index e883f045967d..5ad51dd23f3b 100644
--- a/drivers/video/fbdev/msm/mdss_compat_utils.c
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.c
@@ -150,7 +150,7 @@ static struct mdp_input_layer32 *__create_layer_list32(
compat_ptr(commit32->commit_v1.input_layers),
sizeof(struct mdp_input_layer32) * layer_count);
if (ret) {
- pr_err("layer list32 copy from user failed, ptr %p\n",
+ pr_err("layer list32 copy from user failed, ptr %pK\n",
compat_ptr(commit32->commit_v1.input_layers));
kfree(layer_list32);
ret = -EFAULT;
@@ -182,7 +182,7 @@ static int __copy_scale_params(struct mdp_input_layer *layer,
sizeof(struct mdp_scale_data));
if (ret) {
kfree(scale);
- pr_err("scale param copy from user failed, ptr %p\n",
+ pr_err("scale param copy from user failed, ptr %pK\n",
compat_ptr(layer32->scale));
ret = -EFAULT;
} else {
@@ -307,7 +307,7 @@ static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
ret = copy_from_user(&commit32, (void __user *)argp,
sizeof(struct mdp_layer_commit32));
if (ret) {
- pr_err("%s:copy_from_user failed, ptr %p\n", __func__,
+ pr_err("%s:copy_from_user failed, ptr %pK\n", __func__,
(void __user *)argp);
ret = -EFAULT;
return ret;
@@ -325,7 +325,7 @@ static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
compat_ptr(commit32.commit_v1.output_layer),
buffer_size);
if (ret) {
- pr_err("fail to copy output layer from user, ptr %p\n",
+ pr_err("fail to copy output layer from user, ptr %pK\n",
compat_ptr(commit32.commit_v1.output_layer));
ret = -EFAULT;
goto layer_list_err;
@@ -3418,7 +3418,7 @@ static int __copy_layer_igc_lut_data_v1_7(
cfg_payload32,
sizeof(struct mdp_igc_lut_data_v1_7_32));
if (ret) {
- pr_err("copy from user failed, IGC cfg payload = %p\n",
+ pr_err("copy from user failed, IGC cfg payload = %pK\n",
cfg_payload32);
ret = -EFAULT;
goto exit;
@@ -3493,7 +3493,7 @@ static int __copy_layer_hist_lut_data_v1_7(
cfg_payload32,
sizeof(struct mdp_hist_lut_data_v1_7_32));
if (ret) {
- pr_err("copy from user failed, hist lut cfg_payload = %p\n",
+ pr_err("copy from user failed, hist lut cfg_payload = %pK\n",
cfg_payload32);
ret = -EFAULT;
goto exit;
@@ -3565,7 +3565,7 @@ static int __copy_layer_pa_data_v1_7(
cfg_payload32,
sizeof(struct mdp_pa_data_v1_7_32));
if (ret) {
- pr_err("copy from user failed, pa cfg_payload = %p\n",
+ pr_err("copy from user failed, pa cfg_payload = %pK\n",
cfg_payload32);
ret = -EFAULT;
goto exit;
@@ -3707,7 +3707,7 @@ static int __copy_layer_pp_info_pcc_params(
compat_ptr(pp_info32->pcc_cfg_data.cfg_payload),
sizeof(struct mdp_pcc_data_v1_7));
if (ret) {
- pr_err("compat copy of PCC cfg payload failed, ptr %p\n",
+ pr_err("compat copy of PCC cfg payload failed, ptr %pK\n",
compat_ptr(
pp_info32->pcc_cfg_data.cfg_payload));
ret = -EFAULT;
@@ -3741,7 +3741,7 @@ static int __copy_layer_pp_info_params(struct mdp_input_layer *layer,
compat_ptr(layer32->pp_info),
sizeof(struct mdp_overlay_pp_params32));
if (ret) {
- pr_err("pp info copy from user failed, pp_info %p\n",
+ pr_err("pp info copy from user failed, pp_info %pK\n",
compat_ptr(layer32->pp_info));
ret = -EFAULT;
goto exit;
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
index 39848366a55b..79980acc2201 100644
--- a/drivers/video/fbdev/msm/mdss_debug.c
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -1298,6 +1298,38 @@ static inline struct mdss_mdp_misr_map *mdss_misr_get_map(u32 block_id,
}
} else {
if (block_id <= DISPLAY_MISR_HDMI) {
+ /*
+ * In Dual LM single display configuration,
+ * the interface number (i.e. block_id)
+ * might not be the one passed in from the ISR.
+ * We should always check with the actual
+ * intf_num from ctl.
+ */
+ struct msm_fb_data_type *mfd = NULL;
+
+ /*
+ * The ISR passes in a NULL ctl, so we need to get it
+ * from the mdata.
+ */
+ if (!ctl && mdata->mixer_intf)
+ ctl = mdata->mixer_intf->ctl;
+ if (ctl)
+ mfd = ctl->mfd;
+ if (mfd && is_dual_lm_single_display(mfd)) {
+ switch (ctl->intf_num) {
+ case MDSS_MDP_INTF1:
+ block_id = DISPLAY_MISR_DSI0;
+ break;
+ case MDSS_MDP_INTF2:
+ block_id = DISPLAY_MISR_DSI1;
+ break;
+ default:
+ pr_err("Unmatch INTF for Dual LM single display configuration, INTF:%d\n",
+ ctl->intf_num);
+ return NULL;
+ }
+ }
+
intf_base = (char *)mdss_mdp_get_intf_base_addr(
mdata, block_id);
@@ -1311,11 +1343,15 @@ static inline struct mdss_mdp_misr_map *mdss_misr_get_map(u32 block_id,
/*
* extra offset required for
- * cmd misr in 8996
+ * cmd misr in 8996 and mdss3.x
*/
if (IS_MDSS_MAJOR_MINOR_SAME(
mdata->mdp_rev,
- MDSS_MDP_HW_REV_107)) {
+ MDSS_MDP_HW_REV_107) ||
+ (mdata->mdp_rev ==
+ MDSS_MDP_HW_REV_300) ||
+ (mdata->mdp_rev ==
+ MDSS_MDP_HW_REV_301)) {
ctrl_reg += 0x8;
value_reg += 0x8;
}
@@ -1350,7 +1386,7 @@ static inline struct mdss_mdp_misr_map *mdss_misr_get_map(u32 block_id,
return NULL;
}
- pr_debug("MISR Module(%d) CTRL(0x%x) SIG(0x%x) intf_base(0x%p)\n",
+ pr_debug("MISR Module(%d) CTRL(0x%x) SIG(0x%x) intf_base(0x%pK)\n",
block_id, map->ctrl_reg, map->value_reg, intf_base);
return map;
}
@@ -1390,6 +1426,9 @@ void mdss_misr_disable(struct mdss_data_type *mdata,
map = mdss_misr_get_map(req->block_id, ctl, mdata,
ctl->is_video_mode);
+ if (!map)
+ return;
+
/* clear the map data */
memset(map->crc_ping, 0, sizeof(map->crc_ping));
memset(map->crc_pong, 0, sizeof(map->crc_pong));
@@ -1420,7 +1459,7 @@ int mdss_misr_set(struct mdss_data_type *mdata,
bool use_mdp_up_misr = false;
if (!mdata || !req || !ctl) {
- pr_err("Invalid input params: mdata = %p req = %p ctl = %p",
+ pr_err("Invalid input params: mdata = %pK req = %pK ctl = %pK",
mdata, req, ctl);
return -EINVAL;
}
@@ -1500,7 +1539,7 @@ int mdss_misr_set(struct mdss_data_type *mdata,
writel_relaxed(config,
mdata->mdp_base + map->ctrl_reg);
- pr_debug("MISR_CTRL=0x%x [base:0x%p reg:0x%x config:0x%x]\n",
+ pr_debug("MISR_CTRL=0x%x [base:0x%pK reg:0x%x config:0x%x]\n",
readl_relaxed(mdata->mdp_base + map->ctrl_reg),
mdata->mdp_base, map->ctrl_reg, config);
}
diff --git a/drivers/video/fbdev/msm/mdss_debug_xlog.c b/drivers/video/fbdev/msm/mdss_debug_xlog.c
index 32bfb151eddd..cfcc96aafffb 100644
--- a/drivers/video/fbdev/msm/mdss_debug_xlog.c
+++ b/drivers/video/fbdev/msm/mdss_debug_xlog.c
@@ -254,7 +254,7 @@ static void mdss_dump_debug_bus(u32 bus_dump_flag,
if (*dump_mem) {
dump_addr = *dump_mem;
- pr_info("%s: start_addr:0x%p end_addr:0x%p\n",
+ pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
__func__, dump_addr, dump_addr + list_size);
} else {
in_mem = false;
@@ -378,7 +378,7 @@ static void mdss_dump_vbif_debug_bus(u32 bus_dump_flag,
if (*dump_mem) {
dump_addr = *dump_mem;
- pr_info("%s: start_addr:0x%p end_addr:0x%p\n",
+ pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
__func__, dump_addr, dump_addr + list_size);
} else {
in_mem = false;
@@ -438,7 +438,7 @@ void mdss_dump_reg(const char *dump_name, u32 reg_dump_flag, char *addr,
if (*dump_mem) {
dump_addr = *dump_mem;
- pr_info("%s: start_addr:0x%p end_addr:0x%p reg_addr=0x%p\n",
+ pr_info("%s: start_addr:0x%pK end_addr:0x%pK reg_addr=0x%pK\n",
dump_name, dump_addr, dump_addr + (u32)len * 16,
addr);
} else {
@@ -459,7 +459,7 @@ void mdss_dump_reg(const char *dump_name, u32 reg_dump_flag, char *addr,
xc = readl_relaxed(addr+0xc);
if (in_log)
- pr_info("%p : %08x %08x %08x %08x\n", addr, x0, x4, x8,
+ pr_info("%pK : %08x %08x %08x %08x\n", addr, x0, x4, x8,
xc);
if (dump_addr && in_mem) {
@@ -497,7 +497,7 @@ static void mdss_dump_reg_by_ranges(struct mdss_debug_base *dbg,
len = get_dump_range(&xlog_node->offset,
dbg->max_offset);
addr = dbg->base + xlog_node->offset.start;
- pr_debug("%s: range_base=0x%p start=0x%x end=0x%x\n",
+ pr_debug("%s: range_base=0x%pK start=0x%x end=0x%x\n",
xlog_node->range_name,
addr, xlog_node->offset.start,
xlog_node->offset.end);
@@ -508,7 +508,7 @@ static void mdss_dump_reg_by_ranges(struct mdss_debug_base *dbg,
} else {
/* If there is no list to dump ranges, dump all registers */
pr_info("Ranges not found, will dump full registers");
- pr_info("base:0x%p len:0x%zu\n", dbg->base, dbg->max_offset);
+ pr_info("base:0x%pK len:%zu\n", dbg->base, dbg->max_offset);
addr = dbg->base;
len = dbg->max_offset;
mdss_dump_reg((const char *)dbg->name, reg_dump_flag, addr,
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index f7261d4e3fa4..c8b415df4bce 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -29,6 +29,7 @@
#include <linux/clk.h>
#include <linux/spinlock_types.h>
#include <linux/kthread.h>
+#include <linux/msm_ext_display.h>
#include "mdss.h"
#include "mdss_dp.h"
@@ -827,6 +828,112 @@ int mdss_dp_wait4train(struct mdss_dp_drv_pdata *dp_drv)
return ret;
}
+static int dp_get_cable_status(struct platform_device *pdev, u32 vote)
+{
+ struct mdss_dp_drv_pdata *dp_ctrl = platform_get_drvdata(pdev);
+ u32 hpd;
+
+ if (!dp_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&dp_ctrl->pd_msg_mutex);
+ hpd = dp_ctrl->cable_connected;
+ mutex_unlock(&dp_ctrl->pd_msg_mutex);
+
+ return hpd;
+}
+
+static int dp_audio_info_setup(struct platform_device *pdev,
+ struct msm_ext_disp_audio_setup_params *params)
+{
+ int rc = 0;
+ struct mdss_dp_drv_pdata *dp_ctrl = platform_get_drvdata(pdev);
+
+ if (!dp_ctrl || !params) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -ENODEV;
+ }
+
+ mdss_dp_audio_enable(&dp_ctrl->ctrl_io, true);
+ mdss_dp_config_audio_acr_ctrl(&dp_ctrl->ctrl_io,
+ dp_ctrl->link_rate);
+ mdss_dp_audio_setup_sdps(&dp_ctrl->ctrl_io);
+
+ return rc;
+} /* dp_audio_info_setup */
+
+static int dp_get_audio_edid_blk(struct platform_device *pdev,
+ struct msm_ext_disp_audio_edid_blk *blk)
+{
+ struct mdss_dp_drv_pdata *dp = platform_get_drvdata(pdev);
+ int rc = 0;
+
+ if (!dp) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -ENODEV;
+ }
+
+ rc = hdmi_edid_get_audio_blk
+ (dp->panel_data.panel_info.edid_data, blk);
+ if (rc)
+ DEV_ERR("%s:edid_get_audio_blk failed\n", __func__);
+
+ return rc;
+} /* dp_get_audio_edid_blk */
+
+static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp)
+{
+ int ret = 0;
+ struct device_node *pd_np;
+ const char *phandle = "qcom,msm_ext_disp";
+
+ if (!dp) {
+ pr_err("%s: invalid input\n", __func__);
+ ret = -ENODEV;
+ goto end;
+ }
+
+ dp->ext_audio_data.type = EXT_DISPLAY_TYPE_DP;
+ dp->ext_audio_data.kobj = dp->kobj;
+ dp->ext_audio_data.pdev = dp->pdev;
+ dp->ext_audio_data.codec_ops.audio_info_setup =
+ dp_audio_info_setup;
+ dp->ext_audio_data.codec_ops.get_audio_edid_blk =
+ dp_get_audio_edid_blk;
+ dp->ext_audio_data.codec_ops.cable_status =
+ dp_get_cable_status;
+
+ if (!dp->pdev->dev.of_node) {
+ pr_err("%s cannot find dp dev.of_node\n", __func__);
+ ret = -ENODEV;
+ goto end;
+ }
+
+ pd_np = of_parse_phandle(dp->pdev->dev.of_node, phandle, 0);
+ if (!pd_np) {
+ pr_err("%s cannot find %s dev\n", __func__, phandle);
+ ret = -ENODEV;
+ goto end;
+ }
+
+ dp->ext_pdev = of_find_device_by_node(pd_np);
+ if (!dp->ext_pdev) {
+ pr_err("%s cannot find %s pdev\n", __func__, phandle);
+ ret = -ENODEV;
+ goto end;
+ }
+
+ ret = msm_ext_disp_register_intf(dp->ext_pdev,
+ &dp->ext_audio_data);
+ if (ret)
+ pr_err("%s: failed to register disp\n", __func__);
+
+end:
+ return ret;
+}
+
#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
@@ -895,7 +1002,7 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
panel_data);
/* wait until link training is completed */
- mutex_lock(&dp_drv->host_mutex);
+ mutex_lock(&dp_drv->train_mutex);
pr_debug("Enter++ cont_splash=%d\n", dp_drv->cont_splash);
/* Default lane mapping */
@@ -908,7 +1015,7 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
ret = mdss_dp_clk_ctrl(dp_drv, DP_CORE_PM, true);
if (ret) {
pr_err("Unabled to start core clocks\n");
- return ret;
+ goto exit;
}
mdss_dp_hpd_configure(&dp_drv->ctrl_io, true);
@@ -941,7 +1048,8 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
dp_drv->link_rate, dp_drv->dpcd.max_link_rate);
if (!dp_drv->link_rate) {
pr_err("Unable to configure required link rate\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit;
}
pr_debug("link_rate = 0x%x\n", dp_drv->link_rate);
@@ -955,9 +1063,8 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
ret = mdss_dp_clk_ctrl(dp_drv, DP_CTRL_PM, true);
if (ret) {
- mdss_dp_clk_ctrl(dp_drv, DP_CORE_PM, false);
pr_err("Unabled to start link clocks\n");
- return ret;
+ goto exit;
}
mdss_dp_mainlink_reset(&dp_drv->ctrl_io);
@@ -985,17 +1092,16 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
pr_debug("mainlink ready\n");
dp_drv->power_on = true;
-
- mutex_unlock(&dp_drv->host_mutex);
pr_debug("End-\n");
+exit:
+ mutex_unlock(&dp_drv->train_mutex);
return ret;
}
int mdss_dp_off(struct mdss_panel_data *pdata)
{
struct mdss_dp_drv_pdata *dp_drv = NULL;
- int ret = 0;
dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
panel_data);
@@ -1009,73 +1115,54 @@ int mdss_dp_off(struct mdss_panel_data *pdata)
mutex_lock(&dp_drv->train_mutex);
reinit_completion(&dp_drv->idle_comp);
- mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE);
-
- ret = wait_for_completion_timeout(&dp_drv->idle_comp,
- msecs_to_jiffies(100));
- if (ret == 0)
- pr_err("idle pattern timedout\n");
mdss_dp_state_ctrl(&dp_drv->ctrl_io, 0);
- mdss_dp_irq_disable(dp_drv);
+ if (dp_drv->link_clks_on)
+ mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, false);
- mdss_dp_mainlink_reset(&dp_drv->ctrl_io);
- mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, false);
+ mdss_dp_aux_ctrl(&dp_drv->ctrl_io, false);
+
+ mdss_dp_irq_disable(dp_drv);
mdss_dp_config_gpios(dp_drv, false);
mdss_dp_pinctrl_set_state(dp_drv, false);
- mdss_dp_aux_ctrl(&dp_drv->ctrl_io, false);
+ /* Make sure DP is disabled before clk disable */
+ wmb();
mdss_dp_clk_ctrl(dp_drv, DP_CTRL_PM, false);
mdss_dp_clk_ctrl(dp_drv, DP_CORE_PM, false);
mdss_dp_regulator_ctrl(dp_drv, false);
-
- pr_debug("End--: state_ctrl=%x\n",
- dp_read(dp_drv->base + DP_STATE_CTRL));
+ dp_drv->dp_initialized = false;
dp_drv->power_on = false;
mutex_unlock(&dp_drv->train_mutex);
+ pr_debug("DP off done\n");
+
return 0;
}
-static void mdss_dp_send_cable_notification(
+static inline void mdss_dp_set_audio_switch_node(
struct mdss_dp_drv_pdata *dp, int val)
{
- int state = 0;
-
- if (!dp) {
- DEV_ERR("%s: invalid input\n", __func__);
- return;
- }
- state = dp->sdev.state;
-
- switch_set_state(&dp->sdev, val);
-
- DEV_INFO("%s: cable state %s %d\n", __func__,
- dp->sdev.state == state ?
- "is same" : "switched to",
- dp->sdev.state);
+ if (dp && dp->ext_audio_data.intf_ops.notify)
+ dp->ext_audio_data.intf_ops.notify(dp->ext_pdev,
+ val);
}
-static int mdss_dp_register_switch_event(struct mdss_dp_drv_pdata *dp)
+static void mdss_dp_send_cable_notification(
+ struct mdss_dp_drv_pdata *dp, int val)
{
- int rc = -EINVAL;
if (!dp) {
DEV_ERR("%s: invalid input\n", __func__);
- goto end;
+ return;
}
- dp->sdev.name = "hdmi";
- rc = switch_dev_register(&dp->sdev);
- if (rc) {
- DEV_ERR("%s: display switch registration failed\n", __func__);
- goto end;
- }
-end:
- return rc;
+ if (dp && dp->ext_audio_data.intf_ops.hpd)
+ dp->ext_audio_data.intf_ops.hpd(dp->ext_pdev,
+ dp->ext_audio_data.type, val);
}
static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
@@ -1124,6 +1211,10 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
panel_data);
+ if (dp_drv->dp_initialized) {
+ pr_err("%s: host init done already\n", __func__);
+ return 0;
+ }
ret = mdss_dp_regulator_ctrl(dp_drv, true);
if (ret) {
pr_err("failed to enable regulators\n");
@@ -1170,6 +1261,8 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
}
mdss_dp_send_cable_notification(dp_drv, true);
+ mdss_dp_set_audio_switch_node(dp_drv, true);
+ dp_drv->dp_initialized = true;
return ret;
@@ -1421,7 +1514,12 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
mdss_dp_sysfs_create(dp, fbi);
mdss_dp_edid_init(pdata);
mdss_dp_hdcp_init(pdata);
- mdss_dp_register_switch_event(dp);
+
+ rc = mdss_dp_init_ext_disp(dp);
+ if (rc)
+ pr_err("failed to initialize ext disp data, ret=%d\n",
+ rc);
+
break;
case MDSS_EVENT_CHECK_PARAMS:
rc = mdss_dp_check_params(dp, arg);
@@ -1738,6 +1836,7 @@ static void usbpd_disconnect_callback(struct usbpd_svid_handler *hdlr)
dp_drv->cable_connected = false;
mutex_unlock(&dp_drv->pd_msg_mutex);
mdss_dp_send_cable_notification(dp_drv, false);
+ mdss_dp_set_audio_switch_node(dp_drv, false);
}
static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
@@ -1781,8 +1880,8 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
if (cmd_type == SVDM_CMD_TYPE_INITIATOR) {
pr_debug("Attention. cmd_type=%d\n",
cmd_type);
- if (!dp_drv->alt_mode.current_state
- == ENTER_MODE_DONE) {
+ if (!(dp_drv->alt_mode.current_state
+ == ENTER_MODE_DONE)) {
pr_debug("sending discover_mode\n");
dp_send_events(dp_drv, EV_USBPD_DISCOVER_MODES);
break;
@@ -1909,7 +2008,6 @@ static int mdss_dp_probe(struct platform_device *pdev)
dp_drv->mask1 = EDP_INTR_MASK1;
dp_drv->mask2 = EDP_INTR_MASK2;
mutex_init(&dp_drv->emutex);
- mutex_init(&dp_drv->host_mutex);
mutex_init(&dp_drv->pd_msg_mutex);
mutex_init(&dp_drv->hdcp_mutex);
spin_lock_init(&dp_drv->lock);
@@ -1999,8 +2097,12 @@ static int mdss_dp_probe(struct platform_device *pdev)
probe_err:
iounmap(dp_drv->ctrl_io.base);
iounmap(dp_drv->phy_io.base);
- if (dp_drv)
+ if (dp_drv) {
+ if (dp_drv->pd)
+ usbpd_unregister_svid(dp_drv->pd,
+ &dp_drv->svid_handler);
devm_kfree(&pdev->dev, dp_drv);
+ }
return ret;
}
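
For reference, a minimal sketch (not part of this patch) of how the msm_ext_disp
side is expected to drive the codec_ops wired up in mdss_dp_init_ext_disp() above.
The consumer function name and error handling here are illustrative assumptions;
only the ops, structure members and return conventions come from the change itself:

        /* hypothetical codec-side caller exercising the registered DP ops */
        static int dp_codec_prepare(struct msm_ext_disp_init_data *ext)
        {
                struct msm_ext_disp_audio_edid_blk blk;
                int connected;

                /* dp_get_cable_status() returns the cached cable_connected state */
                connected = ext->codec_ops.cable_status(ext->pdev, 1);
                if (connected <= 0)
                        return -ENODEV;

                /* dp_get_audio_edid_blk() fills in the parsed EDID audio block */
                return ext->codec_ops.get_audio_edid_blk(ext->pdev, &blk);
        }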
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index d36c1e4ffce5..b724aa655424 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -333,10 +333,13 @@ struct mdss_dp_drv_pdata {
int (*on) (struct mdss_panel_data *pdata);
int (*off) (struct mdss_panel_data *pdata);
struct platform_device *pdev;
+ struct platform_device *ext_pdev;
struct usbpd *pd;
struct usbpd_svid_handler svid_handler;
struct dp_alt_mode alt_mode;
+ bool dp_initialized;
+ struct msm_ext_disp_init_data ext_audio_data;
struct mutex emutex;
int clk_cnt;
@@ -398,7 +401,6 @@ struct mdss_dp_drv_pdata {
struct completion video_comp;
struct mutex aux_mutex;
struct mutex train_mutex;
- struct mutex host_mutex;
struct mutex pd_msg_mutex;
struct mutex hdcp_mutex;
bool cable_connected;
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index aea342ac90db..0bbcd0c9041e 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -1109,16 +1109,16 @@ static void dp_host_train_set(struct mdss_dp_drv_pdata *ep, int train)
}
char vm_pre_emphasis[4][4] = {
- {0x03, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
- {0x03, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
+ {0x00, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
+ {0x00, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
{0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
{0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
};
/* voltage swing, 0.2v and 1.0v are not support */
char vm_voltage_swing[4][4] = {
- {0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
- {0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
+ {0x0a, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
+ {0x07, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
{0x1A, 0x1E, 0xFF, 0xFF}, /* sw1, 0.8 v */
{0x1E, 0xFF, 0xFF, 0xFF} /* sw1, 1.2 v, optional */
};
@@ -1312,7 +1312,7 @@ static void dp_clear_training_pattern(struct mdss_dp_drv_pdata *ep)
usleep_range(usleep_time, usleep_time);
}
-static int dp_aux_link_train(struct mdss_dp_drv_pdata *dp)
+int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
{
int ret = 0;
int usleep_time;
@@ -1412,16 +1412,6 @@ void mdss_dp_fill_link_cfg(struct mdss_dp_drv_pdata *ep)
}
-int mdss_dp_link_train(struct mdss_dp_drv_pdata *ep)
-{
- int ret;
-
- mutex_lock(&ep->train_mutex);
- ret = dp_aux_link_train(ep);
- mutex_unlock(&ep->train_mutex);
- return ret;
-}
-
void mdss_dp_aux_init(struct mdss_dp_drv_pdata *ep)
{
mutex_init(&ep->aux_mutex);
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c
index f7b27d1e56a1..62b76199959c 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.c
+++ b/drivers/video/fbdev/msm/mdss_dp_util.c
@@ -18,6 +18,13 @@
#include "mdss_dp_util.h"
+#define HEADER_BYTE_2_BIT 0
+#define PARITY_BYTE_2_BIT 8
+#define HEADER_BYTE_1_BIT 16
+#define PARITY_BYTE_1_BIT 24
+#define HEADER_BYTE_3_BIT 16
+#define PARITY_BYTE_3_BIT 24
+
struct mdss_hw mdss_dp_hw = {
.hw_ndx = MDSS_HW_EDP,
.ptr = NULL,
@@ -239,13 +246,13 @@ void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io)
{
writel_relaxed(0x3d, phy_io->base + DP_PHY_PD_CTL);
- writel_relaxed(0x03, phy_io->base + DP_PHY_AUX_CFG1);
- writel_relaxed(0x00, phy_io->base + DP_PHY_AUX_CFG3);
+ writel_relaxed(0x13, phy_io->base + DP_PHY_AUX_CFG1);
+ writel_relaxed(0x10, phy_io->base + DP_PHY_AUX_CFG3);
writel_relaxed(0x0a, phy_io->base + DP_PHY_AUX_CFG4);
writel_relaxed(0x26, phy_io->base + DP_PHY_AUX_CFG5);
writel_relaxed(0x0a, phy_io->base + DP_PHY_AUX_CFG6);
writel_relaxed(0x03, phy_io->base + DP_PHY_AUX_CFG7);
- writel_relaxed(0xbb, phy_io->base + DP_PHY_AUX_CFG8);
+ writel_relaxed(0x8b, phy_io->base + DP_PHY_AUX_CFG8);
writel_relaxed(0x03, phy_io->base + DP_PHY_AUX_CFG9);
writel_relaxed(0x1f, phy_io->base + DP_PHY_AUX_INTERRUPT_MASK);
}
@@ -370,3 +377,163 @@ u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp)
pr_debug("DP config = 0x%x\n", config);
return config;
}
+
+void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io,
+ char link_rate)
+{
+ u32 acr_ctrl = 0;
+
+ switch (link_rate) {
+ case DP_LINK_RATE_162:
+ acr_ctrl = 0;
+ break;
+ case DP_LINK_RATE_270:
+ acr_ctrl = 1;
+ break;
+ case DP_LINK_RATE_540:
+ acr_ctrl = 2;
+ break;
+ default:
+ pr_debug("Unknown link rate\n");
+ acr_ctrl = 1;
+ break;
+ }
+
+ writel_relaxed(acr_ctrl, ctrl_io->base + MMSS_DP_AUDIO_ACR_CTRL);
+}
+
+static void mdss_dp_audio_config_parity_settings(struct dss_io_data *ctrl_io)
+{
+ u32 value = 0;
+
+ value = readl_relaxed(ctrl_io->base + MMSS_DP_AUDIO_STREAM_0);
+ /* Config header and parity byte 1 */
+ value |= ((0x2 << HEADER_BYTE_1_BIT)
+ | (0x13 << PARITY_BYTE_1_BIT));
+ writel_relaxed(value, ctrl_io->base + MMSS_DP_AUDIO_STREAM_0);
+
+ value = readl_relaxed(ctrl_io->base + MMSS_DP_AUDIO_STREAM_1);
+ /* Config header and parity byte 2 */
+ value |= ((0x28 << HEADER_BYTE_2_BIT)
+ | (0xf5 << PARITY_BYTE_2_BIT));
+ writel_relaxed(value, ctrl_io->base + MMSS_DP_AUDIO_STREAM_1);
+
+ value = readl_relaxed(ctrl_io->base + MMSS_DP_AUDIO_STREAM_1);
+ /* Config header and parity byte 3 */
+ value |= ((0x97 << HEADER_BYTE_3_BIT)
+ | (0xc2 << PARITY_BYTE_3_BIT));
+ writel_relaxed(value, ctrl_io->base + MMSS_DP_AUDIO_STREAM_1);
+
+ value = readl_relaxed(ctrl_io->base + MMSS_DP_AUDIO_TIMESTAMP_0);
+ /* Config header and parity byte 1 */
+ value |= ((0x1 << HEADER_BYTE_1_BIT)
+ | (0x98 << PARITY_BYTE_1_BIT));
+ writel_relaxed(value, ctrl_io->base + MMSS_DP_AUDIO_TIMESTAMP_0);
+
+ value = readl_relaxed(ctrl_io->base + MMSS_DP_AUDIO_TIMESTAMP_1);
+ /* Config header and parity byte 2 */
+ value |= ((0x17 << HEADER_BYTE_2_BIT)
+ | (0x60 << PARITY_BYTE_2_BIT));
+ writel_relaxed(value, ctrl_io->base + MMSS_DP_AUDIO_TIMESTAMP_1);
+
+ value = readl_relaxed(ctrl_io->base + MMSS_DP_AUDIO_INFOFRAME_0);
+ /* Config header and parity byte 1 */
+ value |= ((0x84 << HEADER_BYTE_1_BIT)
+ | (0x84 << PARITY_BYTE_1_BIT));
+ writel_relaxed(value, ctrl_io->base + MMSS_DP_AUDIO_INFOFRAME_0);
+
+ value = readl_relaxed(ctrl_io->base + MMSS_DP_AUDIO_INFOFRAME_1);
+ /* Config header and parity byte 2 */
+ value |= ((0xb1 << HEADER_BYTE_2_BIT)
+ | (0x4e << PARITY_BYTE_2_BIT));
+ writel_relaxed(value, ctrl_io->base + MMSS_DP_AUDIO_INFOFRAME_1);
+
+ value = readl_relaxed(ctrl_io->base +
+ MMSS_DP_AUDIO_COPYMANAGEMENT_0);
+ /* Config header and parity byte 1 */
+ value |= ((0x5 << HEADER_BYTE_1_BIT)
+ | (0xbe << PARITY_BYTE_1_BIT));
+ writel_relaxed(value, ctrl_io->base +
+ MMSS_DP_AUDIO_COPYMANAGEMENT_0);
+
+ value = readl_relaxed(ctrl_io->base +
+ MMSS_DP_AUDIO_COPYMANAGEMENT_1);
+ /* Config header and parity byte 2 */
+ value |= ((0x0b << HEADER_BYTE_2_BIT)
+ | (0xc7 << PARITY_BYTE_2_BIT));
+ writel_relaxed(value, ctrl_io->base +
+ MMSS_DP_AUDIO_COPYMANAGEMENT_1);
+
+ value = readl_relaxed(ctrl_io->base +
+ MMSS_DP_AUDIO_COPYMANAGEMENT_1);
+ /* Config header and parity byte 3 */
+ value |= ((0x1 << HEADER_BYTE_3_BIT)
+ | (0x98 << PARITY_BYTE_3_BIT));
+ writel_relaxed(value, ctrl_io->base +
+ MMSS_DP_AUDIO_COPYMANAGEMENT_1);
+
+ writel_relaxed(0x22222222, ctrl_io->base +
+ MMSS_DP_AUDIO_COPYMANAGEMENT_2);
+ writel_relaxed(0x22222222, ctrl_io->base +
+ MMSS_DP_AUDIO_COPYMANAGEMENT_3);
+ writel_relaxed(0x22222222, ctrl_io->base +
+ MMSS_DP_AUDIO_COPYMANAGEMENT_4);
+
+ value = readl_relaxed(ctrl_io->base + MMSS_DP_AUDIO_ISRC_0);
+ /* Config header and parity byte 1 */
+ value |= ((0x6 << HEADER_BYTE_1_BIT)
+ | (0x35 << PARITY_BYTE_1_BIT));
+ writel_relaxed(value, ctrl_io->base + MMSS_DP_AUDIO_ISRC_0);
+
+ value = readl_relaxed(ctrl_io->base + MMSS_DP_AUDIO_ISRC_1);
+ /* Config header and parity byte 2 */
+ value |= ((0x0b << HEADER_BYTE_2_BIT)
+ | (0xc7 << PARITY_BYTE_2_BIT));
+ writel_relaxed(value, ctrl_io->base + MMSS_DP_AUDIO_ISRC_1);
+
+ writel_relaxed(0x33333333, ctrl_io->base + MMSS_DP_AUDIO_ISRC_2);
+ writel_relaxed(0x33333333, ctrl_io->base + MMSS_DP_AUDIO_ISRC_3);
+ writel_relaxed(0x33333333, ctrl_io->base + MMSS_DP_AUDIO_ISRC_4);
+}
+
+void mdss_dp_audio_setup_sdps(struct dss_io_data *ctrl_io)
+{
+ u32 sdp_cfg = 0;
+ u32 sdp_cfg2 = 0;
+
+ /* AUDIO_TIMESTAMP_SDP_EN */
+ sdp_cfg |= BIT(1);
+ /* AUDIO_STREAM_SDP_EN */
+ sdp_cfg |= BIT(2);
+ /* AUDIO_COPY_MANAGEMENT_SDP_EN */
+ sdp_cfg |= BIT(5);
+ /* AUDIO_ISRC_SDP_EN */
+ sdp_cfg |= BIT(6);
+ /* AUDIO_INFOFRAME_SDP_EN */
+ sdp_cfg |= BIT(20);
+
+ writel_relaxed(sdp_cfg, ctrl_io->base + MMSS_DP_SDP_CFG);
+
+ sdp_cfg2 = readl_relaxed(ctrl_io->base + MMSS_DP_SDP_CFG2);
+ /* IFRM_REGSRC -> Do not use reg values */
+ sdp_cfg2 &= ~BIT(0);
+ /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
+ sdp_cfg2 &= ~BIT(1);
+
+ writel_relaxed(sdp_cfg2, ctrl_io->base + MMSS_DP_SDP_CFG2);
+
+ mdss_dp_audio_config_parity_settings(ctrl_io);
+}
+
+void mdss_dp_audio_enable(struct dss_io_data *ctrl_io, bool enable)
+{
+ u32 audio_ctrl = readl_relaxed(ctrl_io->base + MMSS_DP_AUDIO_CFG);
+
+ if (enable)
+ audio_ctrl |= BIT(0);
+ else
+ audio_ctrl &= ~BIT(0);
+
+ writel_relaxed(audio_ctrl, ctrl_io->base + MMSS_DP_AUDIO_CFG);
+}
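
The three helpers added above are intended to be called together when audio starts;
a minimal usage sketch mirroring the dp_audio_info_setup() path added earlier in
this patch (dp is a struct mdss_dp_drv_pdata *, link_rate the trained rate such as
DP_LINK_RATE_270):

        mdss_dp_audio_enable(&dp->ctrl_io, true);                    /* set enable bit in MMSS_DP_AUDIO_CFG */
        mdss_dp_config_audio_acr_ctrl(&dp->ctrl_io, dp->link_rate);  /* program ACR for the link rate */
        mdss_dp_audio_setup_sdps(&dp->ctrl_io);                      /* enable audio SDPs and parity bytes */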
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.h b/drivers/video/fbdev/msm/mdss_dp_util.h
index a2649b8c1611..96664d1f9954 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.h
+++ b/drivers/video/fbdev/msm/mdss_dp_util.h
@@ -56,6 +56,75 @@
#define DP_MAINLINK_READY (0x00000440)
#define DP_TU (0x0000044C)
+#define MMSS_DP_AUDIO_TIMING_GEN (0x00000480)
+#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000484)
+#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000488)
+#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000048C)
+#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000490)
+#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000494)
+#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000498)
+
+#define MMSS_DP_AUDIO_CFG (0x00000600)
+#define MMSS_DP_AUDIO_STATUS (0x00000604)
+#define MMSS_DP_AUDIO_PKT_CTRL (0x00000608)
+#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000060C)
+#define MMSS_DP_AUDIO_ACR_CTRL (0x00000610)
+#define MMSS_DP_AUDIO_CTRL_RESET (0x00000614)
+
+#define MMSS_DP_SDP_CFG (0x00000628)
+#define MMSS_DP_SDP_CFG2 (0x0000062C)
+#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000630)
+#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000634)
+
+#define MMSS_DP_AUDIO_STREAM_0 (0x00000640)
+#define MMSS_DP_AUDIO_STREAM_1 (0x00000644)
+
+#define MMSS_DP_EXTENSION_0 (0x00000650)
+#define MMSS_DP_EXTENSION_1 (0x00000654)
+#define MMSS_DP_EXTENSION_2 (0x00000658)
+#define MMSS_DP_EXTENSION_3 (0x0000065C)
+#define MMSS_DP_EXTENSION_4 (0x00000660)
+#define MMSS_DP_EXTENSION_5 (0x00000664)
+#define MMSS_DP_EXTENSION_6 (0x00000668)
+#define MMSS_DP_EXTENSION_7 (0x0000066C)
+#define MMSS_DP_EXTENSION_8 (0x00000670)
+#define MMSS_DP_EXTENSION_9 (0x00000674)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000678)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000067C)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000680)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000684)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000688)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000068C)
+#define MMSS_DP_AUDIO_ISRC_0 (0x00000690)
+#define MMSS_DP_AUDIO_ISRC_1 (0x00000694)
+#define MMSS_DP_AUDIO_ISRC_2 (0x00000698)
+#define MMSS_DP_AUDIO_ISRC_3 (0x0000069C)
+#define MMSS_DP_AUDIO_ISRC_4 (0x000006A0)
+#define MMSS_DP_AUDIO_ISRC_5 (0x000006A4)
+#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000006A8)
+#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000006B0)
+
+#define MMSS_DP_GENERIC0_0 (0x00000700)
+#define MMSS_DP_GENERIC0_1 (0x00000704)
+#define MMSS_DP_GENERIC0_2 (0x00000708)
+#define MMSS_DP_GENERIC0_3 (0x0000070C)
+#define MMSS_DP_GENERIC0_4 (0x00000710)
+#define MMSS_DP_GENERIC0_5 (0x00000714)
+#define MMSS_DP_GENERIC0_6 (0x00000718)
+#define MMSS_DP_GENERIC0_7 (0x0000071C)
+#define MMSS_DP_GENERIC0_8 (0x00000720)
+#define MMSS_DP_GENERIC0_9 (0x00000724)
+#define MMSS_DP_GENERIC1_0 (0x00000728)
+#define MMSS_DP_GENERIC1_1 (0x0000072C)
+#define MMSS_DP_GENERIC1_2 (0x00000730)
+#define MMSS_DP_GENERIC1_3 (0x00000734)
+#define MMSS_DP_GENERIC1_4 (0x00000738)
+#define MMSS_DP_GENERIC1_5 (0x0000073C)
+#define MMSS_DP_GENERIC1_6 (0x00000740)
+#define MMSS_DP_GENERIC1_7 (0x00000744)
+#define MMSS_DP_GENERIC1_8 (0x00000748)
+#define MMSS_DP_GENERIC1_9 (0x0000074C)
+
/*DP PHY Register offsets */
#define DP_PHY_REVISION_ID0 (0x00000000)
#define DP_PHY_REVISION_ID1 (0x00000004)
@@ -162,5 +231,9 @@ void mdss_dp_usbpd_ext_dp_status(struct usbpd_dp_status *dp_status);
u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp);
void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
struct lane_mapping l_map);
+void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io,
+ char link_rate);
+void mdss_dp_audio_setup_sdps(struct dss_io_data *ctrl_io);
+void mdss_dp_audio_enable(struct dss_io_data *ctrl_io, bool enable);
#endif /* __DP_UTIL_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 4285a14e7f35..c145f72c3c70 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -1180,7 +1180,7 @@ static int mdss_dsi_off(struct mdss_panel_data *pdata, int power_state)
panel_info = &ctrl_pdata->panel_data.panel_info;
- pr_debug("%s+: ctrl=%p ndx=%d power_state=%d\n",
+ pr_debug("%s+: ctrl=%pK ndx=%d power_state=%d\n",
__func__, ctrl_pdata, ctrl_pdata->ndx, power_state);
if (power_state == panel_info->panel_power_state) {
@@ -1361,7 +1361,7 @@ int mdss_dsi_on(struct mdss_panel_data *pdata)
mdss_dsi_validate_debugfs_info(ctrl_pdata);
cur_power_state = pdata->panel_info.panel_power_state;
- pr_debug("%s+: ctrl=%p ndx=%d cur_power_state=%d\n", __func__,
+ pr_debug("%s+: ctrl=%pK ndx=%d cur_power_state=%d\n", __func__,
ctrl_pdata, ctrl_pdata->ndx, cur_power_state);
pinfo = &pdata->panel_info;
@@ -1535,7 +1535,7 @@ static int mdss_dsi_unblank(struct mdss_panel_data *pdata)
panel_data);
mipi = &pdata->panel_info.mipi;
- pr_debug("%s+: ctrl=%p ndx=%d cur_power_state=%d ctrl_state=%x\n",
+ pr_debug("%s+: ctrl=%pK ndx=%d cur_power_state=%d ctrl_state=%x\n",
__func__, ctrl_pdata, ctrl_pdata->ndx,
pdata->panel_info.panel_power_state, ctrl_pdata->ctrl_state);
@@ -1608,7 +1608,7 @@ static int mdss_dsi_blank(struct mdss_panel_data *pdata, int power_state)
panel_data);
mipi = &pdata->panel_info.mipi;
- pr_debug("%s+: ctrl=%p ndx=%d power_state=%d\n",
+ pr_debug("%s+: ctrl=%pK ndx=%d power_state=%d\n",
__func__, ctrl_pdata, ctrl_pdata->ndx, power_state);
mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
@@ -1682,7 +1682,7 @@ static int mdss_dsi_post_panel_on(struct mdss_panel_data *pdata)
ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
panel_data);
- pr_debug("%s+: ctrl=%p ndx=%d\n", __func__,
+ pr_debug("%s+: ctrl=%pK ndx=%d\n", __func__,
ctrl_pdata, ctrl_pdata->ndx);
mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
@@ -1716,7 +1716,7 @@ int mdss_dsi_cont_splash_on(struct mdss_panel_data *pdata)
ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
panel_data);
- pr_debug("%s+: ctrl=%p ndx=%d\n", __func__,
+ pr_debug("%s+: ctrl=%pK ndx=%d\n", __func__,
ctrl_pdata, ctrl_pdata->ndx);
WARN((ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT),
@@ -3000,8 +3000,8 @@ static int mdss_dsi_get_bridge_chip_params(struct mdss_panel_info *pinfo,
u32 temp_val = 0;
if (!ctrl_pdata || !pdev || !pinfo) {
- pr_err("%s: Invalid Params ctrl_pdata=%p, pdev=%p\n", __func__,
- ctrl_pdata, pdev);
+ pr_err("%s: Invalid Params ctrl_pdata=%pK, pdev=%pK\n",
+ __func__, ctrl_pdata, pdev);
rc = -EINVAL;
goto end;
}
@@ -3370,7 +3370,7 @@ static int mdss_dsi_res_init(struct platform_device *pdev)
mdss_dsi_res->shared_data = devm_kzalloc(&pdev->dev,
sizeof(struct dsi_shared_data),
GFP_KERNEL);
- pr_debug("%s Allocated shared_data=%p\n", __func__,
+ pr_debug("%s Allocated shared_data=%pK\n", __func__,
mdss_dsi_res->shared_data);
if (!mdss_dsi_res->shared_data) {
pr_err("%s Unable to alloc mem for shared_data\n",
@@ -3436,7 +3436,7 @@ static int mdss_dsi_res_init(struct platform_device *pdev)
rc = -ENOMEM;
goto mem_fail;
}
- pr_debug("%s Allocated ctrl_pdata[%d]=%p\n",
+ pr_debug("%s Allocated ctrl_pdata[%d]=%pK\n",
__func__, i, mdss_dsi_res->ctrl_pdata[i]);
mdss_dsi_res->ctrl_pdata[i]->shared_data =
mdss_dsi_res->shared_data;
@@ -3446,7 +3446,7 @@ static int mdss_dsi_res_init(struct platform_device *pdev)
}
mdss_dsi_res->pdev = pdev;
- pr_debug("%s: Setting up mdss_dsi_res=%p\n", __func__, mdss_dsi_res);
+ pr_debug("%s: Setting up mdss_dsi_res=%pK\n", __func__, mdss_dsi_res);
return 0;
@@ -3773,11 +3773,11 @@ int mdss_dsi_retrieve_ctrl_resources(struct platform_device *pdev, int mode,
pr_debug("%s:%d unable to remap dsi phy regulator resources\n",
__func__, __LINE__);
else
- pr_info("%s: phy_regulator_base=%p phy_regulator_size=%x\n",
+ pr_info("%s: phy_regulator_base=%pK phy_regulator_size=%x\n",
__func__, ctrl->phy_regulator_io.base,
ctrl->phy_regulator_io.len);
- pr_info("%s: ctrl_base=%p ctrl_size=%x phy_base=%p phy_size=%x\n",
+ pr_info("%s: ctrl_base=%pK ctrl_size=%x phy_base=%pK phy_size=%x\n",
__func__, ctrl->ctrl_base, ctrl->reg_size, ctrl->phy_io.base,
ctrl->phy_io.len);
@@ -3997,7 +3997,7 @@ static int mdss_dsi_parse_ctrl_params(struct platform_device *ctrl_pdev,
data = of_get_property(ctrl_pdev->dev.of_node,
"qcom,display-id", &len);
if (!data || len <= 0)
- pr_err("%s:%d Unable to read qcom,display-id, data=%p,len=%d\n",
+ pr_err("%s:%d Unable to read qcom,display-id, data=%pK,len=%d\n",
__func__, __LINE__, data, len);
else
snprintf(ctrl_pdata->panel_data.panel_info.display_id,
diff --git a/drivers/video/fbdev/msm/mdss_dsi_clk.c b/drivers/video/fbdev/msm/mdss_dsi_clk.c
index 5d6fb8722dad..a7d1c251fab0 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_clk.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_clk.c
@@ -805,7 +805,7 @@ int mdss_dsi_clk_req_state(void *client, enum mdss_dsi_clk_type clk,
if (!client || !clk || clk > (MDSS_DSI_CORE_CLK | MDSS_DSI_LINK_CLK) ||
state > MDSS_DSI_CLK_EARLY_GATE) {
- pr_err("Invalid params, client = %p, clk = 0x%x, state = %d\n",
+ pr_err("Invalid params, client = %pK, clk = 0x%x, state = %d\n",
client, clk, state);
return -EINVAL;
}
@@ -903,7 +903,7 @@ int mdss_dsi_clk_set_link_rate(void *client, enum mdss_dsi_link_clk_type clk,
struct mdss_dsi_clk_mngr *mngr;
if (!client || (clk > MDSS_DSI_LINK_CLK_MAX)) {
- pr_err("Invalid params, client = %p, clk = 0x%x", client, clk);
+ pr_err("Invalid params, client = %pK, clk = 0x%x", client, clk);
return -EINVAL;
}
@@ -1002,7 +1002,7 @@ int mdss_dsi_clk_force_toggle(void *client, u32 clk)
struct mdss_dsi_clk_mngr *mngr;
if (!client || !clk || clk >= MDSS_DSI_CLKS_MAX) {
- pr_err("Invalid params, client = %p, clk = 0x%x\n",
+ pr_err("Invalid params, client = %pK, clk = 0x%x\n",
client, clk);
return -EINVAL;
}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index 78dc17536416..18bcdca31bf6 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -106,7 +106,7 @@ void mdss_dsi_ctrl_init(struct device *ctrl_dev,
if (ctrl->mdss_util->register_irq(ctrl->dsi_hw))
pr_err("%s: mdss_register_irq failed.\n", __func__);
- pr_debug("%s: ndx=%d base=%p\n", __func__, ctrl->ndx, ctrl->ctrl_base);
+ pr_debug("%s: ndx=%d base=%pK\n", __func__, ctrl->ndx, ctrl->ctrl_base);
init_completion(&ctrl->dma_comp);
init_completion(&ctrl->mdp_comp);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index 4bd705bdc05f..fe6ce30d0c89 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -354,7 +354,7 @@ int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
}
if (gpio_is_valid(ctrl_pdata->lcd_mode_sel_gpio)) {
- bool out;
+ bool out = false;
if ((pinfo->mode_sel_state == MODE_SEL_SINGLE_PORT) ||
(pinfo->mode_sel_state == MODE_GPIO_HIGH))
@@ -770,7 +770,7 @@ static int mdss_dsi_post_panel_on(struct mdss_panel_data *pdata)
ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
panel_data);
- pr_debug("%s: ctrl=%p ndx=%d\n", __func__, ctrl, ctrl->ndx);
+ pr_debug("%s: ctrl=%pK ndx=%d\n", __func__, ctrl, ctrl->ndx);
pinfo = &pdata->panel_info;
if (pinfo->dcs_cmd_by_left && ctrl->ndx != DSI_CTRL_LEFT)
@@ -808,7 +808,7 @@ static int mdss_dsi_panel_off(struct mdss_panel_data *pdata)
ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
panel_data);
- pr_debug("%s: ctrl=%p ndx=%d\n", __func__, ctrl, ctrl->ndx);
+ pr_debug("%s: ctrl=%pK ndx=%d\n", __func__, ctrl, ctrl->ndx);
if (pinfo->dcs_cmd_by_left) {
if (ctrl->ndx != DSI_CTRL_LEFT)
@@ -843,7 +843,7 @@ static int mdss_dsi_panel_low_power_config(struct mdss_panel_data *pdata,
ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
panel_data);
- pr_debug("%s: ctrl=%p ndx=%d enable=%d\n", __func__, ctrl, ctrl->ndx,
+ pr_debug("%s: ctrl=%pK ndx=%d enable=%d\n", __func__, ctrl, ctrl->ndx,
enable);
/* Any panel specific low power commands/config */
@@ -2195,7 +2195,7 @@ static int mdss_dsi_panel_timing_from_dt(struct device_node *np,
if (np->name) {
pt->timing.name = kstrdup(np->name, GFP_KERNEL);
- pr_info("%s: found new timing \"%s\" (%p)\n", __func__,
+ pr_info("%s: found new timing \"%s\" (%pK)\n", __func__,
np->name, &pt->timing);
}
@@ -2538,7 +2538,7 @@ static int mdss_panel_parse_dt(struct device_node *np,
bridge_chip_name = of_get_property(np,
"qcom,bridge-name", &len);
if (!bridge_chip_name || len <= 0) {
- pr_err("%s:%d Unable to read qcom,bridge_name, data=%p,len=%d\n",
+ pr_err("%s:%d Unable to read qcom,bridge_name, data=%pK,len=%d\n",
__func__, __LINE__, bridge_chip_name, len);
rc = -EINVAL;
goto error;
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c b/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c
index 7d201a574a00..94554bd5b423 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c
@@ -95,12 +95,12 @@ static bool mdss_dsi_phy_v3_is_pll_on(struct mdss_dsi_ctrl_pdata *ctrl)
if (mdss_dsi_is_ctrl_clk_slave(ctrl))
return false;
- data = DSI_PHY_R32(ctrl->phy_io.base, CMN_CTRL_0);
+ data = DSI_PHY_R32(ctrl->phy_io.base, CMN_PLL_CNTRL);
/* Make sure the register has been read prior to checking the status */
mb();
- return (data & BIT(5));
+ return (data & BIT(0));
}
static void mdss_dsi_phy_v3_set_pll_source(
@@ -113,8 +113,8 @@ static void mdss_dsi_phy_v3_set_pll_source(
else
pll_src = 0x00; /* internal PLL */
- /* set the PLL src and set global clock enable */
- reg = (pll_src << 2) | BIT(5);
+ /* set the PLL src */
+ reg = (pll_src << 2);
DSI_PHY_W32(ctrl->phy_io.base, CMN_CLK_CFG1, reg);
}
@@ -183,7 +183,7 @@ static void mdss_dsi_phy_v3_config_lane_settings(
struct mdss_dsi_ctrl_pdata *ctrl)
{
int i;
- u32 tx_dctrl[] = {0x98, 0x99, 0x98, 0x9a, 0x98};
+ u32 tx_dctrl[] = {0x18, 0x19, 0x18, 0x02, 0x18};
struct mdss_dsi_phy_ctrl *pd =
&(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);
@@ -198,8 +198,8 @@ static void mdss_dsi_phy_v3_config_lane_settings(
*/
DSI_PHY_W32(ctrl->phy_io.base, LNX_LPRX_CTRL(i), 0);
- DSI_PHY_W32(ctrl->phy_io.base, LNX_HSTX_STR_CTRL(i), 0x88);
DSI_PHY_W32(ctrl->phy_io.base, LNX_PIN_SWAP(i), 0x0);
+ DSI_PHY_W32(ctrl->phy_io.base, LNX_HSTX_STR_CTRL(i), 0x88);
}
mdss_dsi_phy_v3_config_lpcdrx(ctrl, true);
@@ -383,8 +383,11 @@ int mdss_dsi_phy_v3_init(struct mdss_dsi_ctrl_pdata *ctrl,
return rc;
}
- /* de-assert digital power down */
- DSI_PHY_W32(ctrl->phy_io.base, CMN_CTRL_0, BIT(6));
+ /* de-assert digital and pll power down */
+ DSI_PHY_W32(ctrl->phy_io.base, CMN_CTRL_0, BIT(6) | BIT(5));
+
+ /* Assert PLL core reset */
+ DSI_PHY_W32(ctrl->phy_io.base, CMN_PLL_CNTRL, 0x00);
/* turn off resync FIFO */
DSI_PHY_W32(ctrl->phy_io.base, CMN_RBUF_CTRL, 0x00);
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index b2c0c78d3f2b..e0f1a37ac84e 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -2062,7 +2062,7 @@ int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd, size_t fb_size)
rc = PTR_ERR(vaddr);
goto err_unmap;
}
- pr_debug("alloc 0x%zuB vaddr = %p for fb%d\n", fb_size,
+ pr_debug("alloc %zuB vaddr = %pK for fb%d\n", fb_size,
vaddr, mfd->index);
mfd->fbi->screen_base = (char *) vaddr;
@@ -2161,7 +2161,7 @@ static int mdss_fb_fbmem_ion_mmap(struct fb_info *info,
vma->vm_page_prot =
pgprot_writecombine(vma->vm_page_prot);
- pr_debug("vma=%p, addr=%x len=%ld\n",
+ pr_debug("vma=%pK, addr=%x len=%ld\n",
vma, (unsigned int)addr, len);
pr_debug("vm_start=%x vm_end=%x vm_page_prot=%ld\n",
(unsigned int)vma->vm_start,
@@ -2328,7 +2328,7 @@ static int mdss_fb_alloc_fbmem_iommu(struct msm_fb_data_type *mfd, int dom)
return -ERANGE;
}
- pr_debug("alloc 0x%zxB @ (%pa phys) (0x%p virt) (%pa iova) for fb%d\n",
+ pr_debug("alloc 0x%zxB @ (%pa phys) (0x%pK virt) (%pa iova) for fb%d\n",
size, &phys, virt, &mfd->iova, mfd->index);
mfd->fbi->screen_base = virt;
@@ -2616,7 +2616,7 @@ static int mdss_fb_open(struct fb_info *info, int user)
}
mfd->ref_cnt++;
- pr_debug("mfd refcount:%d file:%p\n", mfd->ref_cnt, info->file);
+ pr_debug("mfd refcount:%d file:%pK\n", mfd->ref_cnt, info->file);
return 0;
@@ -2681,7 +2681,7 @@ static int mdss_fb_release_all(struct fb_info *info, bool release_all)
pr_warn("file node not found or wrong ref cnt: release all:%d refcnt:%d\n",
release_all, mfd->ref_cnt);
- pr_debug("current process=%s pid=%d mfd->ref=%d file:%p\n",
+ pr_debug("current process=%s pid=%d mfd->ref=%d file:%pK\n",
task->comm, current->tgid, mfd->ref_cnt, info->file);
if (!mfd->ref_cnt || release_all) {
@@ -3324,7 +3324,7 @@ static int mdss_fb_pan_display_sub(struct fb_var_screeninfo *var,
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (!mfd || !var) {
- pr_err("Invalid parameters mfd:%p var:%p\n", mfd, var);
+ pr_err("Invalid parameters mfd:%pK var:%pK\n", mfd, var);
return -EINVAL;
}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index 3e4e2f74a32e..eb02cad2a634 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -1360,7 +1360,7 @@ static int hdmi_tx_sysfs_create(struct hdmi_tx_ctrl *hdmi_ctrl,
return rc;
}
hdmi_ctrl->kobj = &fbi->dev->kobj;
- DEV_DBG("%s: sysfs group %p\n", __func__, hdmi_ctrl->kobj);
+ DEV_DBG("%s: sysfs group %pK\n", __func__, hdmi_ctrl->kobj);
return 0;
} /* hdmi_tx_sysfs_create */
@@ -1915,6 +1915,7 @@ static int hdmi_tx_init_ext_disp(struct hdmi_tx_ctrl *hdmi_ctrl)
hdmi_ctrl->ext_audio_data.type = EXT_DISPLAY_TYPE_HDMI;
hdmi_ctrl->ext_audio_data.kobj = hdmi_ctrl->kobj;
+ hdmi_ctrl->ext_audio_data.pdev = hdmi_ctrl->pdev;
hdmi_ctrl->ext_audio_data.codec_ops.audio_info_setup =
hdmi_tx_audio_info_setup;
hdmi_ctrl->ext_audio_data.codec_ops.get_audio_edid_blk =
@@ -2978,7 +2979,7 @@ int msm_hdmi_register_audio_codec(struct platform_device *pdev,
if (!hdmi_ctrl || !ops) {
DEV_ERR("%s: invalid input\n", __func__);
- return -ENODEV;
+ return -EPROBE_DEFER;
}
ret = msm_ext_disp_register_audio_codec(hdmi_ctrl->ext_pdev, ops);
@@ -4030,7 +4031,7 @@ static int hdmi_tx_init_resource(struct hdmi_tx_ctrl *hdmi_ctrl)
DEV_DBG("%s: '%s' remap failed or not available\n",
__func__, hdmi_tx_io_name(i));
}
- DEV_INFO("%s: '%s': start = 0x%p, len=0x%x\n", __func__,
+ DEV_INFO("%s: '%s': start = 0x%pK, len=0x%x\n", __func__,
hdmi_tx_io_name(i), pdata->io[i].base,
pdata->io[i].len);
}
@@ -4581,7 +4582,7 @@ static int hdmi_tx_get_dt_data(struct platform_device *pdev,
data = of_get_property(pdev->dev.of_node, "qcom,display-id", &len);
if (!data || len <= 0)
- pr_err("%s:%d Unable to read qcom,display-id, data=%p,len=%d\n",
+ pr_err("%s:%d Unable to read qcom,display-id, data=%pK,len=%d\n",
__func__, __LINE__, data, len);
else
snprintf(hdmi_ctrl->panel_data.panel_info.display_id,
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index e1aa004e14e6..f35156a2cfcb 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -635,7 +635,7 @@ struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
client->usecase_ndx = VOTE_INDEX_DISABLE;
client->id = id;
- pr_debug("bus vote client %s created:%p id :%d\n", client_name,
+ pr_debug("bus vote client %s created:%pK id :%d\n", client_name,
client, id);
id++;
list_add(&client->list, &mdss_res->reg_bus_clist);
@@ -649,7 +649,7 @@ void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
if (!client) {
pr_err("reg bus vote: invalid client handle\n");
} else {
- pr_debug("bus vote client %s destroyed:%p id:%u\n",
+ pr_debug("bus vote client %s destroyed:%pK id:%u\n",
client->name, client, client->id);
mutex_lock(&mdss_res->reg_bus_lock);
list_del_init(&client->list);
@@ -2081,7 +2081,7 @@ static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
mdata->iclient = msm_ion_client_create(mdata->pdev->name);
if (IS_ERR_OR_NULL(mdata->iclient)) {
- pr_err("msm_ion_client_create() return error (%p)\n",
+ pr_err("msm_ion_client_create() return error (%pK)\n",
mdata->iclient);
mdata->iclient = NULL;
}
@@ -2726,7 +2726,7 @@ static int mdss_mdp_probe(struct platform_device *pdev)
if (rc)
pr_debug("unable to map MDSS VBIF non-realtime base\n");
else
- pr_debug("MDSS VBIF NRT HW Base addr=%p len=0x%x\n",
+ pr_debug("MDSS VBIF NRT HW Base addr=%pK len=0x%x\n",
mdata->vbif_nrt_io.base, mdata->vbif_nrt_io.len);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -3554,7 +3554,7 @@ static int mdss_mdp_cdm_addr_setup(struct mdss_data_type *mdata,
head[i].base = (mdata->mdss_io.base) + cdm_offsets[i];
atomic_set(&head[i].kref.refcount, 0);
mutex_init(&head[i].lock);
- pr_debug("%s: cdm off (%d) = %p\n", __func__, i, head[i].base);
+ pr_debug("%s: cdm off (%d) = %pK\n", __func__, i, head[i].base);
}
mdata->cdm_off = head;
@@ -3621,7 +3621,7 @@ static int mdss_mdp_dsc_addr_setup(struct mdss_data_type *mdata,
for (i = 0; i < len; i++) {
head[i].num = i;
head[i].base = (mdata->mdss_io.base) + dsc_offsets[i];
- pr_debug("dsc off (%d) = %p\n", i, head[i].base);
+ pr_debug("dsc off (%d) = %pK\n", i, head[i].base);
}
mdata->dsc_off = head;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 2218e9c4ac81..ed55057e1d7e 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -1162,7 +1162,7 @@ int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
prefill_params.is_hflip = pipe->flags & MDP_FLIP_LR;
prefill_params.is_cmd = !mixer->ctl->is_video_mode;
prefill_params.pnum = pipe->num;
- prefill_params.is_bwc = mdss_mdp_is_ubwc_format(pipe->src_fmt);
+ prefill_params.is_ubwc = mdss_mdp_is_ubwc_format(pipe->src_fmt);
prefill_params.is_nv12 = mdss_mdp_is_nv12_format(pipe->src_fmt);
mdss_mdp_get_bw_vote_mode(mixer, mdata->mdp_rev, perf,
@@ -4426,17 +4426,17 @@ void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
}
previous_frame_pu_type = mdss_mdp_get_pu_type(ctl);
- mdss_mdp_set_mixer_roi(ctl->mixer_left, l_roi);
- if (ctl->mixer_left)
+ if (ctl->mixer_left) {
+ mdss_mdp_set_mixer_roi(ctl->mixer_left, l_roi);
ctl->roi = ctl->mixer_left->roi;
+ }
if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
- if (sctl) {
+ if (sctl && sctl->mixer_left) {
mdss_mdp_set_mixer_roi(sctl->mixer_left, r_roi);
- if (sctl->mixer_left)
- sctl->roi = sctl->mixer_left->roi;
+ sctl->roi = sctl->mixer_left->roi;
}
} else if (is_dual_lm_single_display(ctl->mfd) && ctl->mixer_right) {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 0779f7e7afae..2c2dc6f18fd9 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -903,6 +903,22 @@ exit:
return rc;
}
+static bool mdss_mdp_cmd_is_autorefresh_enabled(struct mdss_mdp_ctl *mctl)
+{
+ struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
+ bool enabled = false;
+
+ /* check the ctl to make sure the lock was initialized */
+ if (!ctx || !ctx->ctl)
+ return false;
+
+ mutex_lock(&ctx->autorefresh_lock);
+ if (ctx->autorefresh_state == MDP_AUTOREFRESH_ON)
+ enabled = true;
+ mutex_unlock(&ctx->autorefresh_lock);
+
+ return enabled;
+}
static inline void mdss_mdp_cmd_clk_on(struct mdss_mdp_cmd_ctx *ctx)
{
@@ -1069,7 +1085,7 @@ static void mdss_mdp_cmd_intf_callback(void *data, int event)
}
}
-static void mdss_mdp_cmd_writeptr_done(void *arg)
+static void mdss_mdp_cmd_lineptr_done(void *arg)
{
struct mdss_mdp_ctl *ctl = arg;
struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
@@ -1082,6 +1098,7 @@ static void mdss_mdp_cmd_writeptr_done(void *arg)
}
lineptr_time = ktime_get();
+ pr_debug("intr lineptr_time=%lld\n", ktime_to_ms(lineptr_time));
spin_lock(&ctx->clk_lock);
list_for_each_entry(tmp, &ctx->lineptr_handlers, list) {
@@ -1376,6 +1393,19 @@ static int mdss_mdp_cmd_lineptr_ctrl(struct mdss_mdp_ctl *ctl, bool enable)
return rc;
}
+/*
+ * Interface used to update the lineptr value set through the sysfs node.
+ * The new value takes effect immediately only when autorefresh is enabled;
+ * otherwise it is applied on the next kickoff.
+ */
+static int mdss_mdp_cmd_update_lineptr(struct mdss_mdp_ctl *ctl, bool enable)
+{
+ if (mdss_mdp_cmd_is_autorefresh_enabled(ctl))
+ return mdss_mdp_cmd_lineptr_ctrl(ctl, enable);
+
+ return 0;
+}
+
/**
* mdss_mdp_cmd_autorefresh_pp_done() - pp done irq callback for autorefresh
* @arg: void pointer to the controller context.
@@ -1423,7 +1453,10 @@ static void pingpong_done_work(struct work_struct *work)
if (!ctl->is_master)
ctl = mdss_mdp_get_main_ctl(ctl);
- if (mdss_mdp_is_lineptr_supported(ctl))
+
+ /* do not disable lineptr when autorefresh is enabled */
+ if (mdss_mdp_is_lineptr_supported(ctl)
+ && !mdss_mdp_cmd_is_autorefresh_enabled(ctl))
mdss_mdp_cmd_lineptr_ctrl(ctl, false);
}
}
@@ -1873,7 +1906,7 @@ static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctl->roi_bkup.w,
ctl->roi_bkup.h);
- pr_debug("%s: intf_num=%d ctx=%p koff_cnt=%d\n", __func__,
+ pr_debug("%s: intf_num=%d ctx=%pK koff_cnt=%d\n", __func__,
ctl->intf_num, ctx, atomic_read(&ctx->koff_cnt));
rc = __mdss_mdp_wait4pingpong(ctx);
@@ -2107,7 +2140,7 @@ int mdss_mdp_cmd_set_autorefresh_mode(struct mdss_mdp_ctl *mctl, int frame_cnt)
struct mdss_panel_info *pinfo;
if (!mctl || !mctl->is_master || !mctl->panel_data) {
- pr_err("invalid ctl mctl:%p pdata:%p\n",
+ pr_err("invalid ctl mctl:%pK pdata:%pK\n",
mctl, mctl ? mctl->panel_data : 0);
return -ENODEV;
}
@@ -3174,7 +3207,7 @@ static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
ctx->intf_stopped = 0;
- pr_debug("%s: ctx=%p num=%d aux=%d\n", __func__, ctx,
+ pr_debug("%s: ctx=%pK num=%d aux=%d\n", __func__, ctx,
default_pp_num, aux_pp_num);
MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
@@ -3182,7 +3215,7 @@ static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
ctx->default_pp_num, mdss_mdp_cmd_readptr_done, ctl);
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
- ctx->default_pp_num, mdss_mdp_cmd_writeptr_done, ctl);
+ ctx->default_pp_num, mdss_mdp_cmd_lineptr_done, ctl);
ret = mdss_mdp_cmd_tearcheck_setup(ctx, false);
if (ret)
@@ -3447,6 +3480,7 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
ctl->ops.early_wake_up_fnc = mdss_mdp_cmd_early_wake_up;
ctl->ops.reconfigure = mdss_mdp_cmd_reconfigure;
ctl->ops.pre_programming = mdss_mdp_cmd_pre_programming;
+ ctl->ops.update_lineptr = mdss_mdp_cmd_update_lineptr;
pr_debug("%s:-\n", __func__);
return 0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index 72fc20d97f44..cee168a33f85 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -297,7 +297,7 @@ int mdss_mdp_video_addr_setup(struct mdss_data_type *mdata,
for (i = 0; i < count; i++) {
head[i].base = mdata->mdss_io.base + offsets[i];
- pr_debug("adding Video Intf #%d offset=0x%x virt=%p\n", i,
+ pr_debug("adding Video Intf #%d offset=0x%x virt=%pK\n", i,
offsets[i], head[i].base);
head[i].ref_cnt = 0;
head[i].intf_num = i + MDSS_MDP_INTF0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 600701041309..d3a836ed2519 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -137,8 +137,12 @@ static int mdss_mdp_destination_scaler_pre_validate(struct mdss_mdp_ctl *ctl,
if ((ds_data->lm_width > get_panel_xres(pinfo)) ||
(ds_data->lm_height > get_panel_yres(pinfo)) ||
(ds_data->lm_width == 0) ||
- (ds_data->lm_height == 0)) {
- pr_err("Invalid LM width / height setting\n");
+ (ds_data->lm_height == 0) ||
+ (is_dsc_compression(pinfo) &&
+ !is_lm_configs_dsc_compatible(pinfo,
+ ds_data->lm_width, ds_data->lm_height))) {
+ pr_err("Invalid left LM {%d,%d} setting\n",
+ ds_data->lm_width, ds_data->lm_height);
return -EINVAL;
}
@@ -163,8 +167,12 @@ static int mdss_mdp_destination_scaler_pre_validate(struct mdss_mdp_ctl *ctl,
if ((ds_data->lm_width > get_panel_xres(pinfo)) ||
(ds_data->lm_height > get_panel_yres(pinfo)) ||
(ds_data->lm_width == 0) ||
- (ds_data->lm_height == 0)) {
- pr_err("Invalid LM width / height setting\n");
+ (ds_data->lm_height == 0) ||
+ (is_dsc_compression(pinfo) &&
+ !is_lm_configs_dsc_compatible(pinfo,
+ ds_data->lm_width, ds_data->lm_height))) {
+ pr_err("Invalid right LM {%d,%d} setting\n",
+ ds_data->lm_width, ds_data->lm_height);
return -EINVAL;
}
@@ -174,7 +182,7 @@ static int mdss_mdp_destination_scaler_pre_validate(struct mdss_mdp_ctl *ctl,
*/
ctl->mixer_right->width = ds_data->lm_width;
ctl->mixer_right->height = ds_data->lm_height;
- pr_info("Update mixer-right width/height: %dx%d\n",
+ pr_debug("Update mixer-right width/height: %dx%d\n",
ds_data->lm_width, ds_data->lm_height);
if (ctl->mixer_left &&
@@ -744,10 +752,6 @@ static int __validate_pipe_priorities(struct mdss_mdp_pipe *left,
(left->priority >= right->priority))
return -EINVAL;
- if ((left->multirect.num < right->multirect.num) &&
- (left->priority > right->priority))
- return -EINVAL;
-
return 0;
}
@@ -768,7 +772,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
pipe->src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
if (!pipe->src_fmt || !mixer) {
- pr_err("invalid layer format:%d or mixer:%p\n",
+ pr_err("invalid layer format:%d or mixer:%pK\n",
layer->buffer.format, pipe->mixer_left);
ret = -EINVAL;
goto end;
@@ -2246,7 +2250,7 @@ validate_exit:
}
} else {
pipe->file = file;
- pr_debug("file pointer attached with pipe is %p\n",
+ pr_debug("file pointer attached with pipe is %pK\n",
file);
}
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index e5cdc750193e..04e3c09e36d7 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -1083,7 +1083,7 @@ struct mdss_mdp_data *mdss_mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
list_move_tail(&buf->buf_list, &mdp5_data->bufs_used);
list_add_tail(&buf->pipe_list, &pipe->buf_queue);
- pr_debug("buffer alloc: %p\n", buf);
+ pr_debug("buffer alloc: %pK\n", buf);
return buf;
}
@@ -1137,7 +1137,7 @@ void mdss_mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
buf->last_freed = local_clock();
buf->state = MDP_BUF_STATE_UNUSED;
- pr_debug("buffer freed: %p\n", buf);
+ pr_debug("buffer freed: %pK\n", buf);
list_move_tail(&buf->buf_list, &mdp5_data->bufs_pool);
}
@@ -1514,7 +1514,7 @@ static int __overlay_queue_pipes(struct msm_fb_data_type *mfd)
if (buf) {
switch (buf->state) {
case MDP_BUF_STATE_READY:
- pr_debug("pnum=%d buf=%p first buffer ready\n",
+ pr_debug("pnum=%d buf=%pK first buffer ready\n",
pipe->num, buf);
break;
case MDP_BUF_STATE_ACTIVE:
@@ -2235,7 +2235,7 @@ static int __mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd,
u32 unset_ndx = 0;
int cnt = 0;
- pr_debug("releasing all resources for fb%d file:%p\n",
+ pr_debug("releasing all resources for fb%d file:%pK\n",
mfd->index, file);
mutex_lock(&mdp5_data->ov_lock);
@@ -3407,18 +3407,20 @@ static ssize_t mdss_mdp_misr_store(struct device *dev,
req.frame_count = 1;
} else {
pr_err("misr not supported fo this fb:%d\n", mfd->index);
+ rc = -ENODEV;
+ return rc;
}
if (enable_misr) {
mdss_misr_set(mdata, &req , ctl);
- if (is_panel_split(mfd))
+ if ((ctl->intf_type == MDSS_INTF_DSI) && is_panel_split(mfd))
mdss_misr_set(mdata, &sreq , ctl);
} else {
mdss_misr_disable(mdata, &req, ctl);
- if (is_panel_split(mfd))
+ if ((ctl->intf_type == MDSS_INTF_DSI) && is_panel_split(mfd))
mdss_misr_disable(mdata, &sreq , ctl);
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index 8cfb8e46777c..1eb695200dfe 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -2756,7 +2756,7 @@ int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe,
__set_pipe_multirect_opmode(pipe);
if (src_data == NULL) {
- pr_debug("src_data=%p pipe num=%dx\n",
+ pr_debug("src_data=%pK pipe num=%dx\n",
src_data, pipe->num);
goto update_nobuf;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index a760711e7501..efd09302de45 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -1180,7 +1180,7 @@ static int pp_rgb_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
int ret = 0;
if (!pipe) {
- pr_err("invalid param pipe %p\n", pipe);
+ pr_err("invalid param pipe %pK\n", pipe);
return -EINVAL;
}
if (pipe->flags & MDP_OVERLAY_PP_CFG_EN &&
@@ -1198,7 +1198,7 @@ static int pp_dma_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
int ret = 0;
if (!pipe) {
- pr_err("invalid param pipe %p\n", pipe);
+ pr_err("invalid param pipe %pK\n", pipe);
return -EINVAL;
}
if (pipe->flags & MDP_OVERLAY_PP_CFG_EN &&
@@ -1817,7 +1817,7 @@ void mdss_mdp_pipe_pp_clear(struct mdss_mdp_pipe *pipe)
struct pp_hist_col_info *hist_info;
if (!pipe) {
- pr_err("Invalid pipe context passed, %p\n",
+ pr_err("Invalid pipe context passed, %pK\n",
pipe);
return;
}
@@ -1943,7 +1943,7 @@ static int pp_mixer_setup(struct mdss_mdp_mixer *mixer)
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
if (!mixer || !mixer->ctl || !mixer->ctl->mfd || !mdata) {
- pr_err("invalid parameters, mixer %p ctl %p mfd %p mdata %p\n",
+ pr_err("invalid parameters, mixer %pK ctl %pK mfd %pK mdata %pK\n",
mixer, (mixer ? mixer->ctl : NULL),
(mixer ? (mixer->ctl ? mixer->ctl->mfd : NULL) : NULL),
mdata);
@@ -2667,7 +2667,7 @@ int mdss_mdp_pp_resume(struct msm_fb_data_type *mfd)
struct mdp_pa_v2_cfg_data *pa_v2_cache_cfg = NULL;
if (!mfd) {
- pr_err("invalid input: mfd = 0x%p\n", mfd);
+ pr_err("invalid input: mfd = 0x%pK\n", mfd);
return -EINVAL;
}
@@ -2904,7 +2904,7 @@ static int mdss_mdp_pp_dt_parse(struct device *dev)
ret = 0;
}
} else {
- pr_err("invalid dev %p mdata %p\n", dev, mdata);
+ pr_err("invalid dev %pK mdata %pK\n", dev, mdata);
ret = -EINVAL;
}
bail_out:
@@ -3038,7 +3038,7 @@ int mdss_mdp_pp_overlay_init(struct msm_fb_data_type *mfd)
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
if (!mfd || !mdata) {
- pr_err("Invalid mfd %p mdata %p\n", mfd, mdata);
+ pr_err("Invalid mfd %pK mdata %pK\n", mfd, mdata);
return -EPERM;
}
@@ -3054,7 +3054,7 @@ int mdss_mdp_pp_default_overlay_config(struct msm_fb_data_type *mfd,
int ret = 0;
if (!mfd || !pdata) {
- pr_err("Invalid parameters mfd %p pdata %p\n", mfd, pdata);
+ pr_err("Invalid parameters mfd %pK pdata %pK\n", mfd, pdata);
return -EINVAL;
}
@@ -3126,7 +3126,7 @@ static int pp_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in, int *bl_out,
if (!ad->bl_mfd || !ad->bl_mfd->panel_info ||
!ad->bl_att_lut) {
- pr_err("Invalid ad info: bl_mfd = 0x%p, ad->bl_mfd->panel_info = 0x%p, bl_att_lut = 0x%p\n",
+ pr_err("Invalid ad info: bl_mfd = 0x%pK, ad->bl_mfd->panel_info = 0x%pK, bl_att_lut = 0x%pK\n",
ad->bl_mfd,
(!ad->bl_mfd) ? NULL : ad->bl_mfd->panel_info,
ad->bl_att_lut);
@@ -3626,7 +3626,7 @@ int mdss_mdp_pcc_config(struct msm_fb_data_type *mfd,
if (pp_ops[PCC].pp_get_config) {
addr = mdss_mdp_get_dspp_addr_off(disp_num);
if (IS_ERR_OR_NULL(addr)) {
- pr_err("invalid dspp base_addr %p\n",
+ pr_err("invalid dspp base_addr %pK\n",
addr);
ret = -EINVAL;
goto pcc_clk_off;
@@ -4314,7 +4314,7 @@ int mdss_mdp_hist_lut_config(struct msm_fb_data_type *mfd,
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
base_addr = mdss_mdp_get_dspp_addr_off(dspp_num);
if (IS_ERR_OR_NULL(base_addr)) {
- pr_err("invalid base addr %p\n",
+ pr_err("invalid base addr %pK\n",
base_addr);
ret = -EINVAL;
goto hist_lut_clk_off;
@@ -4568,7 +4568,7 @@ int mdss_mdp_gamut_config(struct msm_fb_data_type *mfd,
if (pp_ops[GAMUT].pp_get_config) {
addr = mdss_mdp_get_dspp_addr_off(disp_num);
if (IS_ERR_OR_NULL(addr)) {
- pr_err("invalid dspp base addr %p\n",
+ pr_err("invalid dspp base addr %pK\n",
addr);
ret = -EINVAL;
goto gamut_clk_off;
@@ -4754,7 +4754,7 @@ static int pp_hist_enable(struct pp_hist_col_info *hist_info,
spin_lock_irqsave(&hist_info->hist_lock, flag);
if (hist_info->col_en) {
spin_unlock_irqrestore(&hist_info->hist_lock, flag);
- pr_err("%s Hist collection has already been enabled %p\n",
+ pr_err("%s Hist collection has already been enabled %pK\n",
__func__, hist_info->base);
ret = -EBUSY;
goto exit;
@@ -4903,7 +4903,7 @@ static int pp_hist_disable(struct pp_hist_col_info *hist_info)
spin_lock_irqsave(&hist_info->hist_lock, flag);
if (hist_info->col_en == false) {
spin_unlock_irqrestore(&hist_info->hist_lock, flag);
- pr_debug("Histogram already disabled (%p)\n", hist_info->base);
+ pr_debug("Histogram already disabled (%pK)\n", hist_info->base);
ret = -EINVAL;
goto exit;
}
@@ -5000,7 +5000,7 @@ int mdss_mdp_hist_intr_req(struct mdss_intr *intr, u32 bits, bool en)
unsigned long flag;
int ret = 0;
if (!intr) {
- pr_err("NULL addr passed, %p\n", intr);
+ pr_err("NULL addr passed, %pK\n", intr);
return -EINVAL;
}
@@ -5564,7 +5564,7 @@ static int mdss_mdp_get_ad(struct msm_fb_data_type *mfd,
*ret_ad = NULL;
if (!mfd) {
- pr_err("invalid parameter mfd %p\n", mfd);
+ pr_err("invalid parameter mfd %pK\n", mfd);
return -EINVAL;
}
mdata = mfd_to_mdata(mfd);
@@ -6145,7 +6145,7 @@ static int mdss_mdp_ad_ipc_reset(struct msm_fb_data_type *mfd)
struct mdss_ad_info *ad;
if (!mfd) {
- pr_err("mfd = 0x%p\n", mfd);
+ pr_err("mfd = 0x%pK\n", mfd);
return -EINVAL;
}
@@ -6179,13 +6179,13 @@ static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd)
u32 width;
if (!mfd) {
- pr_err("mfd = 0x%p\n", mfd);
+ pr_err("mfd = 0x%pK\n", mfd);
return -EINVAL;
}
ctl = mfd_to_ctl(mfd);
if (!ctl) {
- pr_err("ctl = 0x%p\n", ctl);
+ pr_err("ctl = 0x%pK\n", ctl);
return -EINVAL;
}
sctl = mdss_mdp_get_split_ctl(ctl);
@@ -6385,7 +6385,7 @@ static void pp_ad_calc_worker(struct work_struct *work)
}
mdp5_data = mfd_to_mdp5_data(ad->mfd);
if (!mdp5_data) {
- pr_err("mdp5_data = 0x%p\n", mdp5_data);
+ pr_err("mdp5_data = 0x%pK\n", mdp5_data);
mutex_unlock(&ad->lock);
return;
}
@@ -6393,7 +6393,7 @@ static void pp_ad_calc_worker(struct work_struct *work)
ctl = mfd_to_ctl(ad->mfd);
mdata = mfd_to_mdata(ad->mfd);
if (!ctl || !mdata || ad->calc_hw_num >= mdata->nad_cfgs) {
- pr_err("ctl = 0x%p, mdata = 0x%p, ad->calc_hw_num = %d, mdata->nad_cfg = %d\n",
+ pr_err("ctl = 0x%pK, mdata = 0x%pK, ad->calc_hw_num = %d, mdata->nad_cfg = %d\n",
ctl, mdata, ad->calc_hw_num,
(!mdata ? 0 : mdata->nad_cfgs));
mutex_unlock(&ad->lock);
@@ -7006,7 +7006,7 @@ static int sspp_cache_location(u32 pipe_type, enum pp_config_block *block)
int ret = 0;
if (!block) {
- pr_err("invalid params %p\n", block);
+ pr_err("invalid params %pK\n", block);
return -EINVAL;
}
switch (pipe_type) {
@@ -7035,7 +7035,7 @@ int mdss_mdp_pp_sspp_config(struct mdss_mdp_pipe *pipe)
int ret = 0;
if (!pipe) {
- pr_err("invalid params, pipe %p\n", pipe);
+ pr_err("invalid params, pipe %pK\n", pipe);
return -EINVAL;
}
@@ -7157,7 +7157,7 @@ static int pp_update_pcc_pipe_setup(struct mdss_mdp_pipe *pipe, u32 location)
char __iomem *pipe_base = NULL;
if (!pipe) {
- pr_err("invalid param pipe %p\n", pipe);
+ pr_err("invalid param pipe %pK\n", pipe);
return -EINVAL;
}
@@ -7209,7 +7209,7 @@ int mdss_mdp_pp_get_version(struct mdp_pp_feature_version *version)
u32 ver_info = mdp_pp_legacy;
if (!version) {
- pr_err("invalid param version %p\n", version);
+ pr_err("invalid param version %pK\n", version);
ret = -EINVAL;
goto exit_version;
}
@@ -7290,7 +7290,7 @@ int mdss_mdp_copy_layer_pp_info(struct mdp_input_layer *layer)
uint32_t ops;
if (!layer) {
- pr_err("invalid layer pointer passed %p\n", layer);
+ pr_err("invalid layer pointer passed %pK\n", layer);
return -EFAULT;
}
@@ -7302,7 +7302,7 @@ int mdss_mdp_copy_layer_pp_info(struct mdp_input_layer *layer)
ret = copy_from_user(pp_info, layer->pp_info,
sizeof(struct mdp_overlay_pp_params));
if (ret) {
- pr_err("layer list copy from user failed, pp_info = %p\n",
+ pr_err("layer list copy from user failed, pp_info = %pK\n",
layer->pp_info);
ret = -EFAULT;
goto exit_pp_info;
@@ -7435,7 +7435,7 @@ static int pp_mfd_ad_release_all(struct msm_fb_data_type *mfd)
int ret = 0;
if (!mdata || !mfd) {
- pr_err("invalid params mdata %p mfd %p\n", mdata, mfd);
+ pr_err("invalid params mdata %pK mfd %pK\n", mdata, mfd);
return -EINVAL;
}
if (!mdata->ad_calc_wq)
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
index 882145d4ff6c..48235c5b85ba 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
@@ -112,7 +112,7 @@ static int pp_hist_lut_cache_params_v1_7(struct mdp_hist_lut_data *config,
int ret = 0;
if (!config || !mdss_pp_res) {
- pr_err("invalid param config %p pp_res %p\n",
+ pr_err("invalid param config %pK pp_res %pK\n",
config, mdss_pp_res);
return -EINVAL;
}
@@ -122,7 +122,7 @@ static int pp_hist_lut_cache_params_v1_7(struct mdp_hist_lut_data *config,
return -EINVAL;
}
if (!mdss_pp_res->pp_data_v1_7) {
- pr_err("invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
+ pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
@@ -174,7 +174,7 @@ static int pp_hist_lut_cache_params_pipe_v1_7(struct mdp_hist_lut_data *config,
int ret = 0;
if (!config || !pipe) {
- pr_err("Invalid param config %p pipe %p\n",
+ pr_err("Invalid param config %pK pipe %pK\n",
config, pipe);
return -EINVAL;
}
@@ -245,7 +245,7 @@ int pp_hist_lut_cache_params(struct mdp_hist_lut_data *config,
int ret = 0;
if (!config || !res_cache) {
- pr_err("invalid param config %p res_cache %p\n",
+ pr_err("invalid param config %pK res_cache %pK\n",
config, res_cache);
return -EINVAL;
}
@@ -254,7 +254,7 @@ int pp_hist_lut_cache_params(struct mdp_hist_lut_data *config,
return -EINVAL;
}
if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
- pr_err("NULL payload for block %d mdss_pp_res %p pipe_res %p\n",
+ pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
res_cache->block, res_cache->mdss_pp_res,
res_cache->pipe_res);
return -EINVAL;
@@ -295,7 +295,7 @@ int pp_dither_cache_params_v1_7(struct mdp_dither_cfg_data *config,
struct mdp_dither_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
if (!config || !mdss_pp_res) {
- pr_err("invalid param config %p pp_res %p\n",
+ pr_err("invalid param config %pK pp_res %pK\n",
config, mdss_pp_res);
return -EINVAL;
}
@@ -305,7 +305,7 @@ int pp_dither_cache_params_v1_7(struct mdp_dither_cfg_data *config,
return -EINVAL;
}
if (!mdss_pp_res->pp_data_v1_7) {
- pr_err("invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
+ pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
@@ -367,7 +367,7 @@ int pp_dither_cache_params(struct mdp_dither_cfg_data *config,
{
int ret = 0;
if (!config || !mdss_pp_res) {
- pr_err("invalid param config %pi pp_res %p\n",
+ pr_err("invalid param config %pi pp_res %pK\n",
config, mdss_pp_res);
return -EINVAL;
}
@@ -396,7 +396,7 @@ static int pp_gamut_cache_params_v1_7(struct mdp_gamut_cfg_data *config,
int ret = 0, i = 0;
if (!config || !mdss_pp_res) {
- pr_err("invalid param config %p pp_res %p\n",
+ pr_err("invalid param config %pK pp_res %pK\n",
config, mdss_pp_res);
return -EINVAL;
}
@@ -407,7 +407,7 @@ static int pp_gamut_cache_params_v1_7(struct mdp_gamut_cfg_data *config,
return -EINVAL;
}
if (!mdss_pp_res->pp_data_v1_7) {
- pr_err("invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
+ pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
res_cache = mdss_pp_res->pp_data_v1_7;
@@ -564,7 +564,7 @@ int pp_gamut_cache_params(struct mdp_gamut_cfg_data *config,
{
int ret = 0;
if (!config || !mdss_pp_res) {
- pr_err("invalid param config %p pp_res %p\n",
+ pr_err("invalid param config %pK pp_res %pK\n",
config, mdss_pp_res);
return -EINVAL;
}
@@ -587,7 +587,7 @@ static int pp_pcc_cache_params_pipe_v1_7(struct mdp_pcc_cfg_data *config,
struct mdp_pcc_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
if (!pipe || !config) {
- pr_err("invalid params pipe %p config %p\n", pipe, config);
+ pr_err("invalid params pipe %pK config %pK\n", pipe, config);
return -EINVAL;
}
@@ -645,7 +645,7 @@ static int pp_pcc_cache_params_v1_7(struct mdp_pcc_cfg_data *config,
struct mdp_pcc_data_v1_7 *v17_cache_data, v17_usr_config;
if (!config || !mdss_pp_res) {
- pr_err("invalid param config %p pp_res %p\n",
+ pr_err("invalid param config %pK pp_res %pK\n",
config, mdss_pp_res);
return -EINVAL;
}
@@ -656,7 +656,7 @@ static int pp_pcc_cache_params_v1_7(struct mdp_pcc_cfg_data *config,
return -EINVAL;
}
if (!mdss_pp_res->pp_data_v1_7) {
- pr_err("invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
+ pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
@@ -696,7 +696,7 @@ int pp_pcc_cache_params(struct mdp_pcc_cfg_data *config,
{
int ret = 0;
if (!config || !res_cache) {
- pr_err("invalid param config %p pp_res %p\n",
+ pr_err("invalid param config %pK pp_res %pK\n",
config, res_cache);
return -EINVAL;
}
@@ -705,7 +705,7 @@ int pp_pcc_cache_params(struct mdp_pcc_cfg_data *config,
return -EINVAL;
}
if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
- pr_err("NULL payload for block %d mdss_pp_res %p pipe_res %p\n",
+ pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
res_cache->block, res_cache->mdss_pp_res,
res_cache->pipe_res);
return -EINVAL;
@@ -744,7 +744,7 @@ static int pp_igc_lut_cache_params_v1_7(struct mdp_igc_lut_data *config,
struct mdp_igc_lut_data_v1_7 *v17_cache_data, v17_usr_config;
u32 disp_num;
if (!config || !mdss_pp_res) {
- pr_err("invalid param config %p pp_res %p\n",
+ pr_err("invalid param config %pK pp_res %pK\n",
config, mdss_pp_res);
return -EINVAL;
}
@@ -754,7 +754,7 @@ static int pp_igc_lut_cache_params_v1_7(struct mdp_igc_lut_data *config,
return -EINVAL;
}
if (!mdss_pp_res->pp_data_v1_7) {
- pr_err("invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
+ pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
res_cache = mdss_pp_res->pp_data_v1_7;
@@ -790,7 +790,7 @@ static int pp_igc_lut_cache_params_v1_7(struct mdp_igc_lut_data *config,
}
if (copy_from_kernel && (!v17_usr_config.c0_c1_data ||
!v17_usr_config.c2_data)) {
- pr_err("copy from kernel invalid params c0_c1_data %p c2_data %p\n",
+ pr_err("copy from kernel invalid params c0_c1_data %pK c2_data %pK\n",
v17_usr_config.c0_c1_data,
v17_usr_config.c2_data);
ret = -EINVAL;
@@ -846,7 +846,7 @@ static int pp_igc_lut_cache_params_pipe_v1_7(struct mdp_igc_lut_data *config,
struct mdp_igc_lut_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
int ret = 0, fix_up = 0, i = 0;
if (!config || !pipe) {
- pr_err("invalid param config %p pipe %p\n",
+ pr_err("invalid param config %pK pipe %pK\n",
config, pipe);
return -EINVAL;
}
@@ -874,7 +874,7 @@ static int pp_igc_lut_cache_params_pipe_v1_7(struct mdp_igc_lut_data *config,
if (!v17_usr_config.c0_c1_data ||
!v17_usr_config.c2_data ||
v17_usr_config.len != IGC_LUT_ENTRIES) {
- pr_err("invalid c0_c1data %p c2_data %p tbl len %d\n",
+ pr_err("invalid c0_c1data %pK c2_data %pK tbl len %d\n",
v17_usr_config.c0_c1_data,
v17_usr_config.c2_data,
v17_usr_config.len);
@@ -968,7 +968,7 @@ int pp_igc_lut_cache_params(struct mdp_igc_lut_data *config,
{
int ret = 0;
if (!config || !res_cache) {
- pr_err("invalid param config %p pp_res %p\n",
+ pr_err("invalid param config %pK pp_res %pK\n",
config, res_cache);
return -EINVAL;
}
@@ -977,7 +977,7 @@ int pp_igc_lut_cache_params(struct mdp_igc_lut_data *config,
return -EINVAL;
}
if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
- pr_err("NULL payload for block %d mdss_pp_res %p pipe_res %p\n",
+ pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
res_cache->block, res_cache->mdss_pp_res,
res_cache->pipe_res);
ret = -EINVAL;
@@ -1127,7 +1127,7 @@ int pp_pgc_lut_cache_params(struct mdp_pgc_lut_data *config,
{
int ret = 0;
if (!config || !mdss_pp_res) {
- pr_err("invalid param config %p pp_res %p\n",
+ pr_err("invalid param config %pK pp_res %pK\n",
config, mdss_pp_res);
return -EINVAL;
}
@@ -1152,7 +1152,7 @@ static int pp_pa_cache_params_v1_7(struct mdp_pa_v2_cfg_data *config,
int disp_num, ret = 0;
if (!config || !mdss_pp_res) {
- pr_err("Invalid param config %p pp_res %p\n",
+ pr_err("Invalid param config %pK pp_res %pK\n",
config, mdss_pp_res);
return -EINVAL;
}
@@ -1164,7 +1164,7 @@ static int pp_pa_cache_params_v1_7(struct mdp_pa_v2_cfg_data *config,
}
if (!mdss_pp_res->pp_data_v1_7) {
- pr_err("Invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
+ pr_err("Invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
@@ -1252,7 +1252,7 @@ static int pp_pa_cache_params_pipe_v1_7(struct mdp_pa_v2_cfg_data *config,
int ret = 0;
if (!config || !pipe) {
- pr_err("Invalid param config %p pipe %p\n",
+ pr_err("Invalid param config %pK pipe %pK\n",
config, pipe);
return -EINVAL;
}
@@ -1308,7 +1308,7 @@ int pp_pa_cache_params(struct mdp_pa_v2_cfg_data *config,
{
int ret = 0;
if (!config || !res_cache) {
- pr_err("invalid param config %p pp_res %p\n",
+ pr_err("invalid param config %pK pp_res %pK\n",
config, res_cache);
return -EINVAL;
}
@@ -1317,7 +1317,7 @@ int pp_pa_cache_params(struct mdp_pa_v2_cfg_data *config,
return -EINVAL;
}
if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
- pr_err("NULL payload for block %d mdss_pp_res %p pipe_res %p\n",
+ pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
res_cache->block, res_cache->mdss_pp_res,
res_cache->pipe_res);
return -EINVAL;
@@ -1389,7 +1389,7 @@ int pp_copy_layer_igc_payload(struct mdp_overlay_pp_params *pp_info)
}
exit:
if (ret) {
- pr_err("layer list copy from user failed, IGC cfg payload = %p\n",
+ pr_err("layer list copy from user failed, IGC cfg payload = %pK\n",
pp_info->igc_cfg.cfg_payload);
ret = -EFAULT;
kfree(cfg_payload);
@@ -1419,7 +1419,7 @@ int pp_copy_layer_hist_lut_payload(struct mdp_overlay_pp_params *pp_info)
pp_info->hist_lut_cfg.cfg_payload,
sizeof(struct mdp_hist_lut_data_v1_7));
if (ret) {
- pr_err("layer list copy from user failed, Hist LUT cfg payload = %p\n",
+ pr_err("layer list copy from user failed, Hist LUT cfg payload = %pK\n",
pp_info->hist_lut_cfg.cfg_payload);
ret = -EFAULT;
kfree(cfg_payload);
@@ -1457,7 +1457,7 @@ int pp_copy_layer_pa_payload(struct mdp_overlay_pp_params *pp_info)
pp_info->pa_v2_cfg_data.cfg_payload,
sizeof(struct mdp_pa_data_v1_7));
if (ret) {
- pr_err("layer list copy from user failed, PA cfg payload = %p\n",
+ pr_err("layer list copy from user failed, PA cfg payload = %pK\n",
pp_info->pa_v2_cfg_data.cfg_payload);
ret = -EFAULT;
kfree(cfg_payload);
@@ -1495,7 +1495,7 @@ int pp_copy_layer_pcc_payload(struct mdp_overlay_pp_params *pp_info)
pp_info->pcc_cfg_data.cfg_payload,
sizeof(struct mdp_pcc_data_v1_7));
if (ret) {
- pr_err("layer list copy from user failed, PCC cfg payload = %p\n",
+ pr_err("layer list copy from user failed, PCC cfg payload = %pK\n",
pp_info->pcc_cfg_data.cfg_payload);
ret = -EFAULT;
kfree(cfg_payload);
@@ -1530,7 +1530,7 @@ static int pp_pa_dither_cache_params_v1_7(
return -EINVAL;
}
if (!mdss_pp_res || !mdss_pp_res->pp_data_v1_7) {
- pr_err("invalid param mdss_pp_res %p pp_data_res %p\n",
+ pr_err("invalid param mdss_pp_res %pK pp_data_res %pK\n",
mdss_pp_res,
((mdss_pp_res) ? mdss_pp_res->pp_data_v1_7 : NULL));
return -EINVAL;
@@ -1591,12 +1591,12 @@ int pp_pa_dither_cache_params(struct mdp_dither_cfg_data *config,
int ret = 0;
if (!config || !res_cache) {
- pr_err("invalid params config %p res_cache %p\n",
+ pr_err("invalid params config %pK res_cache %pK\n",
config, res_cache);
return -EINVAL;
}
if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
- pr_err("NULL payload for block %d mdss_pp_res %p pipe_res %p\n",
+ pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
res_cache->block, res_cache->mdss_pp_res,
res_cache->pipe_res);
return -EINVAL;
@@ -1627,7 +1627,7 @@ static int pp_igc_lut_cache_params_v3(struct mdp_igc_lut_data *config,
u32 disp_num, len = 0;
if (!config || !mdss_pp_res) {
- pr_err("invalid param config %p pp_res %p\n",
+ pr_err("invalid param config %pK pp_res %pK\n",
config, mdss_pp_res);
return -EINVAL;
}
@@ -1637,7 +1637,7 @@ static int pp_igc_lut_cache_params_v3(struct mdp_igc_lut_data *config,
return -EINVAL;
}
if (!mdss_pp_res->pp_data_v3) {
- pr_err("invalid pp_data_v3 %p\n", mdss_pp_res->pp_data_v3);
+ pr_err("invalid pp_data_v3 %pK\n", mdss_pp_res->pp_data_v3);
return -EINVAL;
}
if (config->ops & MDP_PP_OPS_READ) {
@@ -1653,7 +1653,7 @@ static int pp_igc_lut_cache_params_v3(struct mdp_igc_lut_data *config,
res_cache = mdss_pp_res->pp_data_v3;
v3_cache_data = &res_cache->igc_v3_data[disp_num];
if (!v3_cache_data->c0_c1_data || !v3_cache_data->c2_data) {
- pr_err("invalid payload c0_c1_data %p c2_data %p\n",
+ pr_err("invalid payload c0_c1_data %pK c2_data %pK\n",
v3_cache_data->c0_c1_data, v3_cache_data->c2_data);
goto igc_config_exit;
}
@@ -1677,7 +1677,7 @@ static int pp_igc_lut_cache_params_v3(struct mdp_igc_lut_data *config,
}
if (copy_from_kernel && (!v3_kernel_data->c0_c1_data ||
!v3_kernel_data->c2_data)) {
- pr_err("copy from kernel invalid params c0_c1_data %p c2_data %p\n",
+ pr_err("copy from kernel invalid params c0_c1_data %pK c2_data %pK\n",
v3_kernel_data->c0_c1_data,
v3_kernel_data->c2_data);
ret = -EINVAL;
@@ -1739,7 +1739,7 @@ static int pp_igc_lut_cache_params_pipe_v3(
u32 table_fmt = mdp_igc_rec_max, strength = 0;
if (!config || !pipe) {
- pr_err("invalid param config %p pipe %p\n",
+ pr_err("invalid param config %pK pipe %pK\n",
config, pipe);
return -EINVAL;
}
@@ -1796,7 +1796,7 @@ static int pp_igc_lut_cache_params_pipe_v3(
c0_c1_data = v3_cache_data->c0_c1_data;
c2_data = v3_cache_data->c2_data;
if (!c0_c1_data || !c2_data) {
- pr_err("invalid param c0_c1_data %p c2_data %p\n",
+ pr_err("invalid param c0_c1_data %pK c2_data %pK\n",
c0_c1_data, c2_data);
ret = -EINVAL;
goto igc_config_exit;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_common.c b/drivers/video/fbdev/msm/mdss_mdp_pp_common.c
index 7742b5e4ad0c..f3eccfe957f7 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_common.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_common.c
@@ -20,7 +20,7 @@ void pp_pa_set_sts(struct pp_sts_type *pp_sts,
int enable_flag, int block_type)
{
if (!pp_sts) {
- pr_err("invalid input pp_sts %p\n", pp_sts);
+ pr_err("invalid input pp_sts %pK\n", pp_sts);
return;
}
@@ -34,7 +34,7 @@ void pp_pa_set_sts(struct pp_sts_type *pp_sts,
}
if (!pa_data) {
- pr_err("invalid input pa_data %p\n", pa_data);
+ pr_err("invalid input pa_data %pK\n", pa_data);
return;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_stub.c b/drivers/video/fbdev/msm/mdss_mdp_pp_stub.c
index 3ab6bdca4bd3..29480cb999da 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_stub.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_stub.c
@@ -37,7 +37,7 @@ void *pp_get_driver_ops_stub(struct mdp_pp_driver_ops *ops)
int i = 0;
if (!ops) {
- pr_err("PP driver ops invalid %p\n", ops);
+ pr_err("PP driver ops invalid %pK\n", ops);
return ERR_PTR(-EINVAL);
}
for (i = 0; i < PP_MAX_FEATURES; i++) {
@@ -86,7 +86,7 @@ static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
static int pp_get_hist_isr(u32 *isr_mask)
{
if (!isr_mask) {
- pr_err("invalid params isr_mask %p\n", isr_mask);
+ pr_err("invalid params isr_mask %pK\n", isr_mask);
return -EINVAL;
}
@@ -99,7 +99,7 @@ static int pp_get_hist_offset(u32 block, u32 *ctl_off)
int ret = 0;
if (!ctl_off) {
- pr_err("invalid params ctl_off %p\n", ctl_off);
+ pr_err("invalid params ctl_off %pK\n", ctl_off);
return -EINVAL;
}
*ctl_off = U32_MAX;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
index 1470915a1253..1e4adc984802 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
@@ -246,7 +246,7 @@ static void pp_gamut_clock_gating_en(char __iomem *base_addr);
void *pp_get_driver_ops_v1_7(struct mdp_pp_driver_ops *ops)
{
if (!ops) {
- pr_err("PP driver ops invalid %p\n", ops);
+ pr_err("PP driver ops invalid %pK\n", ops);
return ERR_PTR(-EINVAL);
}
@@ -308,7 +308,7 @@ static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
u32 *opmode, int side)
{
if (!pp_sts || !opmode) {
- pr_err("Invalid pp_sts %p or opmode %p\n", pp_sts, opmode);
+ pr_err("Invalid pp_sts %pK or opmode %pK\n", pp_sts, opmode);
return;
}
switch (location) {
@@ -362,7 +362,7 @@ static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
struct mdp_hist_lut_data *lut_cfg_data = NULL;
if (!base_addr || !cfg_data) {
- pr_err("invalid params base_addr %p cfg_data %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK\n",
base_addr, cfg_data);
return -EINVAL;
}
@@ -374,7 +374,7 @@ static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
}
if (lut_cfg_data->version != mdp_hist_lut_v1_7 ||
!lut_cfg_data->cfg_payload) {
- pr_err("invalid hist_lut version %d payload %p\n",
+ pr_err("invalid hist_lut version %d payload %pK\n",
lut_cfg_data->version, lut_cfg_data->cfg_payload);
return -EINVAL;
}
@@ -439,7 +439,7 @@ static int pp_hist_lut_set_config(char __iomem *base_addr,
char __iomem *hist_addr = NULL, *swap_addr = NULL;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -465,12 +465,12 @@ static int pp_hist_lut_set_config(char __iomem *base_addr,
}
lut_data = lut_cfg_data->cfg_payload;
if (!lut_data) {
- pr_err("invalid hist_lut cfg_payload %p\n", lut_data);
+ pr_err("invalid hist_lut cfg_payload %pK\n", lut_data);
return -EINVAL;
}
if (lut_data->len != ENHIST_LUT_ENTRIES || !lut_data->data) {
- pr_err("invalid hist_lut len %d data %p\n",
+ pr_err("invalid hist_lut len %d data %pK\n",
lut_data->len, lut_data->data);
return -EINVAL;
}
@@ -535,7 +535,7 @@ static int pp_dither_set_config(char __iomem *base_addr,
uint32_t *pdata = NULL;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -563,7 +563,7 @@ static int pp_dither_set_config(char __iomem *base_addr,
dither_data = dither_cfg_data->cfg_payload;
if (!dither_data) {
- pr_err("invalid payload for dither %p\n", dither_data);
+ pr_err("invalid payload for dither %pK\n", dither_data);
return -EINVAL;
}
@@ -611,7 +611,7 @@ static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
struct pp_hist_col_info *hist_info = NULL;
if (!base_addr || !cfg_data) {
- pr_err("invalid params base_addr %p cfg_data %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK\n",
base_addr, cfg_data);
return -EINVAL;
}
@@ -649,7 +649,7 @@ static int pp_get_hist_offset(u32 block, u32 *ctl_off)
int ret = 0;
if (!ctl_off) {
- pr_err("invalid params ctl_off %p\n", ctl_off);
+ pr_err("invalid params ctl_off %pK\n", ctl_off);
return -EINVAL;
}
switch (block) {
@@ -670,7 +670,7 @@ static int pp_get_hist_offset(u32 block, u32 *ctl_off)
static int pp_get_hist_isr(u32 *isr_mask)
{
if (!isr_mask) {
- pr_err("invalid params isr_mask %p\n", isr_mask);
+ pr_err("invalid params isr_mask %pK\n", isr_mask);
return -EINVAL;
}
@@ -696,7 +696,7 @@ static int pp_gamut_get_config(char __iomem *base_addr, void *cfg_data,
u32 clk_gate_disable = 0;
if (!base_addr || !cfg_data) {
- pr_err("invalid params base_addr %p cfg_data %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK\n",
base_addr, cfg_data);
return -EINVAL;
}
@@ -834,7 +834,7 @@ static int pp_gamut_set_config(char __iomem *base_addr,
struct mdp_gamut_data_v1_7 *gamut_data = NULL;
char __iomem *base_addr_scale = base_addr;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -856,7 +856,7 @@ static int pp_gamut_set_config(char __iomem *base_addr,
gamut_data = (struct mdp_gamut_data_v1_7 *)
gamut_cfg_data->cfg_payload;
if (!gamut_data) {
- pr_err("invalid payload for gamut %p\n", gamut_data);
+ pr_err("invalid payload for gamut %pK\n", gamut_data);
return -EINVAL;
}
@@ -875,7 +875,7 @@ static int pp_gamut_set_config(char __iomem *base_addr,
for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
if (!gamut_data->c0_data[i] || !gamut_data->c1_c2_data[i]
|| (gamut_data->tbl_size[i] != tbl_sz)) {
- pr_err("invalid param for c0 %p c1c2 %p table %d size %d expected sz %d\n",
+ pr_err("invalid param for c0 %pK c1c2 %pK table %d size %d expected sz %d\n",
gamut_data->c0_data[i],
gamut_data->c1_c2_data[i], i,
gamut_data->tbl_size[i], tbl_sz);
@@ -886,7 +886,7 @@ static int pp_gamut_set_config(char __iomem *base_addr,
(!gamut_data->scale_off_data[i] ||
(gamut_data->tbl_scale_off_sz[i] !=
MDP_GAMUT_SCALE_OFF_SZ))) {
- pr_err("invalid param for scale table %p for c%d size %d expected size%d\n",
+ pr_err("invalid param for scale table %pK for c%d size %d expected size%d\n",
gamut_data->scale_off_data[i], i,
gamut_data->tbl_scale_off_sz[i],
MDP_GAMUT_SCALE_OFF_SZ);
@@ -951,7 +951,7 @@ static int pp_pcc_set_config(char __iomem *base_addr,
u32 opmode = 0;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -966,7 +966,7 @@ static int pp_pcc_set_config(char __iomem *base_addr,
}
pcc_data = pcc_cfg_data->cfg_payload;
if (!pcc_data) {
- pr_err("invalid payload for pcc %p\n", pcc_data);
+ pr_err("invalid payload for pcc %pK\n", pcc_data);
return -EINVAL;
}
@@ -1036,7 +1036,7 @@ static int pp_pcc_get_config(char __iomem *base_addr, void *cfg_data,
struct mdp_pcc_data_v1_7 pcc_data;
if (!base_addr || !cfg_data) {
- pr_err("invalid params base_addr %p cfg_data %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK\n",
base_addr, cfg_data);
return -EINVAL;
}
@@ -1233,7 +1233,7 @@ static void pp_pa_set_six_zone(char __iomem *base_addr,
if (!pa_data->six_zone_len || !pa_data->six_zone_curve_p0 ||
!pa_data->six_zone_curve_p1) {
- pr_err("Invalid six zone data: len %d curve_p0 %p curve_p1 %p\n",
+ pr_err("Invalid six zone data: len %d curve_p0 %pK curve_p1 %pK\n",
pa_data->six_zone_len,
pa_data->six_zone_curve_p0,
pa_data->six_zone_curve_p1);
@@ -1283,7 +1283,7 @@ static int pp_pa_set_config(char __iomem *base_addr,
int ret = 0;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -1308,7 +1308,7 @@ static int pp_pa_set_config(char __iomem *base_addr,
pa_data = pa_cfg_data->cfg_payload;
if (!pa_data) {
- pr_err("invalid payload for pa %p\n", pa_data);
+ pr_err("invalid payload for pa %pK\n", pa_data);
return -EINVAL;
}
@@ -1557,7 +1557,7 @@ static int pp_pa_get_config(char __iomem *base_addr, void *cfg_data,
char __iomem *pa_hold_addr = NULL;
if (!base_addr || !cfg_data) {
- pr_err("invalid params base_addr %p cfg_data %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK\n",
base_addr, cfg_data);
return -EINVAL;
}
@@ -1690,7 +1690,7 @@ static int pp_igc_set_config(char __iomem *base_addr,
u32 data;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -1698,7 +1698,7 @@ static int pp_igc_set_config(char __iomem *base_addr,
lut_cfg_data = (struct mdp_igc_lut_data *) cfg_data;
if (lut_cfg_data->version != mdp_igc_v1_7 ||
!lut_cfg_data->cfg_payload) {
- pr_err_once("invalid igc version %d payload %p\n",
+ pr_err_once("invalid igc version %d payload %pK\n",
lut_cfg_data->version, lut_cfg_data->cfg_payload);
return -EINVAL;
}
@@ -1717,7 +1717,7 @@ static int pp_igc_set_config(char __iomem *base_addr,
lut_data = lut_cfg_data->cfg_payload;
if (lut_data->len != IGC_LUT_ENTRIES || !lut_data->c0_c1_data ||
!lut_data->c2_data) {
- pr_err("invalid lut len %d c0_c1_data %p c2_data %p\n",
+ pr_err("invalid lut len %d c0_c1_data %pK c2_data %pK\n",
lut_data->len, lut_data->c0_c1_data, lut_data->c2_data);
return -EINVAL;
}
@@ -1784,7 +1784,7 @@ static int pp_igc_get_config(char __iomem *base_addr, void *cfg_data,
u32 data = 0, sz = 0;
if (!base_addr || !cfg_data || block_type != DSPP) {
- pr_err("invalid params base_addr %p cfg_data %p block_type %d\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK block_type %d\n",
base_addr, cfg_data, block_type);
return -EINVAL;
}
@@ -1796,7 +1796,7 @@ static int pp_igc_get_config(char __iomem *base_addr, void *cfg_data,
if (lut_cfg_data->version != mdp_igc_v1_7 ||
!lut_cfg_data->cfg_payload ||
lut_cfg_data->block > IGC_MASK_MAX) {
- pr_err("invalid igc version %d payload %p block %d\n",
+ pr_err("invalid igc version %d payload %pK block %d\n",
lut_cfg_data->version, lut_cfg_data->cfg_payload,
lut_cfg_data->block);
ret = -EINVAL;
@@ -1861,7 +1861,7 @@ static int pp_pgc_set_config(char __iomem *base_addr,
struct mdp_pgc_lut_data_v1_7 *pgc_data_v17 = NULL;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -1887,13 +1887,13 @@ static int pp_pgc_set_config(char __iomem *base_addr,
pgc_data_v17 = (struct mdp_pgc_lut_data_v1_7 *) pgc_data->cfg_payload;
if (!pgc_data_v17) {
- pr_err("invalid payload for GC %p\n", pgc_data_v17);
+ pr_err("invalid payload for GC %pK\n", pgc_data_v17);
return -EINVAL;
}
if (pgc_data_v17->len != PGC_LUT_ENTRIES || !pgc_data_v17->c0_data ||
!pgc_data_v17->c1_data || !pgc_data_v17->c2_data) {
- pr_err("Invalid params entries %d c0_data %p c1_data %p c2_data %p\n",
+ pr_err("Invalid params entries %d c0_data %pK c1_data %pK c2_data %pK\n",
pgc_data_v17->len, pgc_data_v17->c0_data,
pgc_data_v17->c1_data, pgc_data_v17->c2_data);
return -EINVAL;
@@ -1948,7 +1948,7 @@ static int pp_pgc_get_config(char __iomem *base_addr, void *cfg_data,
struct mdp_pgc_lut_data *pgc_data = NULL;
struct mdp_pgc_lut_data_v1_7 *pgc_data_v17 = NULL;
if (!base_addr || !cfg_data) {
- pr_err("invalid params base_addr %p cfg_data %p block_type %d\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK block_type %d\n",
base_addr, cfg_data, block_type);
return -EINVAL;
}
@@ -1956,7 +1956,7 @@ static int pp_pgc_get_config(char __iomem *base_addr, void *cfg_data,
pgc_data_v17 = (struct mdp_pgc_lut_data_v1_7 *)
pgc_data->cfg_payload;
if (pgc_data->version != mdp_pgc_v1_7 || !pgc_data_v17) {
- pr_err("invalid pgc version %d payload %p\n",
+ pr_err("invalid pgc version %d payload %pK\n",
pgc_data->version, pgc_data_v17);
return -EINVAL;
}
@@ -2018,7 +2018,7 @@ static int pp_pgc_get_config(char __iomem *base_addr, void *cfg_data,
static int pp_pcc_get_version(u32 *version)
{
if (!version) {
- pr_err("invalid param version %p\n", version);
+ pr_err("invalid param version %pK\n", version);
return -EINVAL;
}
*version = mdp_pcc_v1_7;
@@ -2028,7 +2028,7 @@ static int pp_pcc_get_version(u32 *version)
static int pp_igc_get_version(u32 *version)
{
if (!version) {
- pr_err("invalid param version %p\n", version);
+ pr_err("invalid param version %pK\n", version);
return -EINVAL;
}
*version = mdp_igc_v1_7;
@@ -2038,7 +2038,7 @@ static int pp_igc_get_version(u32 *version)
static int pp_pgc_get_version(u32 *version)
{
if (!version) {
- pr_err("invalid param version %p\n", version);
+ pr_err("invalid param version %pK\n", version);
return -EINVAL;
}
*version = mdp_pgc_v1_7;
@@ -2048,7 +2048,7 @@ static int pp_pgc_get_version(u32 *version)
static int pp_pa_get_version(u32 *version)
{
if (!version) {
- pr_err("invalid param version %p\n", version);
+ pr_err("invalid param version %pK\n", version);
return -EINVAL;
}
*version = mdp_pa_v1_7;
@@ -2058,7 +2058,7 @@ static int pp_pa_get_version(u32 *version)
static int pp_gamut_get_version(u32 *version)
{
if (!version) {
- pr_err("invalid param version %p\n", version);
+ pr_err("invalid param version %pK\n", version);
return -EINVAL;
}
*version = mdp_gamut_v1_7;
@@ -2068,7 +2068,7 @@ static int pp_gamut_get_version(u32 *version)
static int pp_dither_get_version(u32 *version)
{
if (!version) {
- pr_err("invalid param version %p\n", version);
+ pr_err("invalid param version %pK\n", version);
return -EINVAL;
}
*version = mdp_dither_v1_7;
@@ -2078,7 +2078,7 @@ static int pp_dither_get_version(u32 *version)
static int pp_hist_lut_get_version(u32 *version)
{
if (!version) {
- pr_err("invalid param version %p\n", version);
+ pr_err("invalid param version %pK\n", version);
return -EINVAL;
}
*version = mdp_hist_lut_v1_7;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
index 88407b3d920b..25cb94f89dd5 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
@@ -158,7 +158,7 @@ static int pp_driver_init(struct mdp_pp_driver_ops *ops)
void *pp_get_driver_ops_v3(struct mdp_pp_driver_ops *ops)
{
if (!ops) {
- pr_err("PP driver ops invalid %p\n", ops);
+ pr_err("PP driver ops invalid %pK\n", ops);
return ERR_PTR(-EINVAL);
}
@@ -207,7 +207,7 @@ static int pp_get_hist_offset(u32 block, u32 *ctl_off)
int ret = 0;
if (!ctl_off) {
- pr_err("invalid params ctl_off %p\n", ctl_off);
+ pr_err("invalid params ctl_off %pK\n", ctl_off);
return -EINVAL;
}
@@ -233,7 +233,7 @@ static int pp_hist_set_config(char __iomem *base_addr,
struct pp_hist_col_info *hist_info = NULL;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -269,7 +269,7 @@ static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
char __iomem *hist_addr;
if (!base_addr || !cfg_data) {
- pr_err("invalid params base_addr %p cfg_data %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK\n",
base_addr, cfg_data);
return -EINVAL;
}
@@ -302,7 +302,7 @@ static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
struct mdp_hist_lut_data *lut_cfg_data = NULL;
if (!base_addr || !cfg_data) {
- pr_err("invalid params base_addr %p cfg_data %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK\n",
base_addr, cfg_data);
return -EINVAL;
}
@@ -319,7 +319,7 @@ static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
}
if (lut_cfg_data->version != mdp_hist_lut_v1_7 ||
!lut_cfg_data->cfg_payload) {
- pr_err("invalid hist_lut version %d payload %p\n",
+ pr_err("invalid hist_lut version %d payload %pK\n",
lut_cfg_data->version, lut_cfg_data->cfg_payload);
return -EINVAL;
}
@@ -366,7 +366,7 @@ static int pp_hist_lut_set_config(char __iomem *base_addr,
char __iomem *hist_lut_addr = NULL, *swap_addr = NULL;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -393,12 +393,12 @@ static int pp_hist_lut_set_config(char __iomem *base_addr,
}
lut_data = lut_cfg_data->cfg_payload;
if (!lut_data) {
- pr_err("invalid hist_lut cfg_payload %p\n", lut_data);
+ pr_err("invalid hist_lut cfg_payload %pK\n", lut_data);
return -EINVAL;
}
if (lut_data->len != ENHIST_LUT_ENTRIES || !lut_data->data) {
- pr_err("invalid hist_lut len %d data %p\n",
+ pr_err("invalid hist_lut len %d data %pK\n",
lut_data->len, lut_data->data);
return -EINVAL;
}
@@ -435,7 +435,7 @@ hist_lut_set_sts:
static int pp_hist_lut_get_version(u32 *version)
{
if (!version) {
- pr_err("invalid param version %p\n", version);
+ pr_err("invalid param version %pK\n", version);
return -EINVAL;
}
*version = mdp_hist_lut_v1_7;
@@ -448,7 +448,7 @@ static void pp_hist_lut_opmode_config(char __iomem *base_addr,
u32 opmode = 0;
if (!base_addr || !pp_sts) {
- pr_err("invalid params base_addr %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK pp_sts_type %pK\n",
base_addr, pp_sts);
return;
}
@@ -477,7 +477,7 @@ static int pp_pa_set_config(char __iomem *base_addr,
char __iomem *block_addr = NULL;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -508,7 +508,7 @@ static int pp_pa_set_config(char __iomem *base_addr,
pa_data = pa_cfg_data->cfg_payload;
if (!pa_data) {
- pr_err("invalid payload for pa %p\n", pa_data);
+ pr_err("invalid payload for pa %pK\n", pa_data);
return -EINVAL;
}
@@ -557,7 +557,7 @@ static int pp_dither_set_config(char __iomem *base_addr,
char __iomem *dither_opmode = NULL;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -586,7 +586,7 @@ static int pp_dither_set_config(char __iomem *base_addr,
dither_data = dither_cfg_data->cfg_payload;
if (!dither_data) {
- pr_err("invalid payload for dither %p\n", dither_data);
+ pr_err("invalid payload for dither %pK\n", dither_data);
return -EINVAL;
}
@@ -641,7 +641,7 @@ static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
u32 *opmode, int side)
{
if (!pp_sts || !opmode) {
- pr_err("Invalid pp_sts %p or opmode %p\n", pp_sts, opmode);
+ pr_err("Invalid pp_sts %pK or opmode %pK\n", pp_sts, opmode);
return;
}
switch (location) {
@@ -775,7 +775,7 @@ static void pp_pa_set_six_zone(char __iomem *base_addr,
if (pa_data->six_zone_len != MDP_SIX_ZONE_LUT_SIZE ||
!pa_data->six_zone_curve_p0 ||
!pa_data->six_zone_curve_p1) {
- pr_err("Invalid six zone data: len %d curve_p0 %p curve_p1 %p\n",
+ pr_err("Invalid six zone data: len %d curve_p0 %pK curve_p1 %pK\n",
pa_data->six_zone_len,
pa_data->six_zone_curve_p0,
pa_data->six_zone_curve_p1);
@@ -888,7 +888,7 @@ static int pp_pa_dither_set_config(char __iomem *base_addr,
char __iomem *opmode_addr = NULL, *matrix_addr = NULL;
if (!base_addr || !cfg_data || !pp_sts) {
- pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
return -EINVAL;
}
@@ -954,7 +954,7 @@ static int pp_igc_dither_set_strength(char __iomem *base_addr,
if (!base_addr || !cfg_data || (block_type != DSPP) || !pp_sts
|| (lut_cfg_data->version != mdp_igc_v3)) {
- pr_err("invalid params base_addr %p cfg_data %p block_type %d igc version %d\n",
+ pr_err("invalid params base_addr %pK cfg_data %pK block_type %d igc version %d\n",
base_addr, cfg_data, block_type, (lut_cfg_data ?
lut_cfg_data->version : mdp_pp_unknown));
return -EINVAL;
@@ -984,7 +984,7 @@ static int pp_igc_set_config(char __iomem *base_addr,
int ret = 0;
if (!base_addr || !pp_sts || !cfg_data || !config_data.igc_set_config) {
- pr_err("invalid payload base_addr %p pp_sts %p cfg_data %p igc_set_config %p\n",
+ pr_err("invalid payload base_addr %pK pp_sts %pK cfg_data %pK igc_set_config %pK\n",
base_addr, pp_sts, cfg_data,
config_data.igc_set_config);
return -EINVAL;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c
index 7f58b8203713..8b0ebc3fdf05 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_util.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_util.c
@@ -939,7 +939,7 @@ static int mdss_mdp_put_img(struct mdss_mdp_img_data *data, bool rotator,
pr_debug("pmem buf=0x%pa\n", &data->addr);
memset(&data->srcp_f, 0, sizeof(struct fd));
} else if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
- pr_debug("ion hdl=%p buf=0x%pa\n", data->srcp_dma_buf,
+ pr_debug("ion hdl=%pK buf=0x%pa\n", data->srcp_dma_buf,
&data->addr);
if (!iclient) {
pr_err("invalid ion client\n");
@@ -1104,8 +1104,9 @@ static int mdss_mdp_get_img(struct msmfb_data *img,
data->addr += data->offset;
data->len -= data->offset;
- pr_debug("mem=%d ihdl=%p buf=0x%pa len=0x%lx\n", img->memory_id,
- data->srcp_dma_buf, &data->addr, data->len);
+ pr_debug("mem=%d ihdl=%pK buf=0x%pa len=0x%lx\n",
+ img->memory_id, data->srcp_dma_buf,
+ &data->addr, data->len);
} else {
mdss_mdp_put_img(data, rotator, dir);
return ret ? : -EOVERFLOW;
@@ -1169,7 +1170,7 @@ static int mdss_mdp_map_buffer(struct mdss_mdp_img_data *data, bool rotator,
data->addr += data->offset;
data->len -= data->offset;
- pr_debug("ihdl=%p buf=0x%pa len=0x%lx\n",
+ pr_debug("ihdl=%pK buf=0x%pa len=0x%lx\n",
data->srcp_dma_buf, &data->addr, data->len);
} else {
mdss_mdp_put_img(data, rotator, dir);
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index bde137269422..1137c4475cab 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -900,6 +900,15 @@ static inline bool is_dsc_compression(struct mdss_panel_info *pinfo)
return false;
}
+static inline bool is_lm_configs_dsc_compatible(struct mdss_panel_info *pinfo,
+ u32 width, u32 height)
+{
+ if ((width % pinfo->dsc.slice_width) ||
+ (height % pinfo->dsc.slice_height))
+ return false;
+ return true;
+}
+
int mdss_register_panel(struct platform_device *pdev,
struct mdss_panel_data *pdata);
diff --git a/drivers/video/fbdev/msm/mdss_util.c b/drivers/video/fbdev/msm/mdss_util.c
index db318de6fc6d..d2610ff80878 100644
--- a/drivers/video/fbdev/msm/mdss_util.c
+++ b/drivers/video/fbdev/msm/mdss_util.c
@@ -33,7 +33,7 @@ int mdss_register_irq(struct mdss_hw *hw)
if (!mdss_irq_handlers[hw->hw_ndx])
mdss_irq_handlers[hw->hw_ndx] = hw;
else
- pr_err("panel %d's irq at %p is already registered\n",
+ pr_err("panel %d's irq at %pK is already registered\n",
hw->hw_ndx, hw->irq_handler);
spin_unlock_irqrestore(&mdss_lock, irq_flags);
diff --git a/drivers/video/fbdev/msm/mhl3/mhl_linux_tx.c b/drivers/video/fbdev/msm/mhl3/mhl_linux_tx.c
index 1514f021414a..04ba7a00bb2b 100644
--- a/drivers/video/fbdev/msm/mhl3/mhl_linux_tx.c
+++ b/drivers/video/fbdev/msm/mhl3/mhl_linux_tx.c
@@ -5599,7 +5599,7 @@ static int is_timer_handle_valid(struct mhl_dev_context *dev_context,
}
if (timer != timer_handle) {
- MHL_TX_DBG_WARN("Invalid timer handle %p received\n",
+ MHL_TX_DBG_WARN("Invalid timer handle %pK received\n",
timer_handle);
return -EINVAL;
}
diff --git a/drivers/video/fbdev/msm/mhl3/mhl_supp.c b/drivers/video/fbdev/msm/mhl3/mhl_supp.c
index 7055d8cd758d..29de6d0b6401 100644
--- a/drivers/video/fbdev/msm/mhl3/mhl_supp.c
+++ b/drivers/video/fbdev/msm/mhl3/mhl_supp.c
@@ -185,7 +185,7 @@ static struct cbus_req *get_free_cbus_queue_entry_impl(
req->function = function;
req->line = line;
req->sequence = dev_context->sequence++;
- /*MHL_TX_DBG_ERR(,"q %d get:0x%p %s:%d\n",
+ /*MHL_TX_DBG_ERR(,"q %d get:0x%pK %s:%d\n",
req->sequence,req,function,line); */
return req;
}
@@ -197,7 +197,7 @@ static void return_cbus_queue_entry_impl(struct mhl_dev_context *dev_context,
struct cbus_req *pReq,
const char *function, int line)
{
- /* MHL_TX_DBG_ERR(,"q ret:0x%p %s:%d\n",pReq,function,line); */
+ /* MHL_TX_DBG_ERR(,"q ret:0x%pK %s:%d\n",pReq,function,line); */
list_add(&pReq->link, &dev_context->cbus_free_list);
}
@@ -372,7 +372,7 @@ static struct block_req *start_new_block_marshalling_req_impl(
sizeof(payload->as_bytes) -
sizeof(struct SI_PACK_THIS_STRUCT standard_transport_header_t);
dev_context->block_protocol.marshalling_req = req;
- MHL_TX_DBG_WARN("q %d get:0x%p %s:%d\n", req->sequence, req, function,
+ MHL_TX_DBG_WARN("q %d get:0x%pK %s:%d\n", req->sequence, req, function,
line);
return req;
}
@@ -384,7 +384,7 @@ static void return_block_queue_entry_impl(struct mhl_dev_context *dev_context,
struct block_req *pReq,
const char *function, int line)
{
- /* MHL_TX_DBG_ERR(,"q ret:0x%p %s:%d\n",pReq,function,line); */
+ /* MHL_TX_DBG_ERR(,"q ret:0x%pK %s:%d\n",pReq,function,line); */
list_add(&pReq->link, &dev_context->block_protocol.free_list);
}
@@ -1283,7 +1283,7 @@ void si_mhl_tx_drive_states(struct mhl_dev_context *dev_context)
if (req == NULL)
return;
- MHL_TX_DBG_INFO("req: %p\n", req);
+ MHL_TX_DBG_INFO("req: %pK\n", req);
/* coordinate write burst requests and grants. */
if (MHL_MSC_MSG == req->command) {
dev_context->msc_msg_last_data = req->msg_data[1];
@@ -1298,7 +1298,7 @@ void si_mhl_tx_drive_states(struct mhl_dev_context *dev_context)
}
}
- MHL_TX_DBG_INFO("req: %p\n", req);
+ MHL_TX_DBG_INFO("req: %pK\n", req);
if (req) {
uint8_t ret_val;
dev_context->current_cbus_req = req;
diff --git a/drivers/video/fbdev/msm/mhl3/platform.c b/drivers/video/fbdev/msm/mhl3/platform.c
index c0e5174880b8..b0c7e8aabb23 100644
--- a/drivers/video/fbdev/msm/mhl3/platform.c
+++ b/drivers/video/fbdev/msm/mhl3/platform.c
@@ -1590,7 +1590,7 @@ static int __devinit si_8620_mhl_tx_i2c_probe(struct i2c_client *client,
{
int ret;
- pr_info("%s(), i2c_device_id = %p\n", __func__, id);
+ pr_info("%s(), i2c_device_id = %pK\n", __func__, id);
#if defined(SIMG_USE_DTS)
/*
@@ -1844,7 +1844,7 @@ static int __devinit si_8620_mhl_tx_spi_probe(struct spi_device *spi)
{
int ret;
- pr_info("%s(), spi = %p\n", __func__, spi);
+ pr_info("%s(), spi = %pK\n", __func__, spi);
spi->bits_per_word = 8;
spi_dev = spi;
spi_bus_num = spi->master->bus_num;
@@ -2161,7 +2161,7 @@ static void __exit si_8620_exit(void)
for (idx = 0; idx < ARRAY_SIZE(device_addresses); idx++) {
MHL_TX_DBG_INFO("\n");
if (device_addresses[idx].client != NULL) {
- MHL_TX_DBG_INFO("unregistering device:%p\n",
+ MHL_TX_DBG_INFO("unregistering device:%pK\n",
device_addresses[idx].client);
i2c_unregister_device(device_addresses[idx].
client);
diff --git a/drivers/video/fbdev/msm/mhl3/si_8620_drv.c b/drivers/video/fbdev/msm/mhl3/si_8620_drv.c
index dd71f1becd1e..9d68f285d581 100644
--- a/drivers/video/fbdev/msm/mhl3/si_8620_drv.c
+++ b/drivers/video/fbdev/msm/mhl3/si_8620_drv.c
@@ -2367,7 +2367,7 @@ int si_mhl_tx_drv_get_edid_fifo_partial_block(struct drv_hw_context *hw_context,
offset = EDID_BLOCK_SIZE * (hw_context->edid_fifo_block_number & 0x01);
offset += start;
- MHL_TX_DBG_INFO("%p %p\n", hw_context, edid_buf);
+ MHL_TX_DBG_INFO("%pK %pK\n", hw_context, edid_buf);
if (EDID_BLOCK_SIZE == (offset + length))
hw_context->edid_fifo_block_number++;
@@ -2401,7 +2401,7 @@ int si_mhl_tx_drv_get_edid_fifo_next_block(struct drv_hw_context *hw_context,
offset = EDID_BLOCK_SIZE * (hw_context->edid_fifo_block_number & 0x01);
- MHL_TX_DBG_INFO("%p %p\n", hw_context, edid_buf);
+ MHL_TX_DBG_INFO("%pK %pK\n", hw_context, edid_buf);
hw_context->edid_fifo_block_number++;
#ifdef MANUAL_EDID_FETCH
diff --git a/drivers/video/fbdev/msm/mhl3/si_emsc_hid.c b/drivers/video/fbdev/msm/mhl3/si_emsc_hid.c
index 17d33c99ef54..51e2eda2827e 100644
--- a/drivers/video/fbdev/msm/mhl3/si_emsc_hid.c
+++ b/drivers/video/fbdev/msm/mhl3/si_emsc_hid.c
@@ -461,7 +461,7 @@ static int mhl3_send_ack(struct mhl3_hid_data *mhid, uint8_t reason)
return -ENODEV;
MHL3_HID_DBG_WARN("%s - HID_ACK reason code: %02X\n", __func__, reason);
- MHL3_HID_DBG_ERR("mhid->mdev: %p\n", mhid->mdev);
+ MHL3_HID_DBG_ERR("mhid->mdev: %pK\n", mhid->mdev);
mhid->out_data[0] = MHL3_HID_ACK;
mhid->out_data[1] = reason;
@@ -1089,7 +1089,7 @@ mhid_cleanup:
mhl3_send_ack(mhid, HID_ACK_NODEV);
mhid->flags |= HID_FLAGS_WQ_CANCEL;
- MHL3_HID_DBG_ERR("WORK QUEUE function FAIL - mhid: %p\n", mhid);
+ MHL3_HID_DBG_ERR("WORK QUEUE function FAIL - mhid: %pK\n", mhid);
mhl3_disconnect_and_destroy_hid_device(mhid);
/*
diff --git a/drivers/video/fbdev/msm/mhl3/si_mdt_inputdev.c b/drivers/video/fbdev/msm/mhl3/si_mdt_inputdev.c
index 13d2a08831af..926ab6c53e74 100644
--- a/drivers/video/fbdev/msm/mhl3/si_mdt_inputdev.c
+++ b/drivers/video/fbdev/msm/mhl3/si_mdt_inputdev.c
@@ -80,10 +80,11 @@ static void destroy_mouse(struct mhl_dev_context *dev_context)
if (dev_context->mdt_devs.dev_mouse == NULL)
return;
- MHL_TX_DBG_INFO("Unregistering mouse: %p\n",
+ MHL_TX_DBG_INFO("Unregistering mouse: %pK\n",
dev_context->mdt_devs.dev_mouse);
input_unregister_device(dev_context->mdt_devs.dev_mouse);
- MHL_TX_DBG_INFO("Freeing mouse: %p\n", dev_context->mdt_devs.dev_mouse);
+ MHL_TX_DBG_INFO("Freeing mouse: %pK\n",
+ dev_context->mdt_devs.dev_mouse);
input_free_device(dev_context->mdt_devs.dev_mouse);
dev_context->mdt_devs.dev_mouse = NULL;
}
@@ -93,10 +94,10 @@ static void destroy_keyboard(struct mhl_dev_context *dev_context)
if (dev_context->mdt_devs.dev_keyboard == NULL)
return;
- MHL_TX_DBG_INFO("Unregistering keyboard: %p\n",
+ MHL_TX_DBG_INFO("Unregistering keyboard: %pK\n",
dev_context->mdt_devs.dev_keyboard);
input_unregister_device(dev_context->mdt_devs.dev_keyboard);
- MHL_TX_DBG_INFO("Freeing keyboard: %p\n",
+ MHL_TX_DBG_INFO("Freeing keyboard: %pK\n",
dev_context->mdt_devs.dev_keyboard);
input_free_device(dev_context->mdt_devs.dev_keyboard);
dev_context->mdt_devs.dev_keyboard = NULL;
@@ -107,10 +108,10 @@ static void destroy_touchscreen(struct mhl_dev_context *dev_context)
if (dev_context->mdt_devs.dev_touchscreen == NULL)
return;
- MHL_TX_DBG_INFO("Unregistering mouse: %p\n",
+ MHL_TX_DBG_INFO("Unregistering mouse: %pK\n",
dev_context->mdt_devs.dev_touchscreen);
input_unregister_device(dev_context->mdt_devs.dev_touchscreen);
- MHL_TX_DBG_INFO("Freeing mouse: %p\n",
+ MHL_TX_DBG_INFO("Freeing mouse: %pK\n",
dev_context->mdt_devs.dev_touchscreen);
input_free_device(dev_context->mdt_devs.dev_touchscreen);
dev_context->mdt_devs.dev_touchscreen = NULL;
@@ -130,7 +131,7 @@ int init_mdt_keyboard(struct mhl_dev_context *dev_context)
MHL_TX_DBG_ERR("Not enough memory\n");
return -ENOMEM;
}
- MHL_TX_DBG_INFO("Allocated keyboard: %p\n", dev_keyboard);
+ MHL_TX_DBG_INFO("Allocated keyboard: %pK\n", dev_keyboard);
set_bit(EV_KEY, dev_keyboard->evbit);
set_bit(EV_REP, dev_keyboard->evbit);
@@ -158,7 +159,7 @@ int init_mdt_keyboard(struct mhl_dev_context *dev_context)
return error;
}
- MHL_TX_DBG_INFO("Registered keyboard: %p\n", dev_keyboard);
+ MHL_TX_DBG_INFO("Registered keyboard: %pK\n", dev_keyboard);
dev_context->mdt_devs.dev_keyboard = dev_keyboard;
@@ -175,7 +176,7 @@ int init_mdt_mouse(struct mhl_dev_context *dev_context)
MHL_TX_DBG_ERR("Not enough memory\n");
return -ENOMEM;
}
- MHL_TX_DBG_INFO("Allocated mouse: %p\n", dev_mouse);
+ MHL_TX_DBG_INFO("Allocated mouse: %pK\n", dev_mouse);
set_bit(EV_REL, dev_mouse->evbit);
set_bit(EV_KEY, dev_mouse->evbit);
@@ -208,7 +209,7 @@ int init_mdt_mouse(struct mhl_dev_context *dev_context)
return error;
}
- MHL_TX_DBG_INFO("Registered mouse: %p\n", dev_mouse);
+ MHL_TX_DBG_INFO("Registered mouse: %pK\n", dev_mouse);
dev_context->mdt_devs.dev_mouse = dev_mouse;
@@ -226,7 +227,7 @@ int init_mdt_touchscreen(struct mhl_dev_context *dev_context)
return -ENOMEM;
}
- MHL_TX_DBG_INFO("Allocated touch screen: %p\n", dev_touchscreen);
+ MHL_TX_DBG_INFO("Allocated touch screen: %pK\n", dev_touchscreen);
#if !defined(SINGLE_TOUCH) && defined(KERNEL_2_6_38_AND_LATER)
input_mt_init_slots(dev_touchscreen, MAX_TOUCH_CONTACTS);
@@ -301,7 +302,7 @@ int init_mdt_touchscreen(struct mhl_dev_context *dev_context)
input_free_device(dev_touchscreen);
return error;
}
- MHL_TX_DBG_INFO("Registered touchscreen: %p\n", dev_touchscreen);
+ MHL_TX_DBG_INFO("Registered touchscreen: %pK\n", dev_touchscreen);
dev_context->mdt_devs.dev_touchscreen = dev_touchscreen;
diff --git a/drivers/video/fbdev/msm/mhl3/si_mhl2_edid_3d.c b/drivers/video/fbdev/msm/mhl3/si_mhl2_edid_3d.c
index fd6918fbf1ff..20d48575f323 100644
--- a/drivers/video/fbdev/msm/mhl3/si_mhl2_edid_3d.c
+++ b/drivers/video/fbdev/msm/mhl3/si_mhl2_edid_3d.c
@@ -1133,7 +1133,7 @@ static void tx_prune_dtd_list(struct edid_3d_data_t *mhl_edid_3d_data,
* one by one
*/
MHL_TX_EDID_INFO(
- "p_desc:%p p_next_desc:%p\n",
+ "p_desc:%pK p_next_desc:%pK\n",
p_desc, p_next_desc)
*p_desc++ = *p_next_desc++;
}
@@ -1144,7 +1144,7 @@ static void tx_prune_dtd_list(struct edid_3d_data_t *mhl_edid_3d_data,
p_desc = p_holder;
} else {
p_desc++;
- MHL_TX_EDID_INFO("p_desc:%p\n", p_desc)
+ MHL_TX_EDID_INFO("p_desc:%pK\n", p_desc)
}
}
}
@@ -1446,7 +1446,7 @@ static bool si_mhl_tx_parse_detailed_timing_descriptor(
* Mark this mode for pruning by setting
* horizontal active to zero
*/
- MHL_TX_DBG_ERR("%smark for pruning%s %p\n",
+ MHL_TX_DBG_ERR("%smark for pruning%s %pK\n",
ANSI_ESC_YELLOW_TEXT,
ANSI_ESC_RESET_TEXT,
p_desc);
@@ -1500,7 +1500,7 @@ static uint8_t si_mhl_tx_parse_861_long_descriptors(
++mhl_edid_3d_data->parse_data.
num_cea_861_timing_dtds;
} else if (valid) {
- MHL_TX_EDID_INFO("stopping at %p\n",
+ MHL_TX_EDID_INFO("stopping at %pK\n",
p_data_u.p_long_descriptors)
break;
}
@@ -1600,7 +1600,7 @@ static void prune_hdmi_vsdb_vic_list(
HDMI_VIC_len = inner_loop_limit;
p_CEA_extension->byte_offset_to_18_byte_descriptors -=
num_HDMI_VICs_pruned;
- MHL_TX_EDID_INFO("%p\n", mhl_edid_3d_data->parse_data.p_HDMI_vsdb);
+ MHL_TX_EDID_INFO("%pK\n", mhl_edid_3d_data->parse_data.p_HDMI_vsdb);
if (mhl_edid_3d_data->parse_data.p_HDMI_vsdb) {
mhl_edid_3d_data->parse_data.p_HDMI_vsdb->
header.fields.length_following_header -=
@@ -3123,7 +3123,7 @@ void si_mhl_tx_process_hev_vic_burst(struct edid_3d_data_t *mhl_edid_3d_data,
ANSI_ESC_RED_TEXT, ANSI_ESC_RESET_TEXT);
return;
} else {
- MHL_TX_DBG_WARN(" %d %p\n", hev_index,
+ MHL_TX_DBG_WARN(" %d %pK\n", hev_index,
mhl_edid_3d_data->hev_vic_list)
mhl_edid_3d_data->hev_vic_info.
num_items_allocated =
@@ -3136,7 +3136,7 @@ void si_mhl_tx_process_hev_vic_burst(struct edid_3d_data_t *mhl_edid_3d_data,
MHL_TX_DBG_ERR("bogus write burst, no hev_vic_list\n")
return;
}
- MHL_TX_DBG_WARN(" %d %p\n", hev_index, mhl_edid_3d_data->hev_vic_list)
+ MHL_TX_DBG_WARN(" %d %pK\n", hev_index, mhl_edid_3d_data->hev_vic_list)
if (NULL == mhl_edid_3d_data->hev_vic_list) {
MHL_TX_DBG_ERR("%s no place to put HEV_VIC burst%s\n",
ANSI_ESC_RED_TEXT, ANSI_ESC_RESET_TEXT);
@@ -3155,7 +3155,7 @@ void si_mhl_tx_process_hev_vic_burst(struct edid_3d_data_t *mhl_edid_3d_data,
burst_id_HEV_VIC,
(union video_burst_descriptor_u *) &p_burst->
video_descriptors[i])) {
- MHL_TX_DBG_INFO(" %d %p\n",
+ MHL_TX_DBG_INFO(" %d %pK\n",
hev_index, mhl_edid_3d_data->hev_vic_list)
mhl_edid_3d_data->hev_vic_list[hev_index].
mhl3_hev_vic_descriptor =
@@ -4036,7 +4036,8 @@ static uint8_t parse_861_block(struct edid_3d_data_t *mhl_edid_3d_data,
mhl_edid_3d_data->parse_data.p_HDMI_vsdb = NULL;
- MHL_TX_EDID_INFO("tag:place holder EDID block:%p\n", p_EDID_block_data);
+ MHL_TX_EDID_INFO("tag:place holder EDID block:%pK\n",
+ p_EDID_block_data);
if (EDID_EXTENSION_BLOCK_MAP == p_CEA_extension->tag) {
struct block_map_t *p_block_map;
int i;
@@ -4123,7 +4124,7 @@ void si_mhl_tx_handle_atomic_hw_edid_read_complete(
mhl_edid_3d_data->parse_data.num_EDID_extensions;
++counter) {
MHL_TX_EDID_INFO
- (" counter:%d tag:place holder EDID block:%p\n",
+ (" counter:%d tag:place holder EDID block:%pK\n",
counter,
&mhl_edid_3d_data->
EDID_block_data[EDID_BLOCK_SIZE * counter]);
diff --git a/drivers/video/fbdev/msm/msm_dba/adv7533.c b/drivers/video/fbdev/msm/msm_dba/adv7533.c
index a3b4466d105d..8503d84e0de4 100644
--- a/drivers/video/fbdev/msm/msm_dba/adv7533.c
+++ b/drivers/video/fbdev/msm/msm_dba/adv7533.c
@@ -880,8 +880,8 @@ static void adv7533_handle_cec_intr(struct adv7533 *pdata, u8 cec_status)
{
u8 cec_int_clear = 0x08;
bool cec_rx_intr = false;
- u8 cec_rx_ready;
- u8 cec_rx_timestamp;
+ u8 cec_rx_ready = 0;
+ u8 cec_rx_timestamp = 0;
if (!pdata) {
pr_err("%s: Invalid input\n", __func__);
@@ -983,7 +983,7 @@ end:
static void *adv7533_handle_hpd_intr(struct adv7533 *pdata)
{
- int ret;
+ int ret = 0;
u8 hpd_state;
u8 connected = 0, disconnected = 0;
diff --git a/drivers/video/fbdev/msm/msm_ext_display.c b/drivers/video/fbdev/msm/msm_ext_display.c
index a21242870a35..903cab1ac059 100644
--- a/drivers/video/fbdev/msm/msm_ext_display.c
+++ b/drivers/video/fbdev/msm/msm_ext_display.c
@@ -380,6 +380,88 @@ end:
return ret;
}
+static int msm_ext_disp_get_intf_data_helper(struct platform_device *pdev,
+ struct msm_ext_disp_init_data **data)
+{
+ int ret = 0;
+ struct msm_ext_disp *ext_disp = NULL;
+
+ if (!pdev) {
+ pr_err("No platform device\n");
+ ret = -ENODEV;
+ goto end;
+ }
+
+ ext_disp = platform_get_drvdata(pdev);
+ if (!ext_disp) {
+ pr_err("No drvdata found\n");
+ ret = -ENODEV;
+ goto end;
+ }
+
+ mutex_lock(&ext_disp->lock);
+
+ if (ext_disp->current_disp == EXT_DISPLAY_TYPE_MAX) {
+ ret = -EINVAL;
+ pr_err("No display connected\n");
+ goto error;
+ }
+
+ ret = msm_ext_disp_get_intf_data(ext_disp, ext_disp->current_disp,
+ data);
+ if (ret)
+ goto error;
+error:
+ mutex_unlock(&ext_disp->lock);
+end:
+ return ret;
+}
+static int msm_ext_disp_cable_status(struct platform_device *pdev, u32 vote)
+{
+ int ret = 0;
+ struct msm_ext_disp_init_data *data = NULL;
+
+ ret = msm_ext_disp_get_intf_data_helper(pdev, &data);
+ if (ret || !data)
+ goto end;
+
+ ret = data->codec_ops.cable_status(data->pdev, vote);
+
+end:
+ return ret;
+}
+
+static int msm_ext_disp_get_audio_edid_blk(struct platform_device *pdev,
+ struct msm_ext_disp_audio_edid_blk *blk)
+{
+ int ret = 0;
+ struct msm_ext_disp_init_data *data = NULL;
+
+ ret = msm_ext_disp_get_intf_data_helper(pdev, &data);
+ if (ret || !data)
+ goto end;
+
+ ret = data->codec_ops.get_audio_edid_blk(data->pdev, blk);
+
+end:
+ return ret;
+}
+
+static int msm_ext_disp_audio_info_setup(struct platform_device *pdev,
+ struct msm_ext_disp_audio_setup_params *params)
+{
+ int ret = 0;
+ struct msm_ext_disp_init_data *data = NULL;
+
+ ret = msm_ext_disp_get_intf_data_helper(pdev, &data);
+ if (ret || !data)
+ goto end;
+
+ ret = data->codec_ops.audio_info_setup(data->pdev, params);
+
+end:
+ return ret;
+}
static int msm_ext_disp_get_intf_id(struct platform_device *pdev)
{
@@ -456,11 +538,11 @@ static int msm_ext_disp_notify(struct platform_device *pdev,
if (new_state == EXT_DISPLAY_CABLE_CONNECT && ext_disp->ops) {
ext_disp->ops->audio_info_setup =
- data->codec_ops.audio_info_setup;
+ msm_ext_disp_audio_info_setup;
ext_disp->ops->get_audio_edid_blk =
- data->codec_ops.get_audio_edid_blk;
+ msm_ext_disp_get_audio_edid_blk;
ext_disp->ops->cable_status =
- data->codec_ops.cable_status;
+ msm_ext_disp_cable_status;
ext_disp->ops->get_intf_id =
msm_ext_disp_get_intf_id;
}
@@ -590,6 +672,33 @@ end:
return ret;
}
+static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data)
+{
+ if (!init_data) {
+ pr_err("Invalid init_data\n");
+ return -EINVAL;
+ }
+
+ if (!init_data->pdev) {
+ pr_err("Invalid display intf pdev\n");
+ return -EINVAL;
+ }
+
+ if (!init_data->kobj) {
+ pr_err("Invalid display intf kobj\n");
+ return -EINVAL;
+ }
+
+ if (!init_data->codec_ops.get_audio_edid_blk ||
+ !init_data->codec_ops.cable_status ||
+ !init_data->codec_ops.audio_info_setup) {
+ pr_err("Invalid codec operation pointers\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int msm_ext_disp_register_intf(struct platform_device *pdev,
struct msm_ext_disp_init_data *init_data)
{
@@ -610,6 +719,10 @@ int msm_ext_disp_register_intf(struct platform_device *pdev,
mutex_lock(&ext_disp->lock);
+ ret = msm_ext_disp_validate_intf(init_data);
+ if (ret)
+ goto end;
+
ret = msm_ext_disp_get_intf_data(ext_disp, init_data->type, &data);
if (!ret) {
pr_debug("Display (%s) already registered\n",
@@ -675,6 +788,14 @@ static int msm_ext_disp_probe(struct platform_device *pdev)
if (ret)
goto switch_dev_failure;
+ ret = of_platform_populate(of_node, NULL, NULL, &pdev->dev);
+ if (ret) {
+ pr_err("Failed to add child devices. Error = %d\n", ret);
+ goto child_node_failure;
+ } else {
+ pr_debug("%s: Added child devices.\n", __func__);
+ }
+
mutex_init(&ext_disp->lock);
INIT_LIST_HEAD(&ext_disp->display_list);
@@ -682,6 +803,8 @@ static int msm_ext_disp_probe(struct platform_device *pdev)
return ret;
+child_node_failure:
+ msm_ext_disp_switch_dev_unregister(ext_disp);
switch_dev_failure:
devm_kfree(&ext_disp->pdev->dev, ext_disp);
end:
diff --git a/include/dt-bindings/clock/msm-clocks-cobalt.h b/include/dt-bindings/clock/msm-clocks-cobalt.h
index 3fb1e45373da..69b6b60e1a6b 100644
--- a/include/dt-bindings/clock/msm-clocks-cobalt.h
+++ b/include/dt-bindings/clock/msm-clocks-cobalt.h
@@ -383,6 +383,8 @@
#define clk_mmss_camss_gp1_clk 0xdccdd730
#define clk_mmss_camss_ispif_ahb_clk 0xbda4f0e3
#define clk_mmss_camss_jpeg0_clk 0x4cc73b07
+#define clk_mmss_camss_jpeg0_vote_clk 0xc9efa6ac
+#define clk_mmss_camss_jpeg0_dma_vote_clk 0x371ec109
#define clk_mmss_camss_jpeg_ahb_clk 0xde1fece3
#define clk_mmss_camss_jpeg_axi_clk 0x7534616b
#define clk_mmss_camss_mclk0_clk 0x056293a7
@@ -463,11 +465,10 @@
#define clk_dsi1pll_vco_clk 0x99797b50
#define clk_dp_vco_clk 0xfcaaeec7
-#define clk_hsclk_divsel_clk_src 0x0a325543
#define clk_dp_link_2x_clk_divsel_five 0xcfe3f5dd
-#define clk_dp_link_2x_clk_divsel_ten 0xfeb9924d
-#define clk_dp_link_2x_clk_mux 0xce4c4fc6
-#define clk_vco_divided_clk_src 0x3da6cb51
+#define clk_vco_divsel_four_clk_src 0xe0da19c0
+#define clk_vco_divsel_two_clk_src 0xb5cfc6a8
+#define clk_vco_divided_clk_src_mux 0x3f8197c2
#define clk_hdmi_vco_clk 0xbb7dc20d
/* clock_gpu controlled clocks*/
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 903a8e852f5d..66bf56640fe1 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -194,10 +194,12 @@ struct coresight_device {
* Operations available for sinks
* @enable: enables the sink.
* @disable: disables the sink.
+ * @abort: captures sink trace on abort
*/
struct coresight_ops_sink {
int (*enable)(struct coresight_device *csdev);
void (*disable)(struct coresight_device *csdev);
+ void (*abort)(struct coresight_device *csdev);
};
/**
@@ -239,6 +241,7 @@ extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
extern int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value);
+extern void coresight_abort(void);
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -248,6 +251,7 @@ coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
static inline int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value) { return 1; }
+static inline void coresight_abort(void) {}
#endif
#if defined(CONFIG_OF) && defined(CONFIG_CORESIGHT)
diff --git a/include/linux/hdcp_qseecom.h b/include/linux/hdcp_qseecom.h
index 26e97700fc73..f66264bc935a 100644
--- a/include/linux/hdcp_qseecom.h
+++ b/include/linux/hdcp_qseecom.h
@@ -14,6 +14,8 @@
#define __HDCP_QSEECOM_H
#include <linux/types.h>
+#define HDCP_MAX_MESSAGE_PARTS 4
+
enum hdcp_lib_wakeup_cmd {
HDCP_LIB_WKUP_CMD_INVALID,
HDCP_LIB_WKUP_CMD_START,
@@ -44,12 +46,25 @@ struct hdcp_lib_wakeup_data {
uint32_t timeout;
};
+struct hdcp_msg_part {
+ uint32_t offset;
+ uint32_t length;
+};
+
+struct hdcp_msg_data {
+ uint32_t num_messages;
+ struct hdcp_msg_part messages[HDCP_MAX_MESSAGE_PARTS];
+ uint8_t rx_status;
+};
+
struct hdmi_hdcp_wakeup_data {
enum hdmi_hdcp_wakeup_cmd cmd;
void *context;
char *send_msg_buf;
uint32_t send_msg_len;
uint32_t timeout;
+ uint8_t abort_mask;
+ const struct hdcp_msg_data *message_data;
};
static inline char *hdmi_hdcp_cmd_to_str(uint32_t cmd)
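A rough sketch of how the new message descriptors might be populated by an HDCP library client; the offsets, lengths and mask values are placeholders, not values taken from this patch:

	/* A request whose reply arrives as two parts, gated on rx_status. */
	static const struct hdcp_msg_data example_msg_data = {
		.num_messages = 2,
		.messages = {
			{ .offset = 0x60, .length = 1  },	/* placeholder */
			{ .offset = 0x80, .length = 16 },	/* placeholder */
		},
		.rx_status = 0x01,
	};

	static void example_prepare_wakeup(struct hdmi_hdcp_wakeup_data *data)
	{
		data->abort_mask   = 0x03;		/* placeholder abort bits */
		data->message_data = &example_msg_data;
	}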
diff --git a/include/linux/input/ft5x06_ts.h b/include/linux/input/ft5x06_ts.h
index 1340737070f7..bd37af71fe0d 100644
--- a/include/linux/input/ft5x06_ts.h
+++ b/include/linux/input/ft5x06_ts.h
@@ -22,6 +22,7 @@
#define FT5X16_ID 0x0A
#define FT5X36_ID 0x14
#define FT6X06_ID 0x06
+#define FT6X36_ID 0x36
struct fw_upgrade_info {
bool auto_cal;
@@ -33,6 +34,14 @@ struct fw_upgrade_info {
u16 delay_erase_flash;
};
+struct ft5x06_psensor_platform_data {
+ struct input_dev *input_psensor_dev;
+ struct sensors_classdev ps_cdev;
+ int tp_psensor_opened;
+ char tp_psensor_data; /* 0 near, 1 far */
+ struct ft5x06_ts_data *data;
+};
+
struct ft5x06_ts_platform_data {
struct fw_upgrade_info info;
const char *name;
@@ -59,6 +68,7 @@ struct ft5x06_ts_platform_data {
bool no_force_update;
bool i2c_pull_up;
bool ignore_id_check;
+ bool psensor_support;
int (*power_init)(bool);
int (*power_on)(bool);
};
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 5f85508353c9..81da2aaa01e5 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -764,6 +764,7 @@ enum ipa_irq_type {
IPA_TX_SUSPEND_IRQ,
IPA_TX_HOLB_DROP_IRQ,
IPA_BAM_IDLE_IRQ,
+ IPA_BAM_GSI_IDLE_IRQ = IPA_BAM_IDLE_IRQ,
IPA_IRQ_MAX
};
@@ -948,6 +949,8 @@ struct ipa_wdi_ul_params_smmu {
struct sg_table rdy_comp_ring;
phys_addr_t rdy_comp_ring_wp_pa;
u32 rdy_comp_ring_size;
+ u32 *rdy_ring_rp_va;
+ u32 *rdy_comp_ring_wp_va;
};
/**
diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h
index 81a95657a719..54c99d9cb245 100644
--- a/include/linux/msm_ext_display.h
+++ b/include/linux/msm_ext_display.h
@@ -121,6 +121,7 @@ struct msm_ext_disp_init_data {
struct kobject *kobj;
struct msm_ext_disp_intf_ops intf_ops;
struct msm_ext_disp_audio_codec_ops codec_ops;
+ struct platform_device *pdev;
};
/*
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 246945be000c..55240f9a3b94 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1896,8 +1896,11 @@ static inline int usb_translate_errors(int error_code)
#define USB_DEVICE_REMOVE 0x0002
#define USB_BUS_ADD 0x0003
#define USB_BUS_REMOVE 0x0004
+#define USB_BUS_DIED 0x0005
extern void usb_register_notify(struct notifier_block *nb);
extern void usb_unregister_notify(struct notifier_block *nb);
+extern void usb_register_atomic_notify(struct notifier_block *nb);
+extern void usb_unregister_atomic_notify(struct notifier_block *nb);
/* debugfs stuff */
extern struct dentry *usb_debug_root;
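A minimal sketch of a consumer of the new atomic notifier chain; the callback body is illustrative and assumes USB_BUS_DIED may be delivered from atomic context, so it only logs and defers any recovery work:

	static int example_usb_atomic_notify(struct notifier_block *nb,
					     unsigned long action, void *data)
	{
		if (action == USB_BUS_DIED)
			pr_err_ratelimited("USB bus died, deferring recovery\n");
		return NOTIFY_OK;
	}

	static struct notifier_block example_usb_nb = {
		.notifier_call = example_usb_atomic_notify,
	};

	static int __init example_init(void)
	{
		usb_register_atomic_notify(&example_usb_nb);
		return 0;
	}

	static void __exit example_exit(void)
	{
		usb_unregister_atomic_notify(&example_usb_nb);
	}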
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
index 1349a3440e22..b0a8d67f50fa 100644
--- a/include/soc/qcom/qseecomi.h
+++ b/include/soc/qcom/qseecomi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,7 @@
#define QSEECOM_KEY_ID_SIZE 32
+#define QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD -19 /*0xFFFFFFED*/
#define QSEOS_RESULT_FAIL_UNSUPPORTED_CE_PIPE -63
#define QSEOS_RESULT_FAIL_KS_OP -64
#define QSEOS_RESULT_FAIL_KEY_ID_EXISTS -65
@@ -64,6 +65,9 @@ enum qseecom_qceos_cmd_id {
QSEOS_TEE_REQUEST_CANCELLATION,
QSEOS_CONTINUE_BLOCKED_REQ_COMMAND,
QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND = 0x1B,
+ QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST = 0x1C,

+ QSEOS_TEE_OPEN_SESSION_WHITELIST = 0x1D,
+ QSEOS_TEE_INVOKE_COMMAND_WHITELIST = 0x1E,
QSEOS_FSM_LTEOTA_REQ_CMD = 0x109,
QSEOS_FSM_LTEOTA_REQ_RSP_CMD = 0x110,
QSEOS_FSM_IKE_REQ_CMD = 0x203,
@@ -181,6 +185,8 @@ __packed struct qseecom_client_send_data_ireq {
uint32_t req_len;
uint32_t rsp_ptr;/* First 4 bytes should be the return status */
uint32_t rsp_len;
+ uint32_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
};
__packed struct qseecom_client_send_data_64bit_ireq {
@@ -190,6 +196,8 @@ __packed struct qseecom_client_send_data_64bit_ireq {
uint32_t req_len;
uint64_t rsp_ptr;
uint32_t rsp_len;
+ uint64_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
};
__packed struct qseecom_reg_log_buf_ireq {
@@ -292,6 +300,8 @@ __packed struct qseecom_qteec_ireq {
uint32_t req_len;
uint32_t resp_ptr;
uint32_t resp_len;
+ uint32_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
};
__packed struct qseecom_qteec_64bit_ireq {
@@ -301,6 +311,8 @@ __packed struct qseecom_qteec_64bit_ireq {
uint32_t req_len;
uint64_t resp_ptr;
uint32_t resp_len;
+ uint64_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
};
__packed struct qseecom_client_send_fsm_key_req {
@@ -658,4 +670,37 @@ __packed struct qseecom_continue_blocked_request_ireq {
#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+ TZ_SVC_APP_ID_PLACEHOLDER, 0x06)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_7( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+ TZ_SVC_APP_ID_PLACEHOLDER, 0x07)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_7( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+ TZ_SVC_APP_ID_PLACEHOLDER, 0x09)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_7( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
#endif /* __QSEECOMI_H_ */
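As a hedged sketch only: the new sglistinfo fields would carry the address and size of a scatter-gather list table before the whitelist variant of the command is issued; the helper and its arguments below are hypothetical, the real handling lives in drivers/misc/qseecom.c:

	static void example_fill_sglist_info(
			struct qseecom_client_send_data_64bit_ireq *ireq,
			uint64_t table_pa, uint32_t table_len)
	{
		/* req_ptr/req_len and rsp_ptr/rsp_len are set up as before;
		 * the command id would then be
		 * QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST. */
		ireq->sglistinfo_ptr = table_pa;   /* address of the SG-list table */
		ireq->sglistinfo_len = table_len;  /* size of that table in bytes  */
	}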
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 3464726c408a..695e33f4d1cf 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -3375,6 +3375,7 @@ struct afe_lpass_core_shared_clk_config_command {
#define DEFAULT_COPP_TOPOLOGY 0x00010314
#define DEFAULT_POPP_TOPOLOGY 0x00010BE4
#define COMPRESSED_PASSTHROUGH_DEFAULT_TOPOLOGY 0x0001076B
+#define COMPRESS_PASSTHROUGH_NONE_TOPOLOGY 0x00010774
#define VPM_TX_SM_ECNS_COPP_TOPOLOGY 0x00010F71
#define VPM_TX_DM_FLUENCE_COPP_TOPOLOGY 0x00010F72
#define VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY 0x00010F75
@@ -3590,6 +3591,15 @@ struct asm_ape_cfg {
u32 seek_table_present;
};
+struct asm_dsd_cfg {
+ u16 num_version;
+ u16 is_bitwise_big_endian;
+ u16 dsd_channel_block_size;
+ u16 num_channels;
+ u8 channel_mapping[8];
+ u32 dsd_data_rate;
+};
+
struct asm_softpause_params {
u32 enable;
u32 period;
@@ -4158,6 +4168,19 @@ struct asm_ape_fmt_blk_v2 {
} __packed;
+struct asm_dsd_fmt_blk_v2 {
+ struct apr_hdr hdr;
+ struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+
+ u16 num_version;
+ u16 is_bitwise_big_endian;
+ u16 dsd_channel_block_size;
+ u16 num_channels;
+ u8 channel_mapping[8];
+ u32 dsd_data_rate;
+
+} __packed;
+
#define ASM_MEDIA_FMT_AMRNB_FS 0x00010BEB
/* Enumeration for 4.75 kbps AMR-NB Encoding mode. */
@@ -4566,6 +4589,7 @@ struct asm_amrwbplus_fmt_blk_v2 {
#define ASM_MEDIA_FMT_ALAC 0x00012F31
#define ASM_MEDIA_FMT_VORBIS 0x00010C15
#define ASM_MEDIA_FMT_APE 0x00012F32
+#define ASM_MEDIA_FMT_DSD 0x00012F3E
/* Media format ID for adaptive transform acoustic coding. This
@@ -9565,6 +9589,7 @@ enum {
LEGACY_PCM = 0,
COMPRESSED_PASSTHROUGH,
COMPRESSED_PASSTHROUGH_CONVERT,
+ COMPRESSED_PASSTHROUGH_DSD,
};
#define AUDPROC_MODULE_ID_COMPRESSED_MUTE 0x00010770
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 8339d538d578..dadc2f7a4eae 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -52,6 +52,7 @@
#define FORMAT_G711_ALAW_FS 0x001a
#define FORMAT_G711_MLAW_FS 0x001b
#define FORMAT_DTS 0x001c
+#define FORMAT_DSD 0x001d
#define ENCDEC_SBCBITRATE 0x0001
#define ENCDEC_IMMEDIATE_DECODE 0x0002
@@ -471,6 +472,9 @@ int q6asm_stream_media_format_block_vorbis(struct audio_client *ac,
int q6asm_media_format_block_ape(struct audio_client *ac,
struct asm_ape_cfg *cfg, int stream_id);
+int q6asm_media_format_block_dsd(struct audio_client *ac,
+ struct asm_dsd_cfg *cfg, int stream_id);
+
int q6asm_ds1_set_endp_params(struct audio_client *ac,
int param_id, int param_value);
diff --git a/include/trace/events/exception.h b/include/trace/events/exception.h
new file mode 100644
index 000000000000..6b525da1432e
--- /dev/null
+++ b/include/trace/events/exception.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM exception
+
+#if !defined(_TRACE_EXCEPTION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EXCEPTION_H
+
+#include <linux/tracepoint.h>
+
+struct task_struct;
+
+TRACE_EVENT(user_fault,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long addr, unsigned int fsr),
+
+ TP_ARGS(tsk, addr, fsr),
+
+ TP_STRUCT__entry(
+ __string(task_name, tsk->comm)
+ __field(unsigned long, addr)
+ __field(unsigned int, fsr)
+ ),
+
+ TP_fast_assign(
+ __assign_str(task_name, tsk->comm)
+ __entry->addr = addr;
+ __entry->fsr = fsr;
+ ),
+
+ TP_printk("task_name:%s addr:%lu, fsr:%u", __get_str(task_name),
+ __entry->addr, __entry->fsr)
+);
+
+
+struct pt_regs;
+
+TRACE_EVENT(undef_instr,
+
+ TP_PROTO(struct pt_regs *regs, void *prog_cnt),
+
+ TP_ARGS(regs, prog_cnt),
+
+ TP_STRUCT__entry(
+ __field(void *, prog_cnt)
+ __field(struct pt_regs *, regs)
+ ),
+
+ TP_fast_assign(
+ __entry->regs = regs;
+ __entry->prog_cnt = prog_cnt;
+ ),
+
+ TP_printk("pc:%p", __entry->prog_cnt)
+);
+
+TRACE_EVENT(unhandled_abort,
+
+ TP_PROTO(struct pt_regs *regs, unsigned long addr, unsigned int fsr),
+
+ TP_ARGS(regs, addr, fsr),
+
+ TP_STRUCT__entry(
+ __field(struct pt_regs *, regs)
+ __field(unsigned long, addr)
+ __field(unsigned int, fsr)
+ ),
+
+ TP_fast_assign(
+ __entry->regs = regs;
+ __entry->addr = addr;
+ __entry->fsr = fsr;
+ ),
+
+ TP_printk("addr:%lu, fsr:%u", __entry->addr, __entry->fsr)
+);
+
+TRACE_EVENT(kernel_panic,
+
+ TP_PROTO(long dummy),
+
+ TP_ARGS(dummy),
+
+ TP_STRUCT__entry(
+ __field(long, dummy)
+ ),
+
+ TP_fast_assign(
+ __entry->dummy = dummy;
+ ),
+
+ TP_printk("dummy:%ld", __entry->dummy)
+);
+
+TRACE_EVENT(kernel_panic_late,
+
+ TP_PROTO(long dummy),
+
+ TP_ARGS(dummy),
+
+ TP_STRUCT__entry(
+ __field(long, dummy)
+ ),
+
+ TP_fast_assign(
+ __entry->dummy = dummy;
+ ),
+
+ TP_printk("dummy:%ld", __entry->dummy)
+);
+
+#endif
+
+#include <trace/define_trace.h>
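With CREATE_TRACE_POINTS defined in one translation unit (as kernel/panic.c does further below for the panic events), the generated trace_*() helpers can be called from the corresponding handlers; a hedged sketch of an emit site for the user-fault event, with the surrounding handler and fsr plumbing assumed:

	#include <trace/events/exception.h>

	/* Sketch: record a user-space fault before the signal is delivered. */
	static void example_report_user_fault(struct task_struct *tsk,
					      unsigned long addr,
					      unsigned int fsr)
	{
		trace_user_fault(tsk, addr, fsr);
	}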
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 32172c8f7d37..0bac6947a1cb 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -520,3 +520,4 @@ header-y += android_pmem.h
header-y += ipa_qmi_service_v01.h
header-y += rmnet_ipa_fd_ioctl.h
header-y += msm_ipa.h
+header-y += smcinvoke.h
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 60ebda8be9cb..6aa021e12930 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -96,6 +96,11 @@
#define IPA_MBIM_MAX_STREAM_NUM 8
/**
+ * size of the IPv6 gateway address array, in 32-bit words
+ */
+#define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4
+
+/**
* the attributes of the rule (routing or filtering)
*/
#define IPA_FLT_TOS (1ul << 0)
@@ -1435,12 +1440,15 @@ struct ipa_ecm_msg {
* @name: name of the wan interface
*
* CnE need to pass the name of default wan iface when connected/disconnected.
+ * CnE also needs to pass the gateway info in WLAN AP+STA mode.
* netmgr need to pass the name of wan eMBMS iface when connected.
*/
struct ipa_wan_msg {
char upstream_ifname[IPA_RESOURCE_NAME_MAX];
char tethered_ifname[IPA_RESOURCE_NAME_MAX];
enum ipa_ip_type ip;
+ uint32_t ipv4_addr_gw;
+ uint32_t ipv6_addr_gw[IPA_WAN_MSG_IPv6_ADDR_GW_LEN];
};
/**
diff --git a/include/uapi/linux/smcinvoke.h b/include/uapi/linux/smcinvoke.h
new file mode 100644
index 000000000000..1dc9a63c15e5
--- /dev/null
+++ b/include/uapi/linux/smcinvoke.h
@@ -0,0 +1,45 @@
+#ifndef _UAPI_SMCINVOKE_H_
+#define _UAPI_SMCINVOKE_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define SMCINVOKE_USERSPACE_OBJ_NULL -1
+
+struct smcinvoke_buf {
+ uint64_t addr;
+ uint64_t size;
+};
+
+struct smcinvoke_obj {
+ int64_t fd;
+ int64_t reserved;
+};
+
+union smcinvoke_arg {
+ struct smcinvoke_buf b;
+ struct smcinvoke_obj o;
+};
+
+/*
+ * struct smcinvoke_cmd_req: This structure is transparently sent to TEE
+ * @op - Operation to be performed
+ * @counts - number of arguments passed
+ * @result - result of the invoke operation
+ * @argsize - size of each argument
+ * @args - pointer to a buffer holding all arguments
+ */
+struct smcinvoke_cmd_req {
+ uint32_t op;
+ uint32_t counts;
+ int32_t result;
+ uint32_t argsize;
+ uint64_t __user args;
+};
+
+#define SMCINVOKE_IOC_MAGIC 0x98
+
+#define SMCINVOKE_IOCTL_INVOKE_REQ \
+ _IOWR(SMCINVOKE_IOC_MAGIC, 1, struct smcinvoke_cmd_req)
+
+#endif /* _UAPI_SMCINVOKE_H_ */
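From user space this ABI would be exercised roughly as below; the device node path, the op value and the encoding of counts are assumptions for illustration (the header is used as installed, where the __user annotation is stripped):

	#include <stdint.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/smcinvoke.h>

	int example_invoke(void)
	{
		union smcinvoke_arg args[1];
		struct smcinvoke_cmd_req req;
		char buf[64] = "hello";
		int fd, ret;

		fd = open("/dev/smcinvoke", O_RDWR);	/* assumed node name */
		if (fd < 0)
			return -1;

		memset(&req, 0, sizeof(req));
		memset(args, 0, sizeof(args));
		args[0].b.addr = (uint64_t)(uintptr_t)buf;
		args[0].b.size = sizeof(buf);

		req.op      = 0;	/* operation id, target-specific */
		req.counts  = 1;	/* one buffer argument; real encoding assumed */
		req.argsize = sizeof(union smcinvoke_arg);
		req.args    = (uint64_t)(uintptr_t)args;

		ret = ioctl(fd, SMCINVOKE_IOCTL_INVOKE_REQ, &req);
		close(fd);
		return ret < 0 ? ret : req.result;
	}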
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 4d9c4b5c29f3..47367c663011 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -97,7 +97,8 @@
#define SND_AUDIOCODEC_EAC3 ((__u32) 0x00000018)
#define SND_AUDIOCODEC_ALAC ((__u32) 0x00000019)
#define SND_AUDIOCODEC_APE ((__u32) 0x00000020)
-#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_APE
+#define SND_AUDIOCODEC_DSD ((__u32) 0x00000021)
+#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_DSD
/*
* Profile and modes are listed with bit masks. This allows for a
* more compact representation of fields that will not evolve
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a65d63463420..92c34fe1b2b9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -98,6 +98,7 @@ struct cpuset {
/* user-configured CPUs and Memory Nodes allow to tasks */
cpumask_var_t cpus_allowed;
+ cpumask_var_t cpus_requested; /* CPUS requested, but not used because of hotplug */
nodemask_t mems_allowed;
/* effective CPUs and Memory Nodes allow to tasks */
@@ -386,7 +387,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
- return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+ return cpumask_subset(p->cpus_requested, q->cpus_requested) &&
nodes_subset(p->mems_allowed, q->mems_allowed) &&
is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -486,7 +487,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
cpuset_for_each_child(c, css, par) {
if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
c != cur &&
- cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+ cpumask_intersects(trial->cpus_requested, c->cpus_requested))
goto out;
if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
c != cur &&
@@ -945,17 +946,18 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (!*buf) {
cpumask_clear(trialcs->cpus_allowed);
} else {
- retval = cpulist_parse(buf, trialcs->cpus_allowed);
+ retval = cpulist_parse(buf, trialcs->cpus_requested);
if (retval < 0)
return retval;
- if (!cpumask_subset(trialcs->cpus_allowed,
- top_cpuset.cpus_allowed))
+ if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
return -EINVAL;
+
+ cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
}
/* Nothing to do if the cpus didn't change */
- if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+ if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
return 0;
retval = validate_change(cs, trialcs);
@@ -964,6 +966,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+ cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
spin_unlock_irq(&callback_lock);
/* use trialcs->cpus_allowed as a temp variable */
@@ -1754,7 +1757,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
switch (type) {
case FILE_CPULIST:
- seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+ seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
break;
case FILE_MEMLIST:
seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
@@ -1942,12 +1945,15 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
if (!cs)
return ERR_PTR(-ENOMEM);
if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
- goto free_cs;
+ goto error_allowed;
if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
- goto free_cpus;
+ goto error_effective;
+ if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+ goto error_requested;
set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
cpumask_clear(cs->cpus_allowed);
+ cpumask_clear(cs->cpus_requested);
nodes_clear(cs->mems_allowed);
cpumask_clear(cs->effective_cpus);
nodes_clear(cs->effective_mems);
@@ -1956,9 +1962,11 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
return &cs->css;
-free_cpus:
+error_requested:
+ free_cpumask_var(cs->effective_cpus);
+error_effective:
free_cpumask_var(cs->cpus_allowed);
-free_cs:
+error_allowed:
kfree(cs);
return ERR_PTR(-ENOMEM);
}
@@ -2019,6 +2027,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cs->mems_allowed = parent->mems_allowed;
cs->effective_mems = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+ cpumask_copy(cs->cpus_requested, parent->cpus_requested);
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
spin_unlock_irq(&callback_lock);
out_unlock:
@@ -2053,6 +2062,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
free_cpumask_var(cs->effective_cpus);
free_cpumask_var(cs->cpus_allowed);
+ free_cpumask_var(cs->cpus_requested);
kfree(cs);
}
@@ -2120,8 +2130,11 @@ int __init cpuset_init(void)
BUG();
if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
BUG();
+ if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL))
+ BUG();
cpumask_setall(top_cpuset.cpus_allowed);
+ cpumask_setall(top_cpuset.cpus_requested);
nodes_setall(top_cpuset.mems_allowed);
cpumask_setall(top_cpuset.effective_cpus);
nodes_setall(top_cpuset.effective_mems);
@@ -2255,7 +2268,8 @@ retry:
goto retry;
}
- cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
+ cpumask_and(&new_cpus, cs->cpus_requested,
+ parent_cs(cs)->effective_cpus);
nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
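The net effect of the cpuset changes above, as a sketch: the mask written by the user is preserved in cpus_requested (validated only against present CPUs), while cpus_allowed is re-derived as its intersection with the active mask, so a CPU that goes offline and later comes back is restored to the cpuset. A hypothetical helper expressing that invariant (not one of the patched functions):

	static void example_recompute_allowed(struct cpuset *cs)
	{
		/* cpus_requested: what the user asked for (survives hotplug)
		 * cpus_allowed:   what is usable right now */
		cpumask_and(cs->cpus_allowed, cs->cpus_requested, cpu_active_mask);
	}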
diff --git a/kernel/panic.c b/kernel/panic.c
index 223564d3e1f8..b4a0edc489c5 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -25,6 +25,9 @@
#include <linux/nmi.h>
#include <linux/console.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/exception.h>
+
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
@@ -80,6 +83,8 @@ void panic(const char *fmt, ...)
long i, i_next = 0;
int state = 0;
+ trace_kernel_panic(0);
+
/*
* Disable local interrupts. This will prevent panic_smp_self_stop
* from deadlocking the first cpu that invokes the panic, since
@@ -181,6 +186,9 @@ void panic(const char *fmt, ...)
mdelay(PANIC_TIMER_STEP);
}
}
+
+ trace_kernel_panic_late(0);
+
if (panic_timeout != 0) {
/*
* This will not be a clean reboot, with everything
diff --git a/sound/soc/msm/msmcobalt.c b/sound/soc/msm/msmcobalt.c
index 5be5137f447f..b0948acf8a71 100644
--- a/sound/soc/msm/msmcobalt.c
+++ b/sound/soc/msm/msmcobalt.c
@@ -51,8 +51,12 @@
#define SAMPLING_RATE_32KHZ 32000
#define SAMPLING_RATE_44P1KHZ 44100
#define SAMPLING_RATE_48KHZ 48000
+#define SAMPLING_RATE_88P2KHZ 88200
#define SAMPLING_RATE_96KHZ 96000
+#define SAMPLING_RATE_176P4KHZ 176400
#define SAMPLING_RATE_192KHZ 192000
+#define SAMPLING_RATE_352P8KHZ 352800
+#define SAMPLING_RATE_384KHZ 384000
#define WCD9XXX_MBHC_DEF_BUTTONS 8
#define WCD9XXX_MBHC_DEF_RLOADS 5
@@ -176,7 +180,8 @@ static const char *const vi_feed_ch_text[] = {"One", "Two"};
static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE"};
static char const *slim_sample_rate_text[] = {"KHZ_8", "KHZ_16",
"KHZ_32", "KHZ_44P1", "KHZ_48",
- "KHZ_96", "KHZ_192"};
+ "KHZ_88P2", "KHZ_96", "KHZ_176P4",
+ "KHZ_192", "KHZ_352P8", "KHZ_384"};
static char const *bt_sample_rate_text[] = {"KHZ_8", "KHZ_16", "KHZ_48"};
static const char *const usb_ch_text[] = {"One", "Two"};
static char const *ch_text[] = {"Two", "Three", "Four", "Five",
@@ -189,6 +194,7 @@ static char const *hdmi_rx_sample_rate_text[] = {"KHZ_48", "KHZ_96",
"KHZ_192"};
static SOC_ENUM_SINGLE_EXT_DECL(slim_0_rx_chs, slim_rx_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_2_rx_chs, slim_rx_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_0_tx_chs, slim_tx_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_1_tx_chs, slim_tx_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_5_rx_chs, slim_rx_ch_text);
@@ -206,6 +212,7 @@ static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(hdmi_rx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_0_rx_sample_rate, slim_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_2_rx_sample_rate, slim_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_0_tx_sample_rate, slim_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_5_rx_sample_rate, slim_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_6_rx_sample_rate, slim_sample_rate_text);
@@ -278,12 +285,24 @@ static int slim_get_sample_rate_val(int sample_rate)
case SAMPLING_RATE_48KHZ:
sample_rate_val = 4;
break;
- case SAMPLING_RATE_96KHZ:
+ case SAMPLING_RATE_88P2KHZ:
sample_rate_val = 5;
break;
- case SAMPLING_RATE_192KHZ:
+ case SAMPLING_RATE_96KHZ:
sample_rate_val = 6;
break;
+ case SAMPLING_RATE_176P4KHZ:
+ sample_rate_val = 7;
+ break;
+ case SAMPLING_RATE_192KHZ:
+ sample_rate_val = 8;
+ break;
+ case SAMPLING_RATE_352P8KHZ:
+ sample_rate_val = 9;
+ break;
+ case SAMPLING_RATE_384KHZ:
+ sample_rate_val = 10;
+ break;
default:
sample_rate_val = 4;
break;
@@ -312,11 +331,23 @@ static int slim_get_sample_rate(int value)
sample_rate = SAMPLING_RATE_48KHZ;
break;
case 5:
- sample_rate = SAMPLING_RATE_96KHZ;
+ sample_rate = SAMPLING_RATE_88P2KHZ;
break;
case 6:
+ sample_rate = SAMPLING_RATE_96KHZ;
+ break;
+ case 7:
+ sample_rate = SAMPLING_RATE_176P4KHZ;
+ break;
+ case 8:
sample_rate = SAMPLING_RATE_192KHZ;
break;
+ case 9:
+ sample_rate = SAMPLING_RATE_352P8KHZ;
+ break;
+ case 10:
+ sample_rate = SAMPLING_RATE_384KHZ;
+ break;
default:
sample_rate = SAMPLING_RATE_48KHZ;
break;
@@ -370,6 +401,8 @@ static int slim_get_port_idx(struct snd_kcontrol *kcontrol)
if (strnstr(kcontrol->id.name, "SLIM_0_RX", sizeof("SLIM_0_RX")))
port_id = SLIM_RX_0;
+ else if (strnstr(kcontrol->id.name, "SLIM_2_RX", sizeof("SLIM_2_RX")))
+ port_id = SLIM_RX_2;
else if (strnstr(kcontrol->id.name, "SLIM_5_RX", sizeof("SLIM_5_RX")))
port_id = SLIM_RX_5;
else if (strnstr(kcontrol->id.name, "SLIM_6_RX", sizeof("SLIM_6_RX")))
@@ -1113,6 +1146,8 @@ static int proxy_rx_ch_put(struct snd_kcontrol *kcontrol,
static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("SLIM_0_RX Channels", slim_0_rx_chs,
msm_slim_rx_ch_get, msm_slim_rx_ch_put),
+ SOC_ENUM_EXT("SLIM_2_RX Channels", slim_2_rx_chs,
+ msm_slim_rx_ch_get, msm_slim_rx_ch_put),
SOC_ENUM_EXT("SLIM_0_TX Channels", slim_0_tx_chs,
msm_slim_tx_ch_get, msm_slim_tx_ch_put),
SOC_ENUM_EXT("SLIM_1_TX Channels", slim_1_tx_chs,
@@ -1147,6 +1182,8 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
hdmi_rx_format_get, hdmi_rx_format_put),
SOC_ENUM_EXT("SLIM_0_RX SampleRate", slim_0_rx_sample_rate,
slim_rx_sample_rate_get, slim_rx_sample_rate_put),
+ SOC_ENUM_EXT("SLIM_2_RX SampleRate", slim_2_rx_sample_rate,
+ slim_rx_sample_rate_get, slim_rx_sample_rate_put),
SOC_ENUM_EXT("SLIM_0_TX SampleRate", slim_0_tx_sample_rate,
slim_tx_sample_rate_get, slim_tx_sample_rate_put),
SOC_ENUM_EXT("SLIM_5_RX SampleRate", slim_5_rx_sample_rate,
@@ -1261,6 +1298,9 @@ static int msm_slim_get_ch_from_beid(int32_t be_id)
case MSM_BACKEND_DAI_SLIMBUS_1_RX:
ch_id = SLIM_RX_1;
break;
+ case MSM_BACKEND_DAI_SLIMBUS_2_RX:
+ ch_id = SLIM_RX_2;
+ break;
case MSM_BACKEND_DAI_SLIMBUS_3_RX:
ch_id = SLIM_RX_3;
break;
@@ -1303,6 +1343,7 @@ static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
switch (dai_link->be_id) {
case MSM_BACKEND_DAI_SLIMBUS_0_RX:
case MSM_BACKEND_DAI_SLIMBUS_1_RX:
+ case MSM_BACKEND_DAI_SLIMBUS_2_RX:
case MSM_BACKEND_DAI_SLIMBUS_3_RX:
case MSM_BACKEND_DAI_SLIMBUS_4_RX:
case MSM_BACKEND_DAI_SLIMBUS_6_RX:
@@ -1877,6 +1918,10 @@ static int msm_snd_hw_params(struct snd_pcm_substream *substream,
pr_debug("%s: rx_5_ch=%d\n", __func__,
slim_rx_cfg[5].channels);
rx_ch_count = slim_rx_cfg[5].channels;
+ } else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_2_RX) {
+ pr_debug("%s: rx_2_ch=%d\n", __func__,
+ slim_rx_cfg[2].channels);
+ rx_ch_count = slim_rx_cfg[2].channels;
} else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_6_RX) {
pr_debug("%s: rx_6_ch=%d\n", __func__,
slim_rx_cfg[6].channels);
@@ -3102,6 +3147,21 @@ static struct snd_soc_dai_link msm_tavil_be_dai_links[] = {
.ignore_suspend = 1,
},
{
+ .name = LPASS_BE_SLIMBUS_2_RX,
+ .stream_name = "Slimbus2 Playback",
+ .cpu_dai_name = "msm-dai-q6-dev.16388",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "tavil_codec",
+ .codec_dai_name = "tavil_rx2",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_be_ops,
+ .ignore_pmdown_time = 1,
+ .ignore_suspend = 1,
+ },
+ {
.name = LPASS_BE_SLIMBUS_3_RX,
.stream_name = "Slimbus3 Playback",
.cpu_dai_name = "msm-dai-q6-dev.16390",
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 08c2b89de646..7e0f790b30e9 100755..100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -96,7 +96,7 @@ struct msm_compr_gapless_state {
static unsigned int supported_sample_rates[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000,
- 88200, 96000, 176400, 192000
+ 88200, 96000, 176400, 192000, 352800, 384000, 2822400, 5644800
};
struct msm_compr_pdata {
@@ -170,7 +170,8 @@ struct msm_compr_audio {
};
const u32 compr_codecs[] = {
- SND_AUDIOCODEC_AC3, SND_AUDIOCODEC_EAC3, SND_AUDIOCODEC_DTS};
+ SND_AUDIOCODEC_AC3, SND_AUDIOCODEC_EAC3, SND_AUDIOCODEC_DTS,
+ SND_AUDIOCODEC_DSD};
struct query_audio_effect {
uint32_t mod_id;
@@ -642,7 +643,7 @@ static void populate_codec_list(struct msm_compr_audio *prtd)
COMPR_PLAYBACK_MIN_NUM_FRAGMENTS;
prtd->compr_cap.max_fragments =
COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
- prtd->compr_cap.num_codecs = 13;
+ prtd->compr_cap.num_codecs = 14;
prtd->compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
prtd->compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
prtd->compr_cap.codecs[2] = SND_AUDIOCODEC_AC3;
@@ -656,6 +657,7 @@ static void populate_codec_list(struct msm_compr_audio *prtd)
prtd->compr_cap.codecs[10] = SND_AUDIOCODEC_ALAC;
prtd->compr_cap.codecs[11] = SND_AUDIOCODEC_APE;
prtd->compr_cap.codecs[12] = SND_AUDIOCODEC_DTS;
+ prtd->compr_cap.codecs[13] = SND_AUDIOCODEC_DSD;
}
static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
@@ -674,6 +676,7 @@ static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
struct asm_vorbis_cfg vorbis_cfg;
struct asm_alac_cfg alac_cfg;
struct asm_ape_cfg ape_cfg;
+ struct asm_dsd_cfg dsd_cfg;
union snd_codec_options *codec_options;
int ret = 0;
@@ -885,7 +888,20 @@ static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
pr_debug("SND_AUDIOCODEC_DTS\n");
/* no media format block needed */
break;
-
+ case FORMAT_DSD:
+ pr_debug("%s: SND_AUDIOCODEC_DSD\n", __func__);
+ memset(&dsd_cfg, 0x0, sizeof(struct asm_dsd_cfg));
+ dsd_cfg.num_channels = prtd->num_channels;
+ dsd_cfg.dsd_data_rate = prtd->sample_rate;
+ dsd_cfg.num_version = 0;
+ dsd_cfg.is_bitwise_big_endian = 1;
+ dsd_cfg.dsd_channel_block_size = 1;
+ ret = q6asm_media_format_block_dsd(prtd->audio_client,
+ &dsd_cfg, stream_id);
+ if (ret < 0)
+ pr_err("%s: CMD DSD Format block failed ret %d\n",
+ __func__, ret);
+ break;
default:
pr_debug("%s, unsupported format, skip", __func__);
break;
@@ -1298,8 +1314,8 @@ static int msm_compr_set_params(struct snd_compr_stream *cstream,
prtd->sample_rate = prtd->codec_param.codec.sample_rate;
pr_debug("%s: sample_rate %d\n", __func__, prtd->sample_rate);
- if (prtd->codec_param.codec.compr_passthr >= 0 &&
- prtd->codec_param.codec.compr_passthr <= 2)
+ if (prtd->codec_param.codec.compr_passthr >= LEGACY_PCM &&
+ prtd->codec_param.codec.compr_passthr <= COMPRESSED_PASSTHROUGH_DSD)
prtd->compr_passthr = prtd->codec_param.codec.compr_passthr;
else
prtd->compr_passthr = LEGACY_PCM;
@@ -1410,6 +1426,12 @@ static int msm_compr_set_params(struct snd_compr_stream *cstream,
break;
}
+ case SND_AUDIOCODEC_DSD: {
+ pr_debug("%s: SND_AUDIOCODEC_DSD\n", __func__);
+ prtd->codec = FORMAT_DSD;
+ break;
+ }
+
default:
pr_err("codec not supported, id =%d\n", params->codec.id);
return -EINVAL;
@@ -2199,6 +2221,8 @@ static int msm_compr_get_codec_caps(struct snd_compr_stream *cstream,
break;
case SND_AUDIOCODEC_DTS:
break;
+ case SND_AUDIOCODEC_DSD:
+ break;
default:
pr_err("%s: Unsupported audio codec %d\n",
__func__, codec->codec);
@@ -2675,6 +2699,7 @@ static int msm_compr_dec_params_put(struct snd_kcontrol *kcontrol,
case FORMAT_ALAC:
case FORMAT_APE:
case FORMAT_DTS:
+ case FORMAT_DSD:
pr_debug("%s: no runtime parameters for codec: %d\n", __func__,
prtd->codec);
break;
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index c439c5cf2de5..51ebd039d96b 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -230,10 +230,11 @@ static const struct soc_enum mi2s_config_enum[] = {
static const char *const sb_format[] = {
"UNPACKED",
"PACKED_16B",
+ "DSD_DOP",
};
static const struct soc_enum sb_config_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, sb_format),
+ SOC_ENUM_SINGLE_EXT(3, sb_format),
};
static const char *const tdm_data_format[] = {
@@ -2129,7 +2130,10 @@ static const struct snd_kcontrol_new sb_config_controls[] = {
msm_dai_q6_sb_format_put),
SOC_ENUM_EXT("SLIM_2_RX SetCalMode", slim_2_rx_enum,
msm_dai_q6_cal_info_get,
- msm_dai_q6_cal_info_put)
+ msm_dai_q6_cal_info_put),
+ SOC_ENUM_EXT("SLIM_2_RX Format", sb_config_enum[0],
+ msm_dai_q6_sb_format_get,
+ msm_dai_q6_sb_format_put)
};
static const struct snd_kcontrol_new rt_proxy_config_controls[] = {
@@ -2185,6 +2189,9 @@ static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
rc = snd_ctl_add(dai->component->card->snd_card,
snd_ctl_new1(&sb_config_controls[1],
dai_data));
+ rc = snd_ctl_add(dai->component->card->snd_card,
+ snd_ctl_new1(&sb_config_controls[2],
+ dai_data));
break;
case SLIMBUS_7_RX:
rc = snd_ctl_add(dai->component->card->snd_card,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 9e3d85b807aa..695f57b30322 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -270,6 +270,7 @@ struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
{ SECONDARY_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
{ SLIMBUS_1_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
{ SLIMBUS_1_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
+ { SLIMBUS_2_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
{ SLIMBUS_4_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
{ SLIMBUS_4_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
{ SLIMBUS_3_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
@@ -851,7 +852,7 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
msm_pcm_routing_get_app_type_idx(
app_type);
sample_rate =
- app_type_cfg[app_type_idx].sample_rate;
+ fe_dai_app_type_cfg[fe_id].sample_rate;
bit_width =
app_type_cfg[app_type_idx].bit_width;
} else {
@@ -860,6 +861,8 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
acdb_dev_id = fe_dai_app_type_cfg[fe_id].acdb_dev_id;
topology = msm_routing_get_adm_topology(path_type,
fe_id);
+ if (compr_passthr_mode == COMPRESSED_PASSTHROUGH_DSD)
+ topology = COMPRESS_PASSTHROUGH_NONE_TOPOLOGY;
pr_err("%s: Before adm open topology %d\n", __func__,
topology);
@@ -897,8 +900,11 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
num_copps++;
}
}
- msm_routing_send_device_pp_params(msm_bedais[i].port_id,
- copp_idx);
+ if (compr_passthr_mode != COMPRESSED_PASSTHROUGH_DSD) {
+ msm_routing_send_device_pp_params(
+ msm_bedais[i].port_id,
+ copp_idx);
+ }
}
}
if (num_copps) {
@@ -2414,6 +2420,57 @@ static const struct snd_kcontrol_new spdif_rx_mixer_controls[] = {
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new slimbus_2_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new slimbus_5_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_5_RX ,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -7375,6 +7432,7 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
0, 0, 0 , 0),
SND_SOC_DAPM_AIF_OUT("SPDIF_RX", "SPDIF Playback", 0, 0, 0 , 0),
SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("HDMI", "HDMI Playback", 0, 0, 0 , 0),
SND_SOC_DAPM_AIF_OUT("MI2S_RX", "MI2S Playback", 0, 0, 0, 0),
@@ -7643,6 +7701,8 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
sec_i2s_rx_mixer_controls, ARRAY_SIZE(sec_i2s_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
slimbus_rx_mixer_controls, ARRAY_SIZE(slimbus_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_2_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_2_rx_mixer_controls, ARRAY_SIZE(slimbus_2_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("SLIMBUS_5_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
slimbus_5_rx_mixer_controls, ARRAY_SIZE(slimbus_5_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("SLIMBUS_7_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -8000,6 +8060,24 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SLIMBUS_0_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
{"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Audio Mixer"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SLIMBUS_2_RX", NULL, "SLIMBUS_2_RX Audio Mixer"},
+
{"SLIMBUS_5_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"SLIMBUS_5_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
{"SLIMBUS_5_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -9484,6 +9562,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"BE_OUT", NULL, "SEC_I2S_RX"},
{"BE_OUT", NULL, "SLIMBUS_0_RX"},
{"BE_OUT", NULL, "SLIMBUS_1_RX"},
+ {"BE_OUT", NULL, "SLIMBUS_2_RX"},
{"BE_OUT", NULL, "SLIMBUS_3_RX"},
{"BE_OUT", NULL, "SLIMBUS_4_RX"},
{"BE_OUT", NULL, "SLIMBUS_5_RX"},
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index f422fd7d3e85..009eebede28a 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -53,6 +53,7 @@
#define LPASS_BE_SLIMBUS_1_RX "SLIMBUS_1_RX"
#define LPASS_BE_SLIMBUS_1_TX "SLIMBUS_1_TX"
#define LPASS_BE_STUB_1_TX "STUB_1_TX"
+#define LPASS_BE_SLIMBUS_2_RX "SLIMBUS_2_RX"
#define LPASS_BE_SLIMBUS_3_RX "SLIMBUS_3_RX"
#define LPASS_BE_SLIMBUS_3_TX "SLIMBUS_3_TX"
#define LPASS_BE_SLIMBUS_4_RX "SLIMBUS_4_RX"
@@ -211,6 +212,7 @@ enum {
MSM_BACKEND_DAI_SEC_I2S_RX,
MSM_BACKEND_DAI_SLIMBUS_1_RX,
MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_BACKEND_DAI_SLIMBUS_2_RX,
MSM_BACKEND_DAI_SLIMBUS_4_RX,
MSM_BACKEND_DAI_SLIMBUS_4_TX,
MSM_BACKEND_DAI_SLIMBUS_3_RX,
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 6409b81c0764..20d3f5212323 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -2397,6 +2397,9 @@ int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format,
case FORMAT_DTS:
open.fmt_id = ASM_MEDIA_FMT_DTS;
break;
+ case FORMAT_DSD:
+ open.fmt_id = ASM_MEDIA_FMT_DSD;
+ break;
default:
pr_err("%s: Invalid format[%d]\n", __func__, format);
rc = -EINVAL;
@@ -2404,7 +2407,8 @@ int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format,
}
/*Below flag indicates the DSP that Compressed audio input
stream is not IEC 61937 or IEC 60958 packetizied*/
- if (passthrough_flag == COMPRESSED_PASSTHROUGH) {
+ if (passthrough_flag == COMPRESSED_PASSTHROUGH ||
+ passthrough_flag == COMPRESSED_PASSTHROUGH_DSD) {
open.flags = 0x0;
pr_debug("%s: Flag 0 COMPRESSED_PASSTHROUGH\n", __func__);
} else if (passthrough_flag == COMPRESSED_PASSTHROUGH_CONVERT) {
@@ -2568,6 +2572,9 @@ static int __q6asm_open_write(struct audio_client *ac, uint32_t format,
case FORMAT_APE:
open.dec_fmt_id = ASM_MEDIA_FMT_APE;
break;
+ case FORMAT_DSD:
+ open.dec_fmt_id = ASM_MEDIA_FMT_DSD;
+ break;
default:
pr_err("%s: Invalid format 0x%x\n", __func__, format);
rc = -EINVAL;
@@ -2748,6 +2755,9 @@ static int __q6asm_open_read_write(struct audio_client *ac, uint32_t rd_format,
case FORMAT_APE:
open.dec_fmt_id = ASM_MEDIA_FMT_APE;
break;
+ case FORMAT_DSD:
+ open.dec_fmt_id = ASM_MEDIA_FMT_DSD;
+ break;
default:
pr_err("%s: Invalid format 0x%x\n",
__func__, wr_format);
@@ -5041,6 +5051,66 @@ fail_cmd:
return rc;
}
+/*
+ * q6asm_media_format_block_dsd - Sends DSD Decoder
+ * configuration parameters
+ *
+ * @ac: Client session handle
+ * @cfg: DSD Media Format Configuration.
+ * @stream_id: stream id of stream to be associated with this session
+ *
+ * Return 0 on success or negative error code on failure
+ */
+int q6asm_media_format_block_dsd(struct audio_client *ac,
+ struct asm_dsd_cfg *cfg, int stream_id)
+{
+ struct asm_dsd_fmt_blk_v2 fmt;
+ int rc;
+
+ pr_debug("%s: session[%d] data_rate[%d] ch[%d]\n", __func__,
+ ac->session, cfg->dsd_data_rate, cfg->num_channels);
+
+ memset(&fmt, 0, sizeof(fmt));
+ q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+ sizeof(fmt.fmtblk);
+
+ fmt.num_version = cfg->num_version;
+ fmt.is_bitwise_big_endian = cfg->is_bitwise_big_endian;
+ fmt.dsd_channel_block_size = cfg->dsd_channel_block_size;
+ fmt.num_channels = cfg->num_channels;
+ fmt.dsd_data_rate = cfg->dsd_data_rate;
+ atomic_set(&ac->cmd_state, -1);
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s: Command DSD media format update failed, err: %d\n",
+ __func__, rc);
+ goto done;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout. waited for DSD FORMAT_UPDATE\n", __func__);
+ rc = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (atomic_read(&ac->cmd_state) > 0) {
+ pr_err("%s: DSP returned error[%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state)));
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&ac->cmd_state));
+ goto done;
+ }
+ return 0;
+done:
+ return rc;
+}
+EXPORT_SYMBOL(q6asm_media_format_block_dsd);
+
static int __q6asm_ds1_set_endp_params(struct audio_client *ac, int param_id,
int param_value, int stream_id)
{