Diffstat:
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-msm.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/sound/qcom-audio-dev.txt | 4
-rw-r--r--  arch/arm/boot/dts/qcom/Makefile | 2
-rw-r--r--  arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996.dtsi | 53
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-camera.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-usbc-audio-rcm.dts | 30
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630.dtsi | 10
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-usbc-audio-rcm.dts | 30
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660.dtsi | 10
-rw-r--r--  arch/arm64/configs/msmcortex-perf_defconfig | 1
-rw-r--r--  arch/arm64/configs/msmcortex_defconfig | 1
-rw-r--r--  block/genhd.c | 16
-rw-r--r--  drivers/base/cpu.c | 2
-rw-r--r--  drivers/char/adsprpc.c | 7
-rw-r--r--  drivers/clk/msm/clock-gcc-8998.c | 14
-rw-r--r--  drivers/gpu/msm/adreno.c | 4
-rw-r--r--  drivers/gpu/msm/adreno.h | 2
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c | 15
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c | 6
-rw-r--r--  drivers/gpu/msm/kgsl_device.h | 2
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c | 26
-rw-r--r--  drivers/input/misc/vl53L0/stmvl53l0.h | 1
-rw-r--r--  drivers/input/misc/vl53L0/stmvl53l0_module-cci.c | 14
-rw-r--r--  drivers/input/misc/vl53L0/stmvl53l0_module.c | 12
-rw-r--r--  drivers/mfd/wcd934x-regmap.c | 15
-rw-r--r--  drivers/mmc/card/block.c | 58
-rw-r--r--  drivers/mmc/core/bus.c | 1
-rw-r--r--  drivers/mmc/core/core.c | 11
-rw-r--r--  drivers/mmc/core/core.h | 1
-rw-r--r--  drivers/mmc/core/mmc.c | 1
-rw-r--r--  drivers/mmc/core/sd.c | 18
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-msm.h | 1
-rw-r--r--  drivers/mmc/host/sdhci.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi.c | 637
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi.h | 139
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 34
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.h | 69
-rw-r--r--  drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.c | 2091
-rw-r--r--  drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.h | 619
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 5
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 17
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h | 6
-rw-r--r--  drivers/pci/host/pci-msm.c | 145
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-lpi.c | 6
-rw-r--r--  drivers/platform/msm/gsi/gsi.c | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 3
-rw-r--r--  drivers/platform/msm/mhi/mhi.h | 81
-rw-r--r--  drivers/platform/msm/mhi/mhi_bhi.c | 176
-rw-r--r--  drivers/platform/msm/mhi/mhi_bhi.h | 3
-rw-r--r--  drivers/platform/msm/mhi/mhi_event.c | 8
-rw-r--r--  drivers/platform/msm/mhi/mhi_iface.c | 1
-rw-r--r--  drivers/platform/msm/mhi/mhi_isr.c | 117
-rw-r--r--  drivers/platform/msm/mhi/mhi_main.c | 183
-rw-r--r--  drivers/platform/msm/mhi/mhi_pm.c | 171
-rw-r--r--  drivers/platform/msm/mhi/mhi_ssr.c | 368
-rw-r--r--  drivers/platform/msm/mhi/mhi_states.c | 399
-rw-r--r--  drivers/platform/msm/mhi/mhi_sys.c | 18
-rw-r--r--  drivers/platform/msm/mhi_uci/mhi_uci.c | 1037
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c | 10
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 3
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.c | 16
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h | 10
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 52
-rw-r--r--  drivers/soc/qcom/qdsp6v2/apr_tal_glink.c | 3
-rw-r--r--  drivers/soc/qcom/service-locator.c | 6
-rw-r--r--  drivers/thermal/msm_thermal-dev.c | 7
-rw-r--r--  drivers/usb/dwc3/core.c | 34
-rw-r--r--  drivers/usb/dwc3/core.h | 31
-rw-r--r--  drivers/usb/dwc3/dwc3-msm.c | 3
-rw-r--r--  drivers/usb/dwc3/gadget.c | 30
-rw-r--r--  drivers/usb/dwc3/gadget.h | 1
-rw-r--r--  drivers/usb/gadget/configfs.c | 17
-rw-r--r--  drivers/usb/host/xhci-plat.c | 4
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_overlay.c | 13
-rw-r--r--  include/linux/mmc/card.h | 3
-rw-r--r--  include/linux/mmc/core.h | 1
-rw-r--r--  include/linux/mmc/host.h | 11
-rw-r--r--  include/linux/msm_mhi.h | 31
-rw-r--r--  sound/soc/codecs/wcd-mbhc-v2.c | 37
-rw-r--r--  sound/soc/codecs/wcd934x/wcd934x.c | 18
-rw-r--r--  sound/soc/codecs/wcd9xxx-resmgr-v2.c | 8
-rw-r--r--  sound/soc/codecs/wcd9xxx-resmgr-v2.h | 5
-rw-r--r--  sound/soc/msm/apq8096-auto.c | 351
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-lsm-client.c | 27
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c | 74
-rw-r--r--  sound/soc/msm/qdsp6v2/q6asm.c | 14
96 files changed, 6366 insertions(+), 1274 deletions(-)
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 0b46fd3d8ebf..d00e26b4d5ed 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -75,6 +75,11 @@ Optional Properties:
during clock scaling. If this property is not
defined, then it falls back to the default HS
bus speed mode to maintain backward compatibility.
+ - qcom,sdr104-wa: On certain chipsets, SDR104 mode might be unstable, causing CRC errors
+ on the interface. So there is a workaround implemented to skip printing
+ register dumps on CRC errors and also downgrade bus speed mode to
+ SDR50/DDR50 in case of continuous CRC errors. Set this flag to enable
+ this workaround.
In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage).
- qcom,<supply>-always-on - specifies whether supply should be kept "on" always.
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 6f0d99d560cd..acf12239c813 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2107,7 +2107,9 @@ Required properties:
"qcom,apq8096-asoc-snd-adp-agave" for adp agave codec and
node is "sound-adp-agave",
"qcom,apq8096-asoc-snd-adp-mmxf" for adp mmxf codec and
- node is "sound-adp-mmxf".
+ node is "sound-adp-mmxf",
+ "qcom,apq8096-asoc-snd-auto-custom" for auto custom codec and
+ node is "sound-auto-custom".
- qcom,model : The user-visible name of this sound card.
- asoc-platform: This is phandle list containing the references to platform device
nodes that are used as part of the sound card dai-links.
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index a2e7311705e3..74aefe4e616d 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -166,6 +166,7 @@ dtb-$(CONFIG_ARCH_SDM660) += sdm660-sim.dtb \
sdm660-pm660a-headset-jacktype-no-cdp.dtb \
sdm660-pm660a-headset-jacktype-no-rcm.dtb \
sdm660-usbc-audio-mtp.dtb \
+ sdm660-usbc-audio-rcm.dtb \
sdm658-mtp.dtb \
sdm658-cdp.dtb \
sdm658-rcm.dtb \
@@ -191,6 +192,7 @@ dtb-$(CONFIG_ARCH_SDM630) += sdm630-rumi.dtb \
sdm630-pm660a-rumi.dtb \
sdm630-mtp.dtb \
sdm630-usbc-audio-mtp.dtb \
+ sdm630-usbc-audio-rcm.dtb \
sdm630-cdp.dtb \
sdm630-rcm.dtb \
sdm630-internal-codec-mtp.dtb \
diff --git a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
index b7a3d3f5cba5..e74aded8c9e3 100644
--- a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
+++ b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -624,11 +624,13 @@
"msm-pcm-routing", "msm-compr-dsp",
"msm-pcm-loopback.1";
asoc-cpu = <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, <&dai_hdmi>,
- <&dai_mi2s>, <&dai_mi2s_quat>,
+ <&dai_mi2s_sec>, <&dai_mi2s>, <&dai_mi2s_quat>,
<&afe_pcm_rx>, <&afe_pcm_tx>,
<&afe_proxy_rx>, <&afe_proxy_tx>,
<&incall_record_rx>, <&incall_record_tx>,
<&incall_music_rx>, <&incall_music2_rx>,
+ <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
+ <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
<&dai_tert_tdm_rx_2>, <&dai_tert_tdm_rx_3>,
<&dai_tert_tdm_tx_0>, <&dai_tert_tdm_tx_1>,
@@ -638,12 +640,14 @@
<&dai_quat_tdm_tx_0>, <&dai_quat_tdm_tx_1>,
<&dai_quat_tdm_tx_2>, <&dai_quat_tdm_tx_3>;
asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
- "msm-dai-q6-hdmi.8",
+ "msm-dai-q6-hdmi.8", "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
+ "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
"msm-dai-q6-tdm.36900", "msm-dai-q6-tdm.36902",
"msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36899",
@@ -668,6 +672,16 @@
};
qcom,msm-dai-mi2s {
+ dai_mi2s_sec: qcom,msm-dai-q6-mi2s-sec {
+ qcom,msm-mi2s-rx-lines = <2>;
+ qcom,msm-mi2s-tx-lines = <1>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sec_mi2s_active &sec_mi2s_sd0_active
+ &sec_mi2s_sd1_active>;
+ pinctrl-1 = <&sec_mi2s_sleep &sec_mi2s_sd0_sleep
+ &sec_mi2s_sd1_sleep>;
+ };
+
dai_mi2s_quat: qcom,msm-dai-q6-mi2s-quat {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&quat_mi2s_active &quat_mi2s_sd0_active>;
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
index 03801ee90589..3888047b9f8c 100644
--- a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
@@ -14,7 +14,7 @@ qcom,itech_3000mah {
/* #Itech_B00826LF_3000mAh_ver1660_averaged_MasterSlave_Jan10th2017*/
qcom,max-voltage-uv = <4350000>;
qcom,fg-cc-cv-threshold-mv = <4340>;
- qcom,fastchg-current-ma = <3000>;
+ qcom,fastchg-current-ma = <2000>;
qcom,batt-id-kohm = <100>;
qcom,battery-beta = <3435>;
qcom,battery-type = "itech_b00826lf_3000mah_ver1660_jan10th2017";
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi
index 8cbb29aac927..11600ef2bfd3 100644
--- a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi
@@ -13,6 +13,7 @@
qcom,qrd_msm8998_skuk_3000mah {
/* QRD8997_ST1031GA_3000mAh_averaged_MasterSlave_Jan10th2017 */
qcom,max-voltage-uv = <4400000>;
+ qcom,fg-cc-cv-threshold-mv = <4390>;
qcom,fastchg-current-ma = <3000>;
qcom,batt-id-kohm = <68>;
qcom,battery-beta = <3380>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 8d7309e96c0f..d007e8bcfc33 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -607,11 +607,13 @@
"msm-pcm-routing", "msm-compr-dsp",
"msm-pcm-loopback.1";
asoc-cpu = <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, <&dai_hdmi>,
- <&dai_mi2s>, <&dai_mi2s_quat>,
+ <&dai_mi2s_sec>, <&dai_mi2s>, <&dai_mi2s_quat>,
<&afe_pcm_rx>, <&afe_pcm_tx>,
<&afe_proxy_rx>, <&afe_proxy_tx>,
<&incall_record_rx>, <&incall_record_tx>,
<&incall_music_rx>, <&incall_music2_rx>,
+ <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
+ <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
<&dai_tert_tdm_rx_2>, <&dai_tert_tdm_rx_3>,
<&dai_tert_tdm_tx_0>, <&dai_tert_tdm_tx_1>,
@@ -621,12 +623,14 @@
<&dai_quat_tdm_tx_0>, <&dai_quat_tdm_tx_1>,
<&dai_quat_tdm_tx_2>, <&dai_quat_tdm_tx_3>;
asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
- "msm-dai-q6-hdmi.8",
+ "msm-dai-q6-hdmi.8", "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
+ "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
"msm-dai-q6-tdm.36900", "msm-dai-q6-tdm.36902",
"msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36899",
@@ -651,6 +655,8 @@
qcom,msm-dai-mi2s {
dai_mi2s_sec: qcom,msm-dai-q6-mi2s-sec {
+ qcom,msm-mi2s-rx-lines = <2>;
+ qcom,msm-mi2s-tx-lines = <1>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&sec_mi2s_active &sec_mi2s_sd0_active
&sec_mi2s_sd1_active>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
index 84b4efd71253..97036ae144ae 100644
--- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -628,11 +628,13 @@
"msm-pcm-routing", "msm-compr-dsp",
"msm-pcm-loopback.1";
asoc-cpu = <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, <&dai_hdmi>,
- <&dai_mi2s>, <&dai_mi2s_quat>,
+ <&dai_mi2s_sec>, <&dai_mi2s>, <&dai_mi2s_quat>,
<&afe_pcm_rx>, <&afe_pcm_tx>,
<&afe_proxy_rx>, <&afe_proxy_tx>,
<&incall_record_rx>, <&incall_record_tx>,
<&incall_music_rx>, <&incall_music2_rx>,
+ <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
+ <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
<&dai_tert_tdm_rx_2>, <&dai_tert_tdm_rx_3>,
<&dai_tert_tdm_tx_0>, <&dai_tert_tdm_tx_1>,
@@ -642,12 +644,14 @@
<&dai_quat_tdm_tx_0>, <&dai_quat_tdm_tx_1>,
<&dai_quat_tdm_tx_2>, <&dai_quat_tdm_tx_3>;
asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
- "msm-dai-q6-hdmi.8",
+ "msm-dai-q6-hdmi.8", "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
+ "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
"msm-dai-q6-tdm.36900", "msm-dai-q6-tdm.36902",
"msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36899",
@@ -688,6 +692,16 @@
};
qcom,msm-dai-mi2s {
+ dai_mi2s_sec: qcom,msm-dai-q6-mi2s-sec {
+ qcom,msm-mi2s-rx-lines = <2>;
+ qcom,msm-mi2s-tx-lines = <1>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sec_mi2s_active &sec_mi2s_sd0_active
+ &sec_mi2s_sd1_active>;
+ pinctrl-1 = <&sec_mi2s_sleep &sec_mi2s_sd0_sleep
+ &sec_mi2s_sd1_sleep>;
+ };
+
dai_mi2s_quat: qcom,msm-dai-q6-mi2s-quat {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&quat_mi2s_active &quat_mi2s_sd0_active>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
index 7370422d737e..b6e6fb4193b4 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -533,11 +533,13 @@
"msm-pcm-routing", "msm-compr-dsp",
"msm-pcm-loopback.1";
asoc-cpu = <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, <&dai_hdmi>,
- <&dai_mi2s>, <&dai_mi2s_quat>,
+ <&dai_mi2s_sec>, <&dai_mi2s>, <&dai_mi2s_quat>,
<&afe_pcm_rx>, <&afe_pcm_tx>,
<&afe_proxy_rx>, <&afe_proxy_tx>,
<&incall_record_rx>, <&incall_record_tx>,
<&incall_music_rx>, <&incall_music2_rx>,
+ <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
+ <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
<&dai_tert_tdm_rx_2>, <&dai_tert_tdm_rx_3>,
<&dai_tert_tdm_tx_0>, <&dai_tert_tdm_tx_1>,
@@ -547,12 +549,14 @@
<&dai_quat_tdm_tx_0>, <&dai_quat_tdm_tx_1>,
<&dai_quat_tdm_tx_2>, <&dai_quat_tdm_tx_3>;
asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
- "msm-dai-q6-hdmi.8",
+ "msm-dai-q6-hdmi.8", "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
+ "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
"msm-dai-q6-tdm.36900", "msm-dai-q6-tdm.36902",
"msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36899",
@@ -571,6 +575,16 @@
};
qcom,msm-dai-mi2s {
+ dai_mi2s_sec: qcom,msm-dai-q6-mi2s-sec {
+ qcom,msm-mi2s-rx-lines = <2>;
+ qcom,msm-mi2s-tx-lines = <1>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sec_mi2s_active &sec_mi2s_sd0_active
+ &sec_mi2s_sd1_active>;
+ pinctrl-1 = <&sec_mi2s_sleep &sec_mi2s_sd0_sleep
+ &sec_mi2s_sd1_sleep>;
+ };
+
dai_mi2s_quat: qcom,msm-dai-q6-mi2s-quat {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&quat_mi2s_active &quat_mi2s_sd0_active>;
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index 49eafeaa5d70..7c0f8e3c331f 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3337,6 +3337,57 @@
};
};
+ qcom,msm-dai-tdm-sec-tx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37137>;
+ qcom,msm-cpudai-tdm-group-num-ports = <4>;
+ qcom,msm-cpudai-tdm-group-port-id = <36881 36883 36885 36887>;
+ qcom,msm-cpudai-tdm-clk-rate = <0>;
+ dai_sec_tdm_tx_0: qcom,msm-dai-q6-tdm-sec-tx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36881>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <0>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <0>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_tx_1: qcom,msm-dai-q6-tdm-sec-tx-1 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36883>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <0>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <0>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_tx_2: qcom,msm-dai-q6-tdm-sec-tx-2 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36885>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <0>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <0>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_tx_3: qcom,msm-dai-q6-tdm-sec-tx-3 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36887>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <0>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <0>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
qcom,msm-dai-tdm-tert-rx {
compatible = "qcom,msm-dai-tdm";
qcom,msm-cpudai-tdm-group-id = <37152>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
index 2ed0f2250de5..d7372f2bb2e4 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
@@ -99,7 +99,6 @@
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_actuator_vaf_active>;
pinctrl-1 = <&cam_actuator_vaf_suspend>;
- status = "disabled";
};
eeprom0: qcom,eeprom@0 {
@@ -278,6 +277,7 @@
qcom,csid-sd-index = <1>;
qcom,mount-angle = <90>;
qcom,eeprom-src = <&eeprom1>;
+ qcom,led-flash-src = <&led_flash0>;
qcom,actuator-src = <&actuator1>;
cam_vdig-supply = <&pm8998_lvs1>;
cam_vio-supply = <&pm8998_lvs1>;
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index 02b7a44ee0d2..d1194b3ffcec 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -1575,6 +1575,8 @@
qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
100000000 200000000 4294967295>;
+ qcom,sdr104-wa;
+
status = "disabled";
};
diff --git a/arch/arm/boot/dts/qcom/sdm630-camera.dtsi b/arch/arm/boot/dts/qcom/sdm630-camera.dtsi
index 8b226586ca7b..75b30791ffdc 100644
--- a/arch/arm/boot/dts/qcom/sdm630-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-camera.dtsi
@@ -54,8 +54,8 @@
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk",
"csiphy_ahb2crif";
- qcom,clock-rates = <0 0 0 0 0 0 384000000 0 0 269333333 0
- 0 384000000 0 0>;
+ qcom,clock-rates = <0 0 0 0 0 0 310000000 0 0 269333333 0
+ 0 200000000 0 0>;
status = "ok";
};
@@ -92,8 +92,8 @@
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk",
"csiphy_ahb2crif";
- qcom,clock-rates = <0 0 0 0 0 0 384000000 0 0 269333333 0
- 0 384000000 0 0>;
+ qcom,clock-rates = <0 0 0 0 0 0 310000000 0 0 269333333 0
+ 0 200000000 0 0>;
status = "ok";
};
@@ -130,8 +130,8 @@
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk",
"csiphy_ahb2crif";
- qcom,clock-rates = <0 0 0 0 0 0 384000000 0 0 269333333 0
- 0 384000000 0 0>;
+ qcom,clock-rates = <0 0 0 0 0 0 310000000 0 0 269333333 0
+ 0 200000000 0 0>;
status = "ok";
};
@@ -171,7 +171,7 @@
"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
"csi_pix_clk", "cphy_csid_clk";
- qcom,clock-rates = <0 0 0 0 0 0 0 384000000 384000000
+ qcom,clock-rates = <0 0 0 0 0 0 0 310000000 200000000
0 0 0 0 0>;
status = "ok";
};
@@ -212,7 +212,7 @@
"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
"csi_pix_clk", "cphy_csid_clk";
- qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ qcom,clock-rates = <0 0 0 0 0 0 0 310000000 200000000
0 0 0 0 0>;
status = "ok";
};
@@ -253,7 +253,7 @@
"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
"csi_pix_clk", "cphy_csid_clk";
- qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ qcom,clock-rates = <0 0 0 0 0 0 0 310000000 200000000
0 0 0 0 0>;
status = "ok";
};
@@ -294,7 +294,7 @@
"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
"csi_pix_clk", "cphy_csid_clk";
- qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ qcom,clock-rates = <0 0 0 0 0 0 0 310000000 200000000
0 0 0 0 0>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/sdm630-usbc-audio-rcm.dts b/arch/arm/boot/dts/qcom/sdm630-usbc-audio-rcm.dts
new file mode 100644
index 000000000000..6c944305acff
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm630-usbc-audio-rcm.dts
@@ -0,0 +1,30 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm630.dtsi"
+#include "sdm630-cdp.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660L, USBC Audio, RCM";
+ compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp";
+ qcom,board-id = <21 3>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
+
+&tavil_snd {
+ qcom,msm-mbhc-usbc-audio-supported = <1>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index 9626e0548789..67e899d8ba5e 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -1628,6 +1628,8 @@
reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
iommus = <&anoc2_smmu 0x1a00>,
<&anoc2_smmu 0x1a01>;
+ clocks = <&clock_rpmcc RPM_RF_CLK1_PIN>;
+ clock-names = "cxo_ref_clk_pin";
interrupts = <0 413 0>, /* CE0 */
<0 414 0>, /* CE1 */
<0 415 0>, /* CE2 */
@@ -1640,6 +1642,14 @@
<0 423 0>, /* CE9 */
<0 424 0>, /* CE10 */
<0 425 0>; /* CE11 */
+ vdd-0.8-cx-mx-supply = <&pm660_l5>;
+ vdd-1.8-xo-supply = <&pm660_l9_pin_ctrl>;
+ vdd-1.3-rfa-supply = <&pm660_l6_pin_ctrl>;
+ vdd-3.3-ch0-supply = <&pm660_l19_pin_ctrl>;
+ qcom,vdd-0.8-cx-mx-config = <525000 950000>;
+ qcom,vdd-1.8-xo-config = <1750000 1900000>;
+ qcom,vdd-1.3-rfa-config = <1200000 1370000>;
+ qcom,vdd-3.3-ch0-config = <3200000 3400000>;
qcom,wlan-msa-memory = <0x100000>;
qcom,smmu-s1-bypass;
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-usbc-audio-rcm.dts b/arch/arm/boot/dts/qcom/sdm660-usbc-audio-rcm.dts
new file mode 100644
index 000000000000..6528558e92ec
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm660-usbc-audio-rcm.dts
@@ -0,0 +1,30 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm660.dtsi"
+#include "sdm660-cdp.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM 660 PM660 + PM660L, USBC Audio, RCM";
+ compatible = "qcom,sdm660-cdp", "qcom,sdm660", "qcom,cdp";
+ qcom,board-id = <21 3>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
+
+&tavil_snd {
+ qcom,msm-mbhc-usbc-audio-supported = <1>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index 1e0b6136e1b4..be200f8dd531 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -1879,6 +1879,8 @@
reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
iommus = <&anoc2_smmu 0x1a00>,
<&anoc2_smmu 0x1a01>;
+ clocks = <&clock_rpmcc RPM_RF_CLK1_PIN>;
+ clock-names = "cxo_ref_clk_pin";
interrupts = <0 413 0>, /* CE0 */
<0 414 0>, /* CE1 */
<0 415 0>, /* CE2 */
@@ -1891,6 +1893,14 @@
<0 423 0>, /* CE9 */
<0 424 0>, /* CE10 */
<0 425 0>; /* CE11 */
+ vdd-0.8-cx-mx-supply = <&pm660_l5>;
+ vdd-1.8-xo-supply = <&pm660_l9_pin_ctrl>;
+ vdd-1.3-rfa-supply = <&pm660_l6_pin_ctrl>;
+ vdd-3.3-ch0-supply = <&pm660_l19_pin_ctrl>;
+ qcom,vdd-0.8-cx-mx-config = <525000 950000>;
+ qcom,vdd-1.8-xo-config = <1750000 1900000>;
+ qcom,vdd-1.3-rfa-config = <1200000 1370000>;
+ qcom,vdd-3.3-ch0-config = <3200000 3400000>;
qcom,wlan-msa-memory = <0x100000>;
qcom,smmu-s1-bypass;
};
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 0f51c3b3e7d6..94f9a8edfd12 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -240,6 +240,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_QSEECOM=y
CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_CPUTIME=y
CONFIG_QPNP_MISC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 43ca4093117e..b86787f5f467 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -659,7 +659,6 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LIST=y
-CONFIG_RCU_STALL_WATCHDOG_BITE=y
CONFIG_FAULT_INJECTION=y
CONFIG_FAIL_PAGE_ALLOC=y
CONFIG_UFS_FAULT_INJECTION=y
diff --git a/block/genhd.c b/block/genhd.c
index dae7c9ed87e5..7f1e8f81ceb4 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -656,12 +656,16 @@ void del_gendisk(struct gendisk *disk)
disk->flags &= ~GENHD_FL_UP;
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
- /*
- * Unregister bdi before releasing device numbers (as they can get
- * reused and we'd get clashes in sysfs).
- */
- bdi_unregister(&disk->queue->backing_dev_info);
- blk_unregister_queue(disk);
+ if (disk->queue) {
+ /*
+ * Unregister bdi before releasing device numbers (as they can
+ * get reused and we'd get clashes in sysfs).
+ */
+ bdi_unregister(&disk->queue->backing_dev_info);
+ blk_unregister_queue(disk);
+ } else {
+ WARN_ON(1);
+ }
blk_unregister_region(disk_devt(disk), disk->minors);
part_stat_set_all(&disk->part0, 0);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 8882f0bc94a5..51a08995442d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -393,6 +393,7 @@ static struct cpu_attr cpu_attrs[] = {
_CPU_ATTR(online, &cpu_online_mask),
_CPU_ATTR(possible, &cpu_possible_mask),
_CPU_ATTR(present, &cpu_present_mask),
+ _CPU_ATTR(core_ctl_isolated, &cpu_isolated_mask),
};
/*
@@ -627,6 +628,7 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[0].attr.attr,
&cpu_attrs[1].attr.attr,
&cpu_attrs[2].attr.attr,
+ &cpu_attrs[3].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
&dev_attr_isolated.attr,
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index e0106a7e31fa..479599473381 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -553,7 +553,7 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map)
if (!IS_ERR_OR_NULL(map->handle))
ion_free(fl->apps->client, map->handle);
- if (sess->smmu.enabled) {
+ if (sess && sess->smmu.enabled) {
if (map->size || map->phys)
msm_dma_unmap_sg(sess->smmu.dev,
map->table->sgl,
@@ -645,6 +645,9 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
else
sess = fl->sctx;
+ VERIFY(err, !IS_ERR_OR_NULL(sess));
+ if (err)
+ goto bail;
VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
if (err)
goto bail;
@@ -2416,7 +2419,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
kref_init(&me->channel[cid].kref);
pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
MAJOR(me->dev_no), cid);
- if (me->channel[cid].ssrcount !=
+ if (cid == 0 && me->channel[cid].ssrcount !=
me->channel[cid].prevssrcount) {
if (fastrpc_mmap_remove_ssr(fl))
pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
diff --git a/drivers/clk/msm/clock-gcc-8998.c b/drivers/clk/msm/clock-gcc-8998.c
index f9d713a22c76..b1c8cc43769f 100644
--- a/drivers/clk/msm/clock-gcc-8998.c
+++ b/drivers/clk/msm/clock-gcc-8998.c
@@ -42,6 +42,7 @@ static void __iomem *virt_dbgbase;
#define gpll0_out_main_source_val 1
#define gpll0_ao_source_val 1
#define gpll4_out_main_source_val 5
+#define gpll0_early_div_source_val 6
#define FIXDIV(div) (div ? (2 * (div) - 1) : (0))
@@ -164,6 +165,7 @@ static struct pll_vote_clk gpll0_ao = {
};
DEFINE_EXT_CLK(gpll0_out_main, &gpll0.c);
+DEFINE_EXT_CLK(gpll0_early_div, &gpll0.c);
static struct local_vote_clk gcc_mmss_gpll0_clk = {
.cbcr_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
@@ -328,7 +330,7 @@ static struct clk_freq_tbl ftbl_blsp_qup_spi_apps_clk_src[] = {
F( 960000, cxo_clk_src, 10, 1, 2),
F( 4800000, cxo_clk_src, 4, 0, 0),
F( 9600000, cxo_clk_src, 2, 0, 0),
- F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 15000000, gpll0_early_div, 5, 1, 4),
F( 19200000, cxo_clk_src, 1, 0, 0),
F( 25000000, gpll0_out_main, 12, 1, 2),
F( 50000000, gpll0_out_main, 12, 0, 0),
@@ -496,10 +498,10 @@ static struct rcg_clk blsp1_qup6_spi_apps_clk_src = {
};
static struct clk_freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
- F( 3686400, gpll0_out_main, 1, 96, 15625),
- F( 7372800, gpll0_out_main, 1, 192, 15625),
- F( 14745600, gpll0_out_main, 1, 384, 15625),
- F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 3686400, gpll0_early_div, 1, 192, 15625),
+ F( 7372800, gpll0_early_div, 1, 384, 15625),
+ F( 14745600, gpll0_early_div, 1, 768, 15625),
+ F( 16000000, gpll0_early_div, 1, 4, 75),
F( 19200000, cxo_clk_src, 1, 0, 0),
F( 24000000, gpll0_out_main, 5, 1, 5),
F( 32000000, gpll0_out_main, 1, 4, 75),
@@ -2732,6 +2734,8 @@ static int msm_gcc_8998_probe(struct platform_device *pdev)
if (ret)
return ret;
+ gpll0_early_div.c.rate = 300000000;
+
ret = enable_rpm_scaling();
if (ret < 0)
return ret;
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 3faa5aaf9d03..89c7590ad121 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2675,11 +2675,11 @@ static void adreno_pwrlevel_change_settings(struct kgsl_device *device,
}
static void adreno_clk_set_options(struct kgsl_device *device, const char *name,
- struct clk *clk)
+ struct clk *clk, bool on)
{
if (ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options)
ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options(
- ADRENO_DEVICE(device), name, clk);
+ ADRENO_DEVICE(device), name, clk, on);
}
static void adreno_iommu_sync(struct kgsl_device *device, bool sync)
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 33fdb9ae11fa..218d08e6dfc3 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -789,7 +789,7 @@ struct adreno_gpudev {
void (*preemption_schedule)(struct adreno_device *);
void (*enable_64bit)(struct adreno_device *);
void (*clk_set_options)(struct adreno_device *,
- const char *, struct clk *);
+ const char *, struct clk *, bool on);
};
/**
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 87300096fbf1..0715022be6e3 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -1640,11 +1640,15 @@ static void a5xx_pwrlevel_change_settings(struct adreno_device *adreno_dev,
}
static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
- const char *name, struct clk *clk)
+ const char *name, struct clk *clk, bool on)
{
+
+ if (!adreno_is_a540(adreno_dev) && !adreno_is_a512(adreno_dev) &&
+ !adreno_is_a508(adreno_dev))
+ return;
+
/* Handle clock settings for GFX PSCBCs */
- if (adreno_is_a540(adreno_dev) || adreno_is_a512(adreno_dev) ||
- adreno_is_a508(adreno_dev)) {
+ if (on) {
if (!strcmp(name, "mem_iface_clk")) {
clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
@@ -1652,6 +1656,11 @@ static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH);
clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
}
+ } else {
+ if (!strcmp(name, "core_clk")) {
+ clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+ clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+ }
}
}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 161b718b8a38..d79d9613043f 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -813,10 +813,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
dwords += 6;
/*
- * REG_TO_MEM packet on A5xx needs another ordinal.
+ * REG_TO_MEM packet on A5xx and above needs another ordinal.
* Add 2 more dwords since we do profiling before and after.
*/
- if (adreno_is_a5xx(adreno_dev))
+ if (!ADRENO_LEGACY_PM4(adreno_dev))
dwords += 2;
/*
@@ -833,7 +833,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) {
kernel_profiling = true;
dwords += 6;
- if (adreno_is_a5xx(adreno_dev))
+ if (!ADRENO_LEGACY_PM4(adreno_dev))
dwords += 2;
}
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index aca484618268..177b283a2dda 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -166,7 +166,7 @@ struct kgsl_functable {
unsigned int prelevel, unsigned int postlevel, bool post);
void (*regulator_disable_poll)(struct kgsl_device *device);
void (*clk_set_options)(struct kgsl_device *device,
- const char *name, struct clk *clk);
+ const char *name, struct clk *clk, bool on);
void (*gpu_model)(struct kgsl_device *device, char *str,
size_t bufsz);
void (*stop_fault_timer)(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index e4c431546d2a..0150d50c925b 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -150,9 +150,6 @@ static void _ab_buslevel_update(struct kgsl_pwrctrl *pwr,
*ab = pwr->bus_ab_mbytes;
else
*ab = (pwr->bus_percent_ab * max_bw) / 100;
-
- if (*ab > ib)
- *ab = ib;
}
/**
@@ -2003,10 +2000,6 @@ static int _get_clocks(struct kgsl_device *device)
if (!strcmp(name, "isense_clk"))
pwr->isense_clk_indx = i;
-
- if (device->ftbl->clk_set_options)
- device->ftbl->clk_set_options(device, name,
- pwr->grp_clks[i]);
break;
}
}
@@ -2453,6 +2446,22 @@ static void kgsl_pwrctrl_disable(struct kgsl_device *device)
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
+static void
+kgsl_pwrctrl_clk_set_options(struct kgsl_device *device, bool on)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ int i;
+
+ for (i = 0; i < KGSL_MAX_CLKS; i++) {
+ if (pwr->grp_clks[i] == NULL)
+ continue;
+
+ if (device->ftbl->clk_set_options)
+ device->ftbl->clk_set_options(device, clocks[i],
+ pwr->grp_clks[i], on);
+ }
+}
+
/**
* _init() - Get the GPU ready to start, but don't turn anything on
* @device - Pointer to the kgsl_device struct
@@ -2499,6 +2508,7 @@ static int _wake(struct kgsl_device *device)
device->ftbl->resume(device);
/* fall through */
case KGSL_STATE_SLUMBER:
+ kgsl_pwrctrl_clk_set_options(device, true);
status = device->ftbl->start(device,
device->pwrctrl.superfast);
device->pwrctrl.superfast = false;
@@ -2535,6 +2545,7 @@ static int _wake(struct kgsl_device *device)
device->pwrctrl.interval_timeout);
break;
case KGSL_STATE_AWARE:
+ kgsl_pwrctrl_clk_set_options(device, true);
/* Enable state before turning on irq */
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
@@ -2649,6 +2660,7 @@ _slumber(struct kgsl_device *device)
status = kgsl_pwrctrl_enable(device);
device->ftbl->suspend_context(device);
device->ftbl->stop(device);
+ kgsl_pwrctrl_clk_set_options(device, false);
kgsl_pwrctrl_disable(device);
kgsl_pwrscale_sleep(device);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
diff --git a/drivers/input/misc/vl53L0/stmvl53l0.h b/drivers/input/misc/vl53L0/stmvl53l0.h
index ae517ebe461a..b15d78cc6825 100644
--- a/drivers/input/misc/vl53L0/stmvl53l0.h
+++ b/drivers/input/misc/vl53L0/stmvl53l0.h
@@ -131,6 +131,7 @@ struct stmvl53l0_data {
struct miscdevice miscdev;
int irq;
+ int irq_gpio;
unsigned int reset;
/* control flag from HAL */
diff --git a/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c b/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c
index e08edbcc73f9..79fba00ea086 100644
--- a/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c
+++ b/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c
@@ -248,6 +248,7 @@ static int stmvl53l0_cci_init(struct cci_data *data)
cci_client->retries = 3;
cci_client->id_map = 0;
cci_client->cci_i2c_master = data->cci_master;
+ cci_client->i2c_freq_mode = I2C_FAST_MODE;
rc = data->client->i2c_func_tbl->i2c_util(data->client, MSM_CCI_INIT);
if (rc < 0) {
vl53l0_errmsg("%d: CCI Init failed\n", __LINE__);
@@ -295,8 +296,20 @@ static int32_t stmvl53l0_platform_probe(struct platform_device *pdev)
rc = stmvl53l0_get_dt_data(&pdev->dev, cci_object);
if (rc < 0) {
vl53l0_errmsg("%d, failed rc %d\n", __LINE__, rc);
+ kfree(vl53l0_data->client_object);
+ kfree(vl53l0_data);
return rc;
}
+ vl53l0_data->irq_gpio = of_get_named_gpio_flags(pdev->dev.of_node,
+ "stm,irq-gpio", 0, NULL);
+
+ if (!gpio_is_valid(vl53l0_data->irq_gpio)) {
+ vl53l0_errmsg("%d failed get irq gpio", __LINE__);
+ kfree(vl53l0_data->client_object);
+ kfree(vl53l0_data);
+ return -EINVAL;
+ }
+
cci_object->subdev_id = pdev->id;
/* Set device type as platform device */
@@ -418,6 +431,7 @@ int stmvl53l0_power_up_cci(void *cci_object, unsigned int *preset_flag)
}
}
data->power_up = 1;
+ usleep_range(3000, 3500);
*preset_flag = 1;
vl53l0_dbgmsg("End\n");
diff --git a/drivers/input/misc/vl53L0/stmvl53l0_module.c b/drivers/input/misc/vl53L0/stmvl53l0_module.c
index f242e5f497d0..6881aba9fc64 100644
--- a/drivers/input/misc/vl53L0/stmvl53l0_module.c
+++ b/drivers/input/misc/vl53L0/stmvl53l0_module.c
@@ -38,8 +38,8 @@
#include "vl53l0_api.h"
#include "vl53l010_api.h"
-/*#define USE_INT */
-#define IRQ_NUM 59
+#define USE_INT
+
/* #define DEBUG_TIME_LOG */
#ifdef DEBUG_TIME_LOG
struct timeval start_tv, stop_tv;
@@ -2668,12 +2668,12 @@ int stmvl53l0_setup(struct stmvl53l0_data *data)
#ifdef USE_INT
/* init interrupt */
- gpio_request(IRQ_NUM, "vl53l0_gpio_int");
- gpio_direction_input(IRQ_NUM);
- irq = gpio_to_irq(IRQ_NUM);
+ gpio_request(data->irq_gpio, "vl53l0_gpio_int");
+ gpio_direction_input(data->irq_gpio);
+ irq = gpio_to_irq(data->irq_gpio);
if (irq < 0) {
vl53l0_errmsg("filed to map GPIO: %d to interrupt:%d\n",
- IRQ_NUM, irq);
+ data->irq_gpio, irq);
} else {
vl53l0_dbgmsg("register_irq:%d\n", irq);
/* IRQF_TRIGGER_FALLING- poliarity:0 IRQF_TRIGGER_RISNG -
diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c
index fbaf05e58aff..e8ba1495de2b 100644
--- a/drivers/mfd/wcd934x-regmap.c
+++ b/drivers/mfd/wcd934x-regmap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1926,6 +1926,19 @@ static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
case WCD934X_ANA_MBHC_ELECT:
case WCD934X_ANA_MBHC_ZDET:
case WCD934X_ANA_MICB2:
+ case WCD934X_CODEC_RPM_CLK_MCLK_CFG:
+ case WCD934X_CLK_SYS_MCLK_PRG:
+ case WCD934X_CHIP_TIER_CTRL_EFUSE_CTL:
+ case WCD934X_ANA_BIAS:
+ case WCD934X_ANA_BUCK_CTL:
+ case WCD934X_ANA_RCO:
+ case WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL:
+ case WCD934X_CODEC_RPM_CLK_GATE:
+ case WCD934X_BIAS_VBG_FINE_ADJ:
+ case WCD934X_CODEC_CPR_SVS_CX_VDD:
+ case WCD934X_CODEC_CPR_SVS2_CX_VDD:
+ case WCD934X_CDC_TOP_TOP_CFG1:
+ case WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL:
return true;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 5d6c44b00bc2..e9f1a19dfe3f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2189,6 +2189,17 @@ static int mmc_blk_err_check(struct mmc_card *card,
int need_retune = card->host->need_retune;
int ecc_err = 0, gen_err = 0;
+ if (card->host->sdr104_wa && mmc_card_sd(card) &&
+ (card->host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+ !card->sdr104_blocked &&
+ (brq->data.error == -EILSEQ ||
+ brq->data.error == -EIO ||
+ brq->data.error == -ETIMEDOUT ||
+ brq->cmd.error == -EILSEQ ||
+ brq->cmd.error == -EIO ||
+ brq->cmd.error == -ETIMEDOUT))
+ card->err_in_sdr104 = true;
+
/*
* sbc.error indicates a problem with the set block count
* command. No data will have been transferred.
@@ -3645,6 +3656,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
struct mmc_async_req *areq;
const u8 packed_nr = 2;
u8 reqs = 0;
+ bool reset = false;
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
unsigned long waitfor = jiffies;
#endif
@@ -3690,6 +3702,26 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
+ if (card->err_in_sdr104) {
+ /*
+ * Data CRC/timeout errors will manifest as CMD/DATA
+ * ERR. But we'd like to retry these too.
+ * Moreover, no harm done if this fails too for multiple
+ * times, we anyway reduce the bus-speed and retry the
+ * same request.
+ * If that fails too, we don't override this status.
+ */
+ if (status == MMC_BLK_ABORT ||
+ status == MMC_BLK_CMD_ERR ||
+ status == MMC_BLK_DATA_ERR ||
+ status == MMC_BLK_RETRY)
+ /* reset on all of these errors and retry */
+ reset = true;
+
+ status = MMC_BLK_RETRY;
+ card->err_in_sdr104 = false;
+ }
+
switch (status) {
case MMC_BLK_SUCCESS:
case MMC_BLK_PARTIAL:
@@ -3730,8 +3762,32 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
break;
case MMC_BLK_RETRY:
retune_retry_done = brq->retune_retry_done;
- if (retry++ < MMC_BLK_MAX_RETRIES)
+ if (retry++ < MMC_BLK_MAX_RETRIES) {
break;
+ } else if (reset) {
+ reset = false;
+ /*
+ * If we exhaust all the retries due to
+ * CRC/timeout errors in SDR104 mode with UHS SD
+ * cards, re-configure the card in SDR50
+ * bus-speed mode.
+ * All subsequent re-init of this card will be
+ * in SDR50 mode, unless it is removed and
+ * re-inserted. When new UHS SD cards are
+ * inserted, it may start at SDR104 mode if
+ * supported by the card.
+ */
+ pr_err("%s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
+ req->rq_disk->disk_name);
+ mmc_host_clear_sdr104(card->host);
+ mmc_suspend_clk_scaling(card->host);
+ mmc_blk_reset(md, card->host, type);
+ /* SDR104 mode is blocked from now on */
+ card->sdr104_blocked = true;
+ /* retry 5 times again */
+ retry = 0;
+ break;
+ }
/* Fall through */
case MMC_BLK_ABORT:
if (!mmc_blk_reset(md, card->host, type) &&
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index ec6075ec5767..311f6d639d06 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -428,7 +428,6 @@ void mmc_remove_card(struct mmc_card *card)
}
kfree(card->wr_pack_stats.packing_events);
- kfree(card->cached_ext_csd);
put_device(&card->dev);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5396e1d00178..41f0935440fd 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2126,6 +2126,7 @@ int mmc_try_claim_host(struct mmc_host *host, unsigned int delay_ms)
int claimed_host = 0;
unsigned long flags;
int retry_cnt = delay_ms/10;
+ bool pm = false;
do {
spin_lock_irqsave(&host->lock, flags);
@@ -2134,11 +2135,17 @@ int mmc_try_claim_host(struct mmc_host *host, unsigned int delay_ms)
host->claimer = current;
host->claim_cnt += 1;
claimed_host = 1;
+ if (host->claim_cnt == 1)
+ pm = true;
}
spin_unlock_irqrestore(&host->lock, flags);
if (!claimed_host)
mmc_delay(10);
} while (!claimed_host && retry_cnt--);
+
+ if (pm)
+ pm_runtime_get_sync(mmc_dev(host));
+
if (host->ops->enable && claimed_host && host->claim_cnt == 1)
host->ops->enable(host);
return claimed_host;
@@ -4033,6 +4040,10 @@ int _mmc_detect_card_removed(struct mmc_host *host)
if (ret) {
mmc_card_set_removed(host->card);
+ if (host->card->sdr104_blocked) {
+ mmc_host_set_sdr104(host);
+ host->card->sdr104_blocked = false;
+ }
pr_debug("%s: card remove detected\n", mmc_hostname(host));
}
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 1116544eebc1..c66187299598 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -80,7 +80,6 @@ void mmc_init_context_info(struct mmc_host *host);
extern bool mmc_can_scale_clk(struct mmc_host *host);
extern int mmc_init_clk_scaling(struct mmc_host *host);
-extern int mmc_suspend_clk_scaling(struct mmc_host *host);
extern int mmc_resume_clk_scaling(struct mmc_host *host);
extern int mmc_exit_clk_scaling(struct mmc_host *host);
extern unsigned long mmc_get_max_frequency(struct mmc_host *host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 542f1733d0dd..5ab09b4ae868 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -722,7 +722,6 @@ static int mmc_read_ext_csd(struct mmc_card *card)
return err;
}
- card->cached_ext_csd = ext_csd;
err = mmc_decode_ext_csd(card, ext_csd);
kfree(ext_csd);
return err;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index ec5ce79e84e7..5b4d5d74fe55 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -433,26 +433,26 @@ static void sd_update_bus_speed_mode(struct mmc_card *card)
if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104) &&
(card->host->f_max > UHS_SDR104_MIN_DTR)) {
- card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
- } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50) &&
- (card->host->f_max > UHS_DDR50_MIN_DTR)) {
- card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR50) &&
(card->host->f_max > UHS_SDR50_MIN_DTR)) {
- card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50) &&
+ (card->host->f_max > UHS_DDR50_MIN_DTR)) {
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25) &&
(card->host->f_max > UHS_SDR25_MIN_DTR)) {
- card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR12)) {
- card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
}
}
@@ -1285,6 +1285,8 @@ static int _mmc_sd_resume(struct mmc_host *host)
#endif
mmc_card_clr_suspended(host->card);
+ if (host->card->sdr104_blocked)
+ goto out;
err = mmc_resume_clk_scaling(host);
if (err) {
pr_err("%s: %s: fail to resume clock scaling (%d)\n",
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 2eaac11ec8ba..987d61bdda2d 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1960,6 +1960,8 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
if (of_get_property(np, "qcom,core_3_0v_support", NULL))
pdata->core_3_0v_support = true;
+ pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
+
return pdata;
out:
return NULL;
@@ -4579,6 +4581,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
if (msm_host->pdata->nonhotplug)
msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
+ msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
/* Initialize ICE if present */
if (msm_host->ice.pdev) {
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 2e4f2179378e..92f61708001e 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -153,6 +153,7 @@ struct sdhci_msm_pltfm_data {
u32 ice_clk_min;
struct sdhci_msm_pm_qos_data pm_qos_data;
bool core_3_0v_support;
+ bool sdr104_wa;
};
struct sdhci_msm_bus_vote {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 44633dc5d2be..40a34c283955 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3083,7 +3083,10 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
mmc_hostname(host->mmc), intmask,
host->data->error, ktime_to_ms(ktime_sub(
ktime_get(), host->data_start_time)));
- sdhci_dumpregs(host);
+
+ if (!host->mmc->sdr104_wa ||
+ (host->mmc->ios.timing != MMC_TIMING_UHS_SDR104))
+ sdhci_dumpregs(host);
}
sdhci_finish_data(host);
} else {
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 5fe8bc184868..27a6c75682c4 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -27,6 +27,7 @@ ath10k_pci-y += pci.o \
obj-$(CONFIG_ATH10K_TARGET_SNOC) += ath10k_snoc.o
ath10k_snoc-y += snoc.o \
qmi.o \
+ wcn3990_qmi_service_v01.o \
ce.o
ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 7b5fc52d269a..7d20f087da71 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -12,10 +12,14 @@
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/service-notifier.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/service-locator.h>
#include "core.h"
#include "qmi.h"
#include "snoc.h"
-#include <soc/qcom/icnss.h>
+#include "wcn3990_qmi_service_v01.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(ath10k_fw_ready_wait_event);
static int
ath10k_snoc_service_notifier_notify(struct notifier_block *nb,
@@ -228,3 +232,634 @@ int ath10k_snoc_modem_ssr_unregister_notifier(struct ath10k *ar)
return 0;
}
+static char *
+ath10k_snoc_driver_event_to_str(enum ath10k_snoc_driver_event_type type)
+{
+ switch (type) {
+ case ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE:
+ return "SERVER_ARRIVE";
+ case ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT:
+ return "SERVER_EXIT";
+ case ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND:
+ return "FW_READY";
+ case ATH10K_SNOC_DRIVER_EVENT_MAX:
+ return "EVENT_MAX";
+ }
+
+ return "UNKNOWN";
+};
+
+static int
+ath10k_snoc_driver_event_post(enum ath10k_snoc_driver_event_type type,
+ u32 flags, void *data)
+{
+ int ret = 0;
+ int i = 0;
+ struct ath10k *ar = (struct ath10k *)data;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Posting event: %s type: %d\n",
+ ath10k_snoc_driver_event_to_str(type), type);
+
+ if (type >= ATH10K_SNOC_DRIVER_EVENT_MAX) {
+ ath10k_err(ar, "Invalid Event type: %d, can't post", type);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&qmi_cfg->event_lock);
+
+ for (i = 0; i < ATH10K_SNOC_DRIVER_EVENT_MAX; i++) {
+ if (atomic_read(&qmi_cfg->qmi_ev_list[i].event_handled)) {
+ qmi_cfg->qmi_ev_list[i].type = type;
+ qmi_cfg->qmi_ev_list[i].data = data;
+ init_completion(&qmi_cfg->qmi_ev_list[i].complete);
+ qmi_cfg->qmi_ev_list[i].ret =
+ ATH10K_SNOC_EVENT_PENDING;
+ qmi_cfg->qmi_ev_list[i].sync =
+ !!(flags & ATH10K_SNOC_EVENT_SYNC);
+ atomic_set(&qmi_cfg->qmi_ev_list[i].event_handled, 0);
+ list_add_tail(&qmi_cfg->qmi_ev_list[i].list,
+ &qmi_cfg->event_list);
+ break;
+ }
+ }
+
+ if (i >= ATH10K_SNOC_DRIVER_EVENT_MAX)
+ i = ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE;
+
+ spin_unlock_bh(&qmi_cfg->event_lock);
+
+ queue_work(qmi_cfg->event_wq, &qmi_cfg->event_work);
+
+ if (!(flags & ATH10K_SNOC_EVENT_SYNC))
+ goto out;
+
+ if (flags & ATH10K_SNOC_EVENT_UNINTERRUPTIBLE)
+ wait_for_completion(&qmi_cfg->qmi_ev_list[i].complete);
+ else
+ ret = wait_for_completion_interruptible(
+ &qmi_cfg->qmi_ev_list[i].complete);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Completed event: %s(%d)\n",
+ ath10k_snoc_driver_event_to_str(type), type);
+
+ spin_lock_bh(&qmi_cfg->event_lock);
+ if (ret == -ERESTARTSYS &&
+ qmi_cfg->qmi_ev_list[i].ret == ATH10K_SNOC_EVENT_PENDING) {
+ qmi_cfg->qmi_ev_list[i].sync = false;
+ atomic_set(&qmi_cfg->qmi_ev_list[i].event_handled, 1);
+ spin_unlock_bh(&qmi_cfg->event_lock);
+ ret = -EINTR;
+ goto out;
+ }
+ spin_unlock_bh(&qmi_cfg->event_lock);
+
+out:
+ return ret;
+}
+
+static int
+ath10k_snoc_wlan_mode_send_sync_msg(struct ath10k *ar,
+ enum wlfw_driver_mode_enum_v01 mode)
+{
+ int ret;
+ struct wlfw_wlan_mode_req_msg_v01 req;
+ struct wlfw_wlan_mode_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+ if (!qmi_cfg || !qmi_cfg->wlfw_clnt)
+ return -ENODEV;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Sending Mode request, mode: %d\n", mode);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ req_desc.max_msg_len = WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_WLAN_MODE_REQ_V01;
+ req_desc.ei_array = wlfw_wlan_mode_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_WLAN_MODE_RESP_V01;
+ resp_desc.ei_array = wlfw_wlan_mode_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(qmi_cfg->wlfw_clnt,
+ &req_desc, &req, sizeof(req),
+ &resp_desc, &resp, sizeof(resp),
+ WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ ath10k_err(ar, "Send mode req failed, mode: %d ret: %d\n",
+ mode, ret);
+ return ret;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "QMI mode request rejected:");
+ ath10k_err(ar, "mode:%d result:%d error:%d\n",
+ mode, resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "wlan Mode request send success, mode: %d\n", mode);
+ return 0;
+}
+
+static int
+ath10k_snoc_wlan_cfg_send_sync_msg(struct ath10k *ar,
+ struct wlfw_wlan_cfg_req_msg_v01 *data)
+{
+ int ret;
+ struct wlfw_wlan_cfg_req_msg_v01 req;
+ struct wlfw_wlan_cfg_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+ if (!qmi_cfg || !qmi_cfg->wlfw_clnt)
+ return -ENODEV;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Sending config request\n");
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ memcpy(&req, data, sizeof(req));
+
+ req_desc.max_msg_len = WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_WLAN_CFG_REQ_V01;
+ req_desc.ei_array = wlfw_wlan_cfg_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_WLAN_CFG_RESP_V01;
+ resp_desc.ei_array = wlfw_wlan_cfg_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(qmi_cfg->wlfw_clnt,
+ &req_desc, &req, sizeof(req),
+ &resp_desc, &resp, sizeof(resp),
+ WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ ath10k_err(ar, "Send config req failed %d\n", ret);
+ return ret;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "QMI config request rejected:");
+ ath10k_err(ar, "result:%d error:%d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "wlan config request success..\n");
+ return 0;
+}
+
+int ath10k_snoc_qmi_wlan_enable(struct ath10k *ar,
+ struct ath10k_wlan_enable_cfg *config,
+ enum ath10k_driver_mode mode,
+ const char *host_version)
+{
+ struct wlfw_wlan_cfg_req_msg_v01 req;
+ u32 i;
+ int ret;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Mode: %d, config: %p, host_version: %s\n",
+ mode, config, host_version);
+
+ memset(&req, 0, sizeof(req));
+ if (!config || !host_version) {
+ ath10k_err(ar, "WLAN_EN Config Invalid:%p: host_version:%p\n",
+ config, host_version);
+ ret = -EINVAL;
+ return ret;
+ }
+
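+ /* Wait for the QMI server to connect and firmware to report ready;
+ * proceed anyway once ATH10K_SNOC_WLAN_FW_READY_TIMEOUT ms elapse.
+ */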
+ wait_event_timeout(ath10k_fw_ready_wait_event,
+ (atomic_read(&qmi_cfg->fw_ready) &&
+ atomic_read(&qmi_cfg->server_connected)),
+ msecs_to_jiffies(ATH10K_SNOC_WLAN_FW_READY_TIMEOUT));
+
+ req.host_version_valid = 1;
+ strlcpy(req.host_version, host_version,
+ QMI_WLFW_MAX_STR_LEN_V01 + 1);
+
+ req.tgt_cfg_valid = 1;
+ if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
+ req.tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+ else
+ req.tgt_cfg_len = config->num_ce_tgt_cfg;
+ for (i = 0; i < req.tgt_cfg_len; i++) {
+ req.tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
+ req.tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
+ req.tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
+ req.tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
+ req.tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
+ }
+
+ req.svc_cfg_valid = 1;
+ if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
+ req.svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
+ else
+ req.svc_cfg_len = config->num_ce_svc_pipe_cfg;
+ for (i = 0; i < req.svc_cfg_len; i++) {
+ req.svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
+ req.svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
+ req.svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
+ }
+
+ req.shadow_reg_valid = 1;
+ if (config->num_shadow_reg_cfg >
+ QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
+ req.shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
+ else
+ req.shadow_reg_len = config->num_shadow_reg_cfg;
+
+ memcpy(req.shadow_reg, config->shadow_reg_cfg,
+ sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req.shadow_reg_len);
+
+ ret = ath10k_snoc_wlan_cfg_send_sync_msg(ar, &req);
+ if (ret) {
+ ath10k_err(ar, "WLAN config send failed\n");
+ return ret;
+ }
+
+ ret = ath10k_snoc_wlan_mode_send_sync_msg(ar, mode);
+ if (ret) {
+ ath10k_err(ar, "WLAN mode send failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_snoc_qmi_wlan_disable(struct ath10k *ar)
+{
+ return ath10k_snoc_wlan_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
+}
+
+static int ath10k_snoc_ind_register_send_sync_msg(struct ath10k *ar)
+{
+ int ret;
+ struct wlfw_ind_register_req_msg_v01 req;
+ struct wlfw_ind_register_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Sending indication register message,\n");
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.client_id_valid = 1;
+ req.client_id = WLFW_CLIENT_ID;
+ req.fw_ready_enable_valid = 1;
+ req.fw_ready_enable = 1;
+ req.msa_ready_enable_valid = 1;
+ req.msa_ready_enable = 1;
+
+ req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01;
+ req_desc.ei_array = wlfw_ind_register_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_IND_REGISTER_RESP_V01;
+ resp_desc.ei_array = wlfw_ind_register_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(qmi_cfg->wlfw_clnt,
+ &req_desc, &req, sizeof(req),
+ &resp_desc, &resp, sizeof(resp),
+ WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ ath10k_err(ar, "Send indication register req failed %d\n", ret);
+ return ret;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "QMI indication register request rejected:");
+ ath10k_err(ar, "resut:%d error:%d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_snoc_qmi_wlfw_clnt_notify_work(struct work_struct *work)
+{
+ int ret;
+ struct ath10k_snoc_qmi_config *qmi_cfg =
+ container_of(work, struct ath10k_snoc_qmi_config,
+ qmi_recv_msg_work);
+ struct ath10k_snoc *ar_snoc =
+ container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg);
+ struct ath10k *ar = ar_snoc->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Receiving Event in work queue context\n");
+
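+ /* Drain all queued QMI messages; qmi_recv_msg() returns -ENOMSG
+ * once the receive queue is empty.
+ */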
+ do {
+ } while ((ret = qmi_recv_msg(qmi_cfg->wlfw_clnt)) == 0);
+
+ if (ret != -ENOMSG)
+ ath10k_err(ar, "Error receiving message: %d\n", ret);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Receiving Event completed\n");
+}
+
+static void
+ath10k_snoc_qmi_wlfw_clnt_notify(struct qmi_handle *handle,
+ enum qmi_event_type event,
+ void *notify_priv)
+{
+ struct ath10k_snoc_qmi_config *qmi_cfg =
+ (struct ath10k_snoc_qmi_config *)notify_priv;
+ struct ath10k_snoc *ar_snoc;
+ struct ath10k *ar;
+
+ if (!qmi_cfg || !qmi_cfg->wlfw_clnt)
+ return;
+
+ ar_snoc = container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg);
+ ar = ar_snoc->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "QMI client notify: %d\n", event);
+
+ switch (event) {
+ case QMI_RECV_MSG:
+ schedule_work(&qmi_cfg->qmi_recv_msg_work);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Unknown Event: %d\n", event);
+ break;
+ }
+}
+
+static void
+ath10k_snoc_qmi_wlfw_clnt_ind(struct qmi_handle *handle,
+ unsigned int msg_id, void *msg,
+ unsigned int msg_len, void *ind_cb_priv)
+{
+ struct ath10k_snoc_qmi_config *qmi_cfg =
+ (struct ath10k_snoc_qmi_config *)ind_cb_priv;
+ struct ath10k_snoc *ar_snoc =
+ container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg);
+ struct ath10k *ar = ar_snoc->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Received Ind 0x%x, msg_len: %d\n", msg_id, msg_len);
+ switch (msg_id) {
+ case QMI_WLFW_FW_READY_IND_V01:
+ ath10k_snoc_driver_event_post(
+ ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND, 0, ar);
+ break;
+ case QMI_WLFW_MSA_READY_IND_V01:
+ qmi_cfg->msa_ready = true;
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Received MSA Ready, ind = 0x%x\n", msg_id);
+ break;
+ default:
+ ath10k_err(ar, "Invalid msg_id 0x%x\n", msg_id);
+ break;
+ }
+}
+
+static int ath10k_snoc_driver_event_server_arrive(struct ath10k *ar)
+{
+ int ret = 0;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+ if (!qmi_cfg)
+ return -ENODEV;
+
+ qmi_cfg->wlfw_clnt = qmi_handle_create(
+ ath10k_snoc_qmi_wlfw_clnt_notify, qmi_cfg);
+ if (!qmi_cfg->wlfw_clnt) {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "QMI client handle create failed\n");
+ return -ENOMEM;
+ }
+
+ ret = qmi_connect_to_service(qmi_cfg->wlfw_clnt,
+ WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01,
+ WLFW_SERVICE_INS_ID_V01);
+ if (ret < 0) {
+ ath10k_err(ar, "QMI WLAN Service not found : %d\n", ret);
+ goto err_qmi_config;
+ }
+
+ ret = qmi_register_ind_cb(qmi_cfg->wlfw_clnt,
+ ath10k_snoc_qmi_wlfw_clnt_ind, qmi_cfg);
+ if (ret < 0) {
+ ath10k_err(ar, "Failed to register indication callback: %d\n",
+ ret);
+ goto err_qmi_config;
+ }
+
+ ret = ath10k_snoc_ind_register_send_sync_msg(ar);
+ if (ret) {
+ ath10k_err(ar, "Failed to config qmi ind register\n");
+ goto err_qmi_config;
+ }
+
+ atomic_set(&qmi_cfg->server_connected, 1);
+ wake_up_all(&ath10k_fw_ready_wait_event);
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "QMI Server Arrive Configuration Success\n");
+ return 0;
+
+err_qmi_config:
+ qmi_handle_destroy(qmi_cfg->wlfw_clnt);
+ qmi_cfg->wlfw_clnt = NULL;
+ return ret;
+}
+
+static int ath10k_snoc_driver_event_server_exit(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "QMI Server Exit event received\n");
+ atomic_set(&qmi_cfg->fw_ready, 0);
+ qmi_cfg->msa_ready = false;
+ atomic_set(&qmi_cfg->server_connected, 0);
+ return 0;
+}
+
+static int ath10k_snoc_driver_event_fw_ready_ind(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "FW Ready event received.\n");
+ atomic_set(&qmi_cfg->fw_ready, 1);
+ wake_up_all(&ath10k_fw_ready_wait_event);
+
+ return 0;
+}
+
+static void ath10k_snoc_driver_event_work(struct work_struct *work)
+{
+ struct ath10k_snoc_qmi_driver_event *event;
+ int ret;
+ struct ath10k_snoc_qmi_config *qmi_cfg =
+ container_of(work, struct ath10k_snoc_qmi_config, event_work);
+ struct ath10k_snoc *ar_snoc =
+ container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg);
+ struct ath10k *ar = ar_snoc->ar;
+
+ spin_lock_bh(&qmi_cfg->event_lock);
+
+ while (!list_empty(&qmi_cfg->event_list)) {
+ event = list_first_entry(&qmi_cfg->event_list,
+ struct ath10k_snoc_qmi_driver_event,
+ list);
+ list_del(&event->list);
+ spin_unlock_bh(&qmi_cfg->event_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Processing event: %s%s(%d)\n",
+ ath10k_snoc_driver_event_to_str(event->type),
+ event->sync ? "-sync" : "", event->type);
+
+ switch (event->type) {
+ case ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE:
+ ret = ath10k_snoc_driver_event_server_arrive(ar);
+ break;
+ case ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT:
+ ret = ath10k_snoc_driver_event_server_exit(ar);
+ break;
+ case ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND:
+ ret = ath10k_snoc_driver_event_fw_ready_ind(ar);
+ break;
+ default:
+ ath10k_err(ar, "Invalid Event type: %d\n", event->type);
+ /* Events live in the fixed qmi_ev_list pool, so mark the
+ * slot free rather than freeing it, and re-take the lock
+ * before re-checking the list.
+ */
+ atomic_set(&event->event_handled, 1);
+ spin_lock_bh(&qmi_cfg->event_lock);
+ continue;
+ }
+
+ atomic_set(&event->event_handled, 1);
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Event Processed: %s%s(%d), ret: %d\n",
+ ath10k_snoc_driver_event_to_str(event->type),
+ event->sync ? "-sync" : "", event->type, ret);
+ spin_lock_bh(&qmi_cfg->event_lock);
+ if (event->sync) {
+ event->ret = ret;
+ complete(&event->complete);
+ continue;
+ }
+ }
+
+ spin_unlock_bh(&qmi_cfg->event_lock);
+}
+
+static int
+ath10k_snoc_qmi_wlfw_clnt_svc_event_notify(struct notifier_block *this,
+ unsigned long code,
+ void *_cmd)
+{
+ int ret = 0;
+ struct ath10k_snoc_qmi_config *qmi_cfg =
+ container_of(this, struct ath10k_snoc_qmi_config, wlfw_clnt_nb);
+ struct ath10k_snoc *ar_snoc =
+ container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg);
+ struct ath10k *ar = ar_snoc->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Event Notify: code: %ld", code);
+
+ switch (code) {
+ case QMI_SERVER_ARRIVE:
+ ret = ath10k_snoc_driver_event_post(
+ ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE, 0, ar);
+ break;
+ case QMI_SERVER_EXIT:
+ ret = ath10k_snoc_driver_event_post(
+ ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT, 0, ar);
+ break;
+ default:
+ ath10k_err(ar, "Invalid code: %ld", code);
+ break;
+ }
+
+ return ret;
+}
+
+int ath10k_snoc_start_qmi_service(struct ath10k *ar)
+{
+ int ret;
+ int i;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+ qmi_cfg->event_wq = alloc_workqueue("ath10k_snoc_driver_event",
+ WQ_UNBOUND, 1);
+ if (!qmi_cfg->event_wq) {
+ ath10k_err(ar, "Workqueue creation failed\n");
+ return -EFAULT;
+ }
+
+ spin_lock_init(&qmi_cfg->event_lock);
+ atomic_set(&qmi_cfg->fw_ready, 0);
+ atomic_set(&qmi_cfg->server_connected, 0);
+
+ INIT_WORK(&qmi_cfg->event_work, ath10k_snoc_driver_event_work);
+ INIT_WORK(&qmi_cfg->qmi_recv_msg_work,
+ ath10k_snoc_qmi_wlfw_clnt_notify_work);
+ INIT_LIST_HEAD(&qmi_cfg->event_list);
+
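+ /* Mark every pool slot as handled (i.e. free) so it can be
+ * claimed by ath10k_snoc_driver_event_post().
+ */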
+ for (i = 0; i < ATH10K_SNOC_DRIVER_EVENT_MAX; i++)
+ atomic_set(&qmi_cfg->qmi_ev_list[i].event_handled, 1);
+
+ qmi_cfg->wlfw_clnt_nb.notifier_call =
+ ath10k_snoc_qmi_wlfw_clnt_svc_event_notify;
+ ret = qmi_svc_event_notifier_register(WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01,
+ WLFW_SERVICE_INS_ID_V01,
+ &qmi_cfg->wlfw_clnt_nb);
+ if (ret < 0) {
+ ath10k_err(ar, "Notifier register failed: %d\n", ret);
+ ret = -EFAULT;
+ goto out_destroy_wq;
+ }
+
+ atomic_set(&qmi_cfg->fw_ready, 1);
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "QMI service started successfully\n");
+ return 0;
+
+out_destroy_wq:
+ destroy_workqueue(qmi_cfg->event_wq);
+ return ret;
+}
+
+void ath10k_snoc_stop_qmi_service(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Removing QMI service..\n");
+
+ qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01,
+ WLFW_SERVICE_INS_ID_V01,
+ &qmi_cfg->wlfw_clnt_nb);
+
+ wake_up_all(&ath10k_fw_ready_wait_event);
+ destroy_workqueue(qmi_cfg->event_wq);
+}
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
index f8ba3288753b..c8bc26bb96b2 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.h
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -11,9 +11,146 @@
*/
#ifndef _QMI_H_
#define _QMI_H_
+
+#define ATH10K_SNOC_EVENT_PENDING 2989
+#define ATH10K_SNOC_EVENT_SYNC BIT(0)
+#define ATH10K_SNOC_EVENT_UNINTERRUPTIBLE BIT(1)
+#define ATH10K_SNOC_WLAN_FW_READY_TIMEOUT 8000
+
+#define WLFW_SERVICE_INS_ID_V01 0
+#define WLFW_CLIENT_ID 0x4b4e454c
+#define WLFW_TIMEOUT_MS 20000
+
+enum ath10k_snoc_driver_event_type {
+ ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE,
+ ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT,
+ ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND,
+ ATH10K_SNOC_DRIVER_EVENT_MAX,
+};
+
+/* enum ath10k_driver_mode: ath10k driver mode
+ * @ATH10K_MISSION: mission mode
+ * @ATH10K_FTM: ftm mode
+ * @ATH10K_EPPING: epping mode
+ * @ATH10K_OFF: off mode
+ */
+enum ath10k_driver_mode {
+ ATH10K_MISSION,
+ ATH10K_FTM,
+ ATH10K_EPPING,
+ ATH10K_OFF
+};
+
+/* struct ath10k_ce_tgt_pipe_cfg: target pipe configuration
+ * @pipe_num: pipe number
+ * @pipe_dir: pipe direction
+ * @nentries: entries in pipe
+ * @nbytes_max: pipe max size
+ * @flags: pipe flags
+ * @reserved: reserved
+ */
+struct ath10k_ce_tgt_pipe_cfg {
+ u32 pipe_num;
+ u32 pipe_dir;
+ u32 nentries;
+ u32 nbytes_max;
+ u32 flags;
+ u32 reserved;
+};
+
+/* struct ath10k_ce_svc_pipe_cfg: service pipe configuration
+ * @service_id: service id
+ * @pipe_dir: pipe direction
+ * @pipe_num: pipe number
+ */
+struct ath10k_ce_svc_pipe_cfg {
+ u32 service_id;
+ u32 pipe_dir;
+ u32 pipe_num;
+};
+
+/* struct ath10k_shadow_reg_cfg: shadow register configuration
+ * @ce_id: copy engine id
+ * @reg_offset: offset to copy engine
+ */
+struct ath10k_shadow_reg_cfg {
+ u16 ce_id;
+ u16 reg_offset;
+};
+
+/* struct ath10k_wlan_enable_cfg: wlan enable configuration
+ * @num_ce_tgt_cfg: no of ce target configuration
+ * @ce_tgt_cfg: target ce configuration
+ * @num_ce_svc_pipe_cfg: no of ce service configuration
+ * @ce_svc_cfg: ce service configuration
+ * @num_shadow_reg_cfg: no of shadow registers
+ * @shadow_reg_cfg: shadow register configuration
+ */
+struct ath10k_wlan_enable_cfg {
+ u32 num_ce_tgt_cfg;
+ struct ath10k_ce_tgt_pipe_cfg *ce_tgt_cfg;
+ u32 num_ce_svc_pipe_cfg;
+ struct ath10k_ce_svc_pipe_cfg *ce_svc_cfg;
+ u32 num_shadow_reg_cfg;
+ struct ath10k_shadow_reg_cfg *shadow_reg_cfg;
+};
+
+/* struct ath10k_snoc_qmi_driver_event: qmi driver event
+ * @event_handled: set once the work handler is done with the event (slot free)
+ * @sync: caller waits synchronously for the event to be processed
+ * @ret: return value of the event handler
+ * @list: list node used to queue the event for processing
+ * @type: driver event type
+ * @complete: completion signalled once event handling is done
+ * @data: driver data passed to the event handler callback
+ */
+struct ath10k_snoc_qmi_driver_event {
+ atomic_t event_handled;
+ bool sync;
+ int ret;
+ struct list_head list;
+ enum ath10k_snoc_driver_event_type type;
+ struct completion complete;
+ void *data;
+};
+
+/* struct ath10k_snoc_qmi_config: qmi service configuration
+ * @fw_ready: wlan firmware ready for wlan operation
+ * @msa_ready: wlan firmware msa memory ready for board data download
+ * @server_connected: qmi server connected
+ * @event_work: QMI event work
+ * @event_list: list of queued QMI driver events
+ * @qmi_recv_msg_work: QMI message receive work
+ * @event_wq: QMI event work queue
+ * @wlfw_clnt_nb: notifier block for wlan firmware service events
+ * @wlfw_clnt: QMI client handle for the wlan firmware service
+ * @qmi_ev_list: pre-allocated pool of QMI driver events
+ * @event_lock: spinlock protecting the qmi event list
+ */
+struct ath10k_snoc_qmi_config {
+ atomic_t fw_ready;
+ bool msa_ready;
+ atomic_t server_connected;
+ struct work_struct event_work;
+ struct list_head event_list;
+ struct work_struct qmi_recv_msg_work;
+ struct workqueue_struct *event_wq;
+ struct notifier_block wlfw_clnt_nb;
+ struct qmi_handle *wlfw_clnt;
+ struct ath10k_snoc_qmi_driver_event
+ qmi_ev_list[ATH10K_SNOC_DRIVER_EVENT_MAX];
+ spinlock_t event_lock; /* spinlock for qmi event work queue */
+};
+
int ath10k_snoc_pd_restart_enable(struct ath10k *ar);
int ath10k_snoc_modem_ssr_register_notifier(struct ath10k *ar);
int ath10k_snoc_modem_ssr_unregister_notifier(struct ath10k *ar);
int ath10k_snoc_pdr_unregister_notifier(struct ath10k *ar);
-
+int ath10k_snoc_start_qmi_service(struct ath10k *ar);
+void ath10k_snoc_stop_qmi_service(struct ath10k *ar);
+int ath10k_snoc_qmi_wlan_enable(struct ath10k *ar,
+ struct ath10k_wlan_enable_cfg *config,
+ enum ath10k_driver_mode mode,
+ const char *host_version);
+int ath10k_snoc_qmi_wlan_disable(struct ath10k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 89042dcf70a0..add0a7cd9edb 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -17,7 +17,6 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
-
#include "core.h"
#include "debug.h"
#include "hif.h"
@@ -25,10 +24,11 @@
#include "ce.h"
#include "snoc.h"
#include "qmi.h"
-#include <soc/qcom/icnss.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+
#define WCN3990_MAX_IRQ 12
+
const char *ce_name[WCN3990_MAX_IRQ] = {
"WLAN_CE_0",
"WLAN_CE_1",
@@ -957,7 +957,7 @@ static void ath10k_snoc_hif_power_down(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
msleep(SNOC_HIF_POWER_DOWN_DELAY);
- icnss_wlan_disable(ICNSS_OFF);
+ ath10k_snoc_qmi_wlan_disable(ar);
}
int ath10k_snoc_get_ce_id(struct ath10k *ar, int irq)
@@ -1061,7 +1061,7 @@ static int ath10k_snoc_get_soc_info(struct ath10k *ar)
static int ath10k_snoc_wlan_enable(struct ath10k *ar)
{
- struct icnss_wlan_enable_cfg cfg;
+ struct ath10k_wlan_enable_cfg cfg;
int pipe_num;
struct ath10k_ce_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
@@ -1080,19 +1080,20 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar)
}
cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
- sizeof(struct ce_tgt_pipe_cfg);
- cfg.ce_tgt_cfg = (struct ce_tgt_pipe_cfg *)
+ sizeof(struct ath10k_ce_tgt_pipe_cfg);
+ cfg.ce_tgt_cfg = (struct ath10k_ce_tgt_pipe_cfg *)
&tgt_cfg;
cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
- sizeof(struct ce_svc_pipe_cfg);
- cfg.ce_svc_cfg = (struct ce_svc_pipe_cfg *)
+ sizeof(struct ath10k_ce_svc_pipe_cfg);
+ cfg.ce_svc_cfg = (struct ath10k_ce_svc_pipe_cfg *)
&target_service_to_ce_map_wlan;
cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
- sizeof(struct icnss_shadow_reg_cfg);
- cfg.shadow_reg_cfg = (struct icnss_shadow_reg_cfg *)
+ sizeof(struct ath10k_shadow_reg_cfg);
+ cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
&target_shadow_reg_cfg_map;
- return icnss_wlan_enable(&cfg, ICNSS_MISSION, "5.1.0.26N");
+ return ath10k_snoc_qmi_wlan_enable(ar, &cfg,
+ ATH10K_MISSION, "5.1.0.26N");
}
static int ath10k_snoc_bus_configure(struct ath10k *ar)
@@ -1245,6 +1246,12 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ar);
ar_snoc->ar = ar;
+ ret = ath10k_snoc_start_qmi_service(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to start QMI service: %d\n", ret);
+ goto err_core_destroy;
+ }
+
spin_lock_init(&ar_snoc->opaque_ctx.ce_lock);
ar_snoc->opaque_ctx.bus_ops = &ath10k_snoc_bus_ops;
ath10k_snoc_resource_init(ar);
@@ -1325,6 +1332,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_snoc_free_irq(ar);
ath10k_snoc_release_resource(ar);
ath10k_snoc_free_pipes(ar);
+ ath10k_snoc_stop_qmi_service(ar);
ath10k_core_destroy(ar);
ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 removed\n", __func__);
@@ -1352,10 +1360,6 @@ static int __init ath10k_snoc_init(void)
{
int ret;
- if (!icnss_is_fw_ready()) {
- pr_err("failed to get fw ready indication\n");
- return -EAGAIN;
- }
ret = platform_driver_register(&ath10k_snoc_driver);
if (ret)
pr_err("failed to register ath10k snoc driver: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index c62519b2a340..99ae157885bb 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -16,6 +16,7 @@
#include "hw.h"
#include "ce.h"
#include "pci.h"
+#include "qmi.h"
#include <soc/qcom/service-locator.h>
#define ATH10K_SNOC_RX_POST_RETRY_MS 50
#define CE_POLL_PIPE 4
@@ -143,60 +144,7 @@ struct ath10k_snoc {
int total_domains;
struct notifier_block get_service_nb;
atomic_t fw_crashed;
-};
-
-/* struct ath10k_ce_tgt_pipe_cfg: target pipe configuration
- * @pipe_num: pipe number
- * @pipe_dir: pipe direction
- * @nentries: entries in pipe
- * @nbytes_max: pipe max size
- * @flags: pipe flags
- * @reserved: reserved
- */
-struct ath10k_ce_tgt_pipe_cfg {
- u32 pipe_num;
- u32 pipe_dir;
- u32 nentries;
- u32 nbytes_max;
- u32 flags;
- u32 reserved;
-};
-
-/* struct ath10k_ce_svc_pipe_cfg: service pipe configuration
- * @service_id: target version
- * @pipe_dir: pipe direction
- * @pipe_num: pipe number
- */
-struct ath10k_ce_svc_pipe_cfg {
- u32 service_id;
- u32 pipe_dir;
- u32 pipe_num;
-};
-
-/* struct ath10k_shadow_reg_cfg: shadow register configuration
- * @ce_id: copy engine id
- * @reg_offset: offset to copy engine
- */
-struct ath10k_shadow_reg_cfg {
- u16 ce_id;
- u16 reg_offset;
-};
-
-/* struct ath10k_wlan_enable_cfg: wlan enable configuration
- * @num_ce_tgt_cfg: no of ce target configuration
- * @ce_tgt_cfg: target ce configuration
- * @num_ce_svc_pipe_cfg: no of ce service configuration
- * @ce_svc_cfg: ce service configuration
- * @num_shadow_reg_cfg: no of shadow registers
- * @shadow_reg_cfg: shadow register configuration
- */
-struct ath10k_wlan_enable_cfg {
- u32 num_ce_tgt_cfg;
- struct ath10k_ce_tgt_pipe_cfg *ce_tgt_cfg;
- u32 num_ce_svc_pipe_cfg;
- struct ath10k_ce_svc_pipe_cfg *ce_svc_cfg;
- u32 num_shadow_reg_cfg;
- struct ath10k_shadow_reg_cfg *shadow_reg_cfg;
+ struct ath10k_snoc_qmi_config qmi_cfg;
};
struct ath10k_event_pd_down_data {
@@ -204,19 +152,6 @@ struct ath10k_event_pd_down_data {
bool fw_rejuvenate;
};
-/* enum ath10k_driver_mode: ath10k driver mode
- * @ATH10K_MISSION: mission mode
- * @ATH10K_FTM: ftm mode
- * @ATH10K_EPPING: epping mode
- * @ATH10K_OFF: off mode
- */
-enum ath10k_driver_mode {
- ATH10K_MISSION,
- ATH10K_FTM,
- ATH10K_EPPING,
- ATH10K_OFF
-};
-
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
{
return (struct ath10k_snoc *)ar->drv_priv;
diff --git a/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.c b/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.c
new file mode 100644
index 000000000000..7d6cf8b23814
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.c
@@ -0,0 +1,2091 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "wcn3990_qmi_service_v01.h"
+
+static struct elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_memory_region_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ region_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_soc_info_s_v01,
+ soc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ cold_boot_cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ cold_boot_cal_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct wlfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_driver_mode_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = wlfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = wlfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_board_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = wlfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_soc_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = wlfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_fw_version_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = wlfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(uint8_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct wlfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_report_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data_len),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = QMI_WLFW_MAX_NUM_CAL_V01,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_report_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct wlfw_initiate_cal_download_ind_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(uint8_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct wlfw_cal_download_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct wlfw_initiate_cal_update_ind_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct wlfw_initiate_cal_update_ind_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_update_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(uint8_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ msa_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01,
+ .elem_size = sizeof(struct wlfw_memory_region_info_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info),
+ .ei_array = wlfw_memory_region_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_ini_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct wlfw_athdiag_read_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_athdiag_read_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_athdiag_read_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(uint8_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_athdiag_read_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct wlfw_athdiag_write_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct wlfw_athdiag_write_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(
+ struct wlfw_athdiag_write_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(uint8_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(
+ struct wlfw_athdiag_write_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct wlfw_athdiag_write_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_vbatt_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_vbatt_req_msg_v01,
+ voltage_uv),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_vbatt_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01,
+ .elem_size = sizeof(uint8_t),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_host_cap_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct wlfw_rejuvenate_ack_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
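Each elem_info array above is a serialization descriptor: entries that share a tlv_type together describe one TLV, with QMI_OPT_FLAG covering the *_valid flag, QMI_DATA_LEN covering the length of a variable-length payload, VAR_LEN_ARRAY or STATIC_ARRAY covering the payload itself, and QMI_EOTI terminating the table. As a rough illustration of the wire format this implies (a sketch assuming the usual QMI framing of a 1-byte TLV type followed by a 2-byte little-endian length; the bytes below are invented, not captured from hardware), the wlfw_ini_req_msg_v01 described by wlfw_ini_req_msg_v01_ei encodes to a single optional TLV:

/* Hypothetical encoding of wlfw_ini_req_msg_v01 with
 * enablefwlog_valid = 1 and enablefwlog = 1: one TLV of type 0x10.
 */
static const unsigned char wlfw_ini_req_example[] = {
	0x10,		/* TLV type 0x10: enablefwlog (optional) */
	0x01, 0x00,	/* TLV length = 1 (little endian)        */
	0x01,		/* enablefwlog = 1                       */
};
/* 4 bytes total, i.e. WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN. */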
diff --git a/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.h b/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.h
new file mode 100644
index 000000000000..21513b8f6200
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.h
@@ -0,0 +1,619 @@
+ /* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef WLAN_FIRMWARE_SERVICE_V01_H
+#define WLAN_FIRMWARE_SERVICE_V01_H
+
+#define WLFW_SERVICE_ID_V01 0x45
+#define WLFW_SERVICE_VERS_V01 0x01
+
+#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
+#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
+#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
+#define QMI_WLFW_INI_RESP_V01 0x002F
+#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
+#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
+#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MSA_READY_IND_V01 0x002B
+#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
+#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
+#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
+#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
+#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
+#define QMI_WLFW_CAP_RESP_V01 0x0024
+#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
+#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
+#define QMI_WLFW_VBATT_REQ_V01 0x0032
+#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
+#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLFW_VBATT_RESP_V01 0x0032
+#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
+#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
+#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
+#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
+
+#define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2
+#define QMI_WLFW_MAX_NUM_CAL_V01 5
+#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
+#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_CE_V01 12
+#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
+#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_STR_LEN_V01 16
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36
+#define QMI_WLFW_MAX_NUM_SVC_V01 24
+
+enum wlfw_driver_mode_enum_v01 {
+ WLFW_DRIVER_MODE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_MISSION_V01 = 0,
+ QMI_WLFW_FTM_V01 = 1,
+ QMI_WLFW_EPPING_V01 = 2,
+ QMI_WLFW_WALTEST_V01 = 3,
+ QMI_WLFW_OFF_V01 = 4,
+ QMI_WLFW_CCPM_V01 = 5,
+ QMI_WLFW_QVIT_V01 = 6,
+ QMI_WLFW_CALIBRATION_V01 = 7,
+ WLFW_DRIVER_MODE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_cal_temp_id_enum_v01 {
+ WLFW_CAL_TEMP_ID_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
+ WLFW_CAL_TEMP_ID_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_pipedir_enum_v01 {
+ WLFW_PIPEDIR_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+ WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((uint32_t)0x00)
+#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((uint32_t)0x01)
+#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((uint32_t)0x02)
+#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((uint32_t)0x04)
+#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((uint32_t)0x08)
+#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((uint32_t)0x10)
+
+#define QMI_WLFW_ALREADY_REGISTERED_V01 ((uint64_t)0x01ULL)
+#define QMI_WLFW_FW_READY_V01 ((uint64_t)0x02ULL)
+#define QMI_WLFW_MSA_READY_V01 ((uint64_t)0x04ULL)
+#define QMI_WLFW_FW_MEM_READY_V01 ((uint64_t)0x08ULL)
+
+#define QMI_WLFW_FW_REJUVENATE_V01 ((uint64_t)0x01ULL)
+
+struct wlfw_ce_tgt_pipe_cfg_s_v01 {
+ u32 pipe_num;
+ enum wlfw_pipedir_enum_v01 pipe_dir;
+ u32 nentries;
+ u32 nbytes_max;
+ u32 flags;
+};
+
+struct wlfw_ce_svc_pipe_cfg_s_v01 {
+ u32 service_id;
+ enum wlfw_pipedir_enum_v01 pipe_dir;
+ u32 pipe_num;
+};
+
+struct wlfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct wlfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct wlfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct wlfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct wlfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct wlfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+struct wlfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 fw_mem_ready_enable_valid;
+ u8 fw_mem_ready_enable;
+ u8 cold_boot_cal_done_enable_valid;
+ u8 cold_boot_cal_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+};
+
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46
+extern struct elem_info wlfw_ind_register_req_msg_v01_ei[];
+
+struct wlfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_ind_register_resp_msg_v01_ei[];
+
+struct wlfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_ready_ind_msg_v01_ei[];
+
+struct wlfw_msa_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_ind_msg_v01_ei[];
+
+struct wlfw_pin_connect_result_ind_msg_v01 {
+ u8 pwr_pin_result_valid;
+ u32 pwr_pin_result;
+ u8 phy_io_pin_result_valid;
+ u32 phy_io_pin_result;
+ u8 rf_pin_result_valid;
+ u32 rf_pin_result;
+};
+
+#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
+
+struct wlfw_wlan_mode_req_msg_v01 {
+ enum wlfw_driver_mode_enum_v01 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_wlan_mode_req_msg_v01_ei[];
+
+struct wlfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct wlfw_shadow_reg_cfg_s_v01
+ shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct wlfw_shadow_reg_v2_cfg_s_v01
+ shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
+extern struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
+
+struct wlfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cap_req_msg_v01_ei[];
+
+struct wlfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct wlfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct wlfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct wlfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct wlfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
+};
+
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 203
+extern struct elem_info wlfw_cap_resp_msg_v01_ei[];
+
+struct wlfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern struct elem_info wlfw_bdf_download_req_msg_v01_ei[];
+
+struct wlfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_bdf_download_resp_msg_v01_ei[];
+
+struct wlfw_cal_report_req_msg_v01 {
+ u32 meta_data_len;
+ enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
+};
+
+#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 24
+extern struct elem_info wlfw_cal_report_req_msg_v01_ei[];
+
+struct wlfw_cal_report_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_report_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_download_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+};
+
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
+
+struct wlfw_cal_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern struct elem_info wlfw_cal_download_req_msg_v01_ei[];
+
+struct wlfw_cal_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_download_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_update_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 total_size;
+};
+
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
+
+struct wlfw_cal_update_req_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 seg_id;
+};
+
+#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_cal_update_req_msg_v01_ei[];
+
+struct wlfw_cal_update_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
+extern struct elem_info wlfw_cal_update_resp_msg_v01_ei[];
+
+struct wlfw_msa_info_req_msg_v01 {
+ u64 msa_addr;
+ u32 size;
+};
+
+#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_msa_info_req_msg_v01_ei[];
+
+struct wlfw_msa_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u32 mem_region_info_len;
+ struct wlfw_memory_region_info_s_v01
+ mem_region_info[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
+};
+
+#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
+extern struct elem_info wlfw_msa_info_resp_msg_v01_ei[];
+
+struct wlfw_msa_ready_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_req_msg_v01_ei[];
+
+struct wlfw_msa_ready_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_msa_ready_resp_msg_v01_ei[];
+
+struct wlfw_ini_req_msg_v01 {
+ u8 enablefwlog_valid;
+ u8 enablefwlog;
+};
+
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_ini_req_msg_v01_ei[];
+
+struct wlfw_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_ini_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_read_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+};
+
+#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_athdiag_read_req_msg_v01_ei[];
+
+struct wlfw_athdiag_read_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
+extern struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_write_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
+extern struct elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+
+struct wlfw_athdiag_write_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+
+struct wlfw_vbatt_req_msg_v01 {
+ u64 voltage_uv;
+};
+
+#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_vbatt_req_msg_v01_ei[];
+
+struct wlfw_vbatt_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_vbatt_resp_msg_v01_ei[];
+
+struct wlfw_mac_addr_req_msg_v01 {
+ u8 mac_addr_valid;
+ u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+
+#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
+extern struct elem_info wlfw_mac_addr_req_msg_v01_ei[];
+
+struct wlfw_mac_addr_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+
+struct wlfw_host_cap_req_msg_v01 {
+ u8 daemon_support_valid;
+ u8 daemon_support;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_host_cap_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+ u32 size;
+};
+
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_fw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_cold_boot_cal_done_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+ u8 cause_for_rejuvenation_valid;
+ u8 cause_for_rejuvenation;
+ u8 requesting_sub_system_valid;
+ u8 requesting_sub_system;
+ u8 line_number_valid;
+ u16 line_number;
+ u8 function_name_valid;
+ char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+};
+
+#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
+extern struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_req_msg_v01 {
+ u8 mask_valid;
+ u64 mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 prev_mask_valid;
+ u64 prev_mask;
+ u8 curr_mask_valid;
+ u64 curr_mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
+extern struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+
+#endif
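The *_MAX_MSG_LEN constants above follow from the same framing: each TLV adds a 3-byte header (1-byte type plus 2-byte length) on top of its payload. A small userspace sketch that spot-checks two of the values; the 3-byte per-TLV overhead is an assumption based on standard QMI TLV framing, not something stated in this header:

#include <assert.h>

#define TLV_HDR 3	/* 1-byte TLV type + 2-byte length */

int main(void)
{
	/* wlfw_ini_req_msg_v01: one optional 1-byte field */
	assert(TLV_HDR + 1 == 4);	/* WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN */

	/* wlfw_wlan_mode_req_msg_v01: mandatory 4-byte enum 'mode'
	 * plus optional 1-byte 'hw_debug'
	 */
	assert((TLV_HDR + 4) + (TLV_HDR + 1) == 11);	/* WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN */

	return 0;
}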
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index dc22a29349cb..4a299f238c54 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -524,9 +524,8 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- wil_memcpy_fromio_halp_vote(wil_blob->wil, buf,
- (const volatile void __iomem *)
- wil_blob->blob.data + pos, count);
+ wil_memcpy_fromio_32(buf, (const void __iomem *)
+ wil_blob->blob.data + pos, count);
ret = copy_to_user(user_buf, buf, count);
kfree(buf);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 01a27335ec34..54d978d884ff 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -141,14 +141,6 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
}
}
-void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
- const volatile void __iomem *src, size_t count)
-{
- wil_halp_vote(wil);
- wil_memcpy_fromio_32(dst, src, count);
- wil_halp_unvote(wil);
-}
-
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count)
{
@@ -167,15 +159,6 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
}
}
-void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
- volatile void __iomem *dst,
- const void *src, size_t count)
-{
- wil_halp_vote(wil);
- wil_memcpy_toio_32(dst, src, count);
- wil_halp_unvote(wil);
-}
-
static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
u16 reason_code, bool from_event)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index f64323b03a3b..6111ef6408ea 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -795,12 +795,6 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
size_t count);
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count);
-void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
- const volatile void __iomem *src,
- size_t count);
-void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
- volatile void __iomem *dst,
- const void *src, size_t count);
void *wil_if_alloc(struct device *dev);
void wil_if_free(struct wil6210_priv *wil);
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index eade4f85632a..72695d3b9224 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/rpm-smd-regulator.h>
@@ -5573,34 +5574,84 @@ static irqreturn_t handle_global_irq(int irq, void *data)
return IRQ_HANDLED;
}
-void msm_pcie_destroy_irq(unsigned int irq, struct msm_pcie_dev_t *pcie_dev)
+static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
+ struct pci_dev *pdev)
{
- int pos, i;
+ struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
+ int bypass_en = 0;
+
+ if (!domain) {
+ PCIE_DBG(dev,
+ "PCIe: RC%d: client does not have an iommu domain\n",
+ dev->rc_idx);
+ return;
+ }
+
+ iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
+ if (!bypass_en) {
+ int ret;
+ phys_addr_t pcie_base_addr =
+ dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+ dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE);
+
+ ret = iommu_unmap(domain, iova, PAGE_SIZE);
+ if (ret != PAGE_SIZE)
+ PCIE_ERR(dev,
+ "PCIe: RC%d: failed to unmap QGIC address. ret = %d\n",
+ dev->rc_idx, ret);
+ }
+}
+
+void msm_pcie_destroy_irq(unsigned int irq)
+{
+ int pos;
+ struct pci_dev *pdev = irq_get_chip_data(irq);
+ struct msi_desc *entry = irq_get_msi_desc(irq);
+ struct msi_desc *firstentry;
struct msm_pcie_dev_t *dev;
+ u32 nvec;
+ int firstirq;
- if (pcie_dev)
- dev = pcie_dev;
- else
- dev = irq_get_chip_data(irq);
+ if (!pdev) {
+ pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
+ return;
+ }
+ dev = PCIE_BUS_PRIV_DATA(pdev->bus);
if (!dev) {
- pr_err("PCIe: device is null. IRQ:%d\n", irq);
+ pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
return;
}
+ if (!entry) {
+ PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n",
+ dev->rc_idx, irq);
+ return;
+ }
+
+ firstentry = first_pci_msi_entry(pdev);
+ if (!firstentry) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: firstentry msi desc is null. IRQ:%d\n",
+ dev->rc_idx, irq);
+ return;
+ }
+
+ firstirq = firstentry->irq;
+ nvec = (1 << entry->msi_attrib.multiple);
+
if (dev->msi_gicm_addr) {
PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);
- for (i = 0; i < MSM_PCIE_MAX_MSI; i++)
- if (irq == dev->msi[i].num)
- break;
- if (i == MSM_PCIE_MAX_MSI) {
+ if (irq < firstirq || irq > firstirq + nvec - 1) {
PCIE_ERR(dev,
"Could not find irq: %d in RC%d MSI table\n",
irq, dev->rc_idx);
return;
} else {
- pos = i;
+ if (irq == firstirq + nvec - 1)
+ msm_pcie_unmap_qgic_addr(dev, pdev);
+ pos = irq - firstirq;
}
} else {
PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
@@ -5620,7 +5671,7 @@ void msm_pcie_destroy_irq(unsigned int irq, struct msm_pcie_dev_t *pcie_dev)
void arch_teardown_msi_irq(unsigned int irq)
{
PCIE_GEN_DBG("irq %d deallocated\n", irq);
- msm_pcie_destroy_irq(irq, NULL);
+ msm_pcie_destroy_irq(irq);
}
void arch_teardown_msi_irqs(struct pci_dev *dev)
@@ -5639,7 +5690,7 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
continue;
nvec = 1 << entry->msi_attrib.multiple;
for (i = 0; i < nvec; i++)
- msm_pcie_destroy_irq(entry->irq + i, pcie_dev);
+ arch_teardown_msi_irq(entry->irq + i);
}
}
@@ -5701,6 +5752,7 @@ static int arch_setup_msi_irq_default(struct pci_dev *pdev,
PCIE_DBG(dev, "irq %d allocated\n", irq);
+ irq_set_chip_data(irq, pdev);
irq_set_msi_desc(irq, desc);
/* write msi vector and data */
@@ -5748,10 +5800,64 @@ again:
return irq;
}
+static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
+ struct pci_dev *pdev,
+ struct msi_msg *msg)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
+ int ret, bypass_en = 0;
+ dma_addr_t iova;
+ phys_addr_t pcie_base_addr, gicm_db_offset;
+
+ msg->address_hi = 0;
+ msg->address_lo = dev->msi_gicm_addr;
+
+ if (!domain) {
+ PCIE_DBG(dev,
+ "PCIe: RC%d: client does not have an iommu domain\n",
+ dev->rc_idx);
+ return 0;
+ }
+
+ iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
+
+ PCIE_DBG(dev,
+ "PCIe: RC%d: Stage 1 is %s for endpoint: %04x:%02x\n",
+ dev->rc_idx, bypass_en ? "bypass" : "enabled",
+ pdev->bus->number, pdev->devfn);
+
+ if (bypass_en)
+ return 0;
+
+ gicm_db_offset = dev->msi_gicm_addr -
+ rounddown(dev->msi_gicm_addr, PAGE_SIZE);
+ /*
+ * Use PCIe DBI address as the IOVA since client cannot
+ * use this address for their IOMMU mapping. This will
+ * prevent any conflicts between PCIe host and
+ * client's mapping.
+ */
+ pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+ iova = rounddown(pcie_base_addr, PAGE_SIZE);
+
+ ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
+ PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
+ dev->rc_idx, ret);
+ return -ENOMEM;
+ }
+
+ msg->address_lo = iova + gicm_db_offset;
+
+ return 0;
+}
+
static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
struct msi_desc *desc, int nvec)
{
- int irq, index, firstirq = 0;
+ int irq, index, ret, firstirq = 0;
struct msi_msg msg;
struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
@@ -5768,12 +5874,16 @@ static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
firstirq = irq;
irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_chip_data(irq, pdev);
}
/* write msi vector and data */
irq_set_msi_desc(firstirq, desc);
- msg.address_hi = 0;
- msg.address_lo = dev->msi_gicm_addr;
+
+ ret = msm_pcie_map_qgic_addr(dev, pdev, &msg);
+ if (ret)
+ return ret;
+
msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
write_msi_msg(firstirq, &msg);
@@ -5845,7 +5955,6 @@ static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
return 0;
}
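The QGIC MSI path added above maps exactly one page: it reuses the page-aligned PCIe DBI base as the IOVA (so the mapping cannot collide with the client's own IOMMU mappings) and points the MSI message at that IOVA plus the doorbell's offset within its page. A standalone sketch of the address arithmetic, using made-up example addresses rather than real platform values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ		4096ULL
#define ROUND_DOWN(x)	((x) & ~(PAGE_SZ - 1))

int main(void)
{
	uint64_t msi_gicm_addr = 0x17110040ULL;	/* hypothetical QGIC doorbell */
	uint64_t pcie_base     = 0x01c00000ULL;	/* hypothetical DBI base      */

	uint64_t db_offset = msi_gicm_addr - ROUND_DOWN(msi_gicm_addr);
	uint64_t iova      = ROUND_DOWN(pcie_base);

	/* the driver then does, in effect:
	 * iommu_map(domain, iova, ROUND_DOWN(msi_gicm_addr), PAGE_SZ, ...)
	 */
	printf("IOVA 0x%llx -> PA 0x%llx, MSI address_lo = 0x%llx\n",
	       (unsigned long long)iova,
	       (unsigned long long)ROUND_DOWN(msi_gicm_addr),
	       (unsigned long long)(iova + db_offset));
	return 0;
}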
diff --git a/drivers/pinctrl/qcom/pinctrl-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpi.c
index 4ca5d5fa0531..3fe41ee4c3c1 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpi.c
@@ -408,13 +408,19 @@ static void lpi_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
static int lpi_notifier_service_cb(struct notifier_block *this,
unsigned long opcode, void *ptr)
{
+ static bool initial_boot = true;
+
pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
switch (opcode) {
case AUDIO_NOTIFIER_SERVICE_DOWN:
+ if (initial_boot)
+ break;
lpi_dev_up = false;
break;
case AUDIO_NOTIFIER_SERVICE_UP:
+ if (initial_boot)
+ initial_boot = false;
lpi_dev_up = true;
break;
default:
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index c2902beaa0b8..08bffc344429 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -2824,10 +2824,8 @@ static int msm_gsi_probe(struct platform_device *pdev)
gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
"gsi", 0);
- if (gsi_ctx->ipc_logbuf == NULL) {
- GSIERR("failed to get ipc_logbuf\n");
- return -ENOMEM;
- }
+ if (gsi_ctx->ipc_logbuf == NULL)
+ GSIERR("failed to create IPC log, continue...\n");
gsi_ctx->dev = dev;
init_completion(&gsi_ctx->gen_ee_cmd_compl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index aa681d3eacaa..5b706b6f493b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2253,7 +2253,8 @@ static int ipa3_q6_set_ex_path_to_apps(void)
reg_write.pipeline_clear_options =
IPAHAL_HPS_CLEAR;
reg_write.offset =
- ipahal_get_reg_ofst(IPA_ENDP_STATUS_n);
+ ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
+ ep_idx);
ipahal_get_status_ep_valmask(
ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
&valmask);
diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h
index 4bce96102525..60e02fcb5e4b 100644
--- a/drivers/platform/msm/mhi/mhi.h
+++ b/drivers/platform/msm/mhi/mhi.h
@@ -95,9 +95,12 @@ struct bhi_ctxt_t {
u32 poll_timeout;
/* BHI/E vector table */
bool manage_boot; /* fw download done by MHI host */
+ bool support_rddm;
struct work_struct fw_load_work;
struct firmware_info firmware_info;
struct bhie_vec_table fw_table;
+ struct bhie_vec_table rddm_table;
+ size_t rddm_size;
};
enum MHI_CHAN_DIR {
@@ -140,12 +143,6 @@ enum MHI_CHAIN {
MHI_TRE_CHAIN_reserved = 0x80000000
};
-enum MHI_EVENT_RING_STATE {
- MHI_EVENT_RING_UINIT = 0x0,
- MHI_EVENT_RING_INIT = 0x1,
- MHI_EVENT_RING_reserved = 0x80000000
-};
-
enum MHI_STATE {
MHI_STATE_RESET = 0x0,
MHI_STATE_READY = 0x1,
@@ -154,9 +151,8 @@ enum MHI_STATE {
MHI_STATE_M2 = 0x4,
MHI_STATE_M3 = 0x5,
MHI_STATE_BHI = 0x7,
- MHI_STATE_SYS_ERR = 0x8,
- MHI_STATE_LIMIT = 0x9,
- MHI_STATE_reserved = 0x80000000
+ MHI_STATE_SYS_ERR = 0xFF,
+ MHI_STATE_LIMIT,
};
enum MHI_BRSTMODE {
@@ -168,22 +164,36 @@ enum MHI_BRSTMODE {
};
enum MHI_PM_STATE {
- MHI_PM_DISABLE = 0x0, /* MHI is not enabled */
- MHI_PM_POR = 0x1, /* Power On Reset State */
- MHI_PM_M0 = 0x2,
- MHI_PM_M1 = 0x4,
- MHI_PM_M1_M2_TRANSITION = 0x8, /* Register access not allowed */
- MHI_PM_M2 = 0x10,
- MHI_PM_M3_ENTER = 0x20,
- MHI_PM_M3 = 0x40,
- MHI_PM_M3_EXIT = 0x80,
+ MHI_PM_DISABLE = BIT(0), /* MHI is not enabled */
+ MHI_PM_POR = BIT(1), /* Power On Reset State */
+ MHI_PM_M0 = BIT(2),
+ MHI_PM_M1 = BIT(3),
+ MHI_PM_M1_M2_TRANSITION = BIT(4), /* Register access not allowed */
+ MHI_PM_M2 = BIT(5),
+ MHI_PM_M3_ENTER = BIT(6),
+ MHI_PM_M3 = BIT(7),
+ MHI_PM_M3_EXIT = BIT(8),
+ MHI_PM_SYS_ERR_DETECT = BIT(9),
+ MHI_PM_SYS_ERR_PROCESS = BIT(10),
+ MHI_PM_SHUTDOWN_PROCESS = BIT(11),
+ MHI_PM_LD_ERR_FATAL_DETECT = BIT(12), /* Link not accessible */
+ MHI_PM_SSR_PENDING = BIT(13)
+};
+
+struct mhi_pm_transitions {
+ enum MHI_PM_STATE from_state;
+ u32 to_states;
};
#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | MHI_PM_M1))
#define MHI_WAKE_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
MHI_PM_M1 | MHI_PM_M2))
-#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state > MHI_PM_DISABLE) && \
- (pm_state < MHI_PM_M3_EXIT))
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
+ MHI_PM_M1 | MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
+ MHI_PM_SHUTDOWN_PROCESS)))
+#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
+ pm_state >= MHI_PM_SYS_ERR_DETECT)
struct __packed mhi_event_ctxt {
u32 mhi_intmodt;
u32 mhi_event_er_type;
@@ -239,7 +249,6 @@ enum MHI_PKT_TYPE {
MHI_PKT_TYPE_TX_EVENT = 0x22,
MHI_PKT_TYPE_EE_EVENT = 0x40,
MHI_PKT_TYPE_STALE_EVENT, /* Internal event */
- MHI_PKT_TYPE_SYS_ERR_EVENT = 0xFF,
};
struct __packed mhi_tx_pkt {
@@ -393,7 +402,8 @@ enum STATE_TRANSITION {
STATE_TRANSITION_LINK_DOWN,
STATE_TRANSITION_WAKE,
STATE_TRANSITION_BHIE,
- STATE_TRANSITION_SYS_ERR,
+ STATE_TRANSITION_RDDM,
+ STATE_TRANSITION_SYS_ERR = MHI_STATE_SYS_ERR,
STATE_TRANSITION_MAX
};
@@ -402,7 +412,8 @@ enum MHI_EXEC_ENV {
MHI_EXEC_ENV_SBL = 0x1,
MHI_EXEC_ENV_AMSS = 0x2,
MHI_EXEC_ENV_BHIE = 0x3,
- MHI_EXEC_ENV_reserved = 0x80000000
+ MHI_EXEC_ENV_RDDM = 0x4,
+ MHI_EXEC_ENV_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
};
struct mhi_chan_info {
@@ -480,7 +491,7 @@ struct mhi_counters {
};
struct mhi_flags {
- u32 mhi_initialized;
+ bool mhi_initialized;
u32 link_up;
bool bb_required;
};
@@ -546,6 +557,7 @@ struct mhi_device_ctxt {
struct mhi_event_ring_cfg *ev_ring_props;
struct work_struct st_thread_worker;
struct work_struct process_m1_worker;
+ struct work_struct process_sys_err_worker;
struct mhi_wait_queues mhi_ev_wq;
struct dev_mmio_info mmio_info;
@@ -587,7 +599,8 @@ struct mhi_device_ctxt {
void (*assert_wake)(struct mhi_device_ctxt *mhi_dev_ctxt,
bool force_set);
void (*deassert_wake)(struct mhi_device_ctxt *mhi_dev_ctxt);
-
+ void (*status_cb)(enum MHI_CB_REASON, void *priv);
+ void *priv_data; /* private data for bus master */
struct completion cmd_complete;
};
@@ -612,7 +625,6 @@ struct mhi_event_ring_cfg {
*/
u32 priority;
enum MHI_RING_CLASS class;
- enum MHI_EVENT_RING_STATE state;
irqreturn_t (*mhi_handler_ptr)(int , void *);
};
#define MHI_EV_PRIORITY_TASKLET (1)
@@ -673,13 +685,12 @@ enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_cmd_pkt **cmd_pkt, u32 event_index);
int parse_cmd_event(struct mhi_device_ctxt *ctxt,
union mhi_event_pkt *event, u32 event_index);
-int mhi_test_for_device_ready(
- struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_test_for_device_reset(
- struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt);
int validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr);
int validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr);
void mhi_state_change_worker(struct work_struct *work);
+void mhi_sys_err_worker(struct work_struct *work);
int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION new_state);
int mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt);
@@ -709,7 +720,7 @@ int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
void *hcpu);
int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt, bool graceful);
int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt);
@@ -757,5 +768,13 @@ void mhi_ev_task(unsigned long data);
void process_event_ring(struct work_struct *work);
int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt);
int process_m3_transition(struct mhi_device_ctxt *mhi_dev_ctxt);
+enum MHI_PM_STATE __must_check mhi_tryset_pm_state(struct mhi_device_ctxt *,
+ enum MHI_PM_STATE);
+void mhi_reset_chan(struct mhi_device_ctxt *mhi_dev_ctxt, int chan);
+void free_tre_ring(struct mhi_device_ctxt *mhi_dev_ctxt, int chan);
+void process_disable_transition(enum MHI_PM_STATE transition_state,
+ struct mhi_device_ctxt *mhi_dev_ctxt);
+bool mhi_in_sys_err(struct mhi_device_ctxt *mhi_dev_ctxt);
+void bhi_exit(struct mhi_device_ctxt *mhi_dev_ctxt);
#endif
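Because the MHI_PM_STATE values are now one-hot bits, permission checks such as MHI_REG_ACCESS_VALID() reduce to a single mask test, and legal transitions can be captured in a from_state/to_states table (struct mhi_pm_transitions above). A minimal sketch of how such a table could be consulted, assuming mhi.h is included; the table contents here are illustrative and do not reflect the driver's actual transition policy:

/* Illustrative only: two example rows, not the real policy table. */
static const struct mhi_pm_transitions demo_transitions[] = {
	{ MHI_PM_POR, MHI_PM_M0 | MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT },
	{ MHI_PM_M0,  MHI_PM_M1 | MHI_PM_M3_ENTER | MHI_PM_SYS_ERR_DETECT },
};

static bool demo_transition_allowed(enum MHI_PM_STATE from,
				    enum MHI_PM_STATE to)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_transitions); i++)
		if (demo_transitions[i].from_state == from)
			return !!(demo_transitions[i].to_states & to);

	return false;	/* unknown source state: reject */
}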
diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c
index 0cc8967757ec..3bc8205b5f0f 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.c
+++ b/drivers/platform/msm/mhi/mhi_bhi.c
@@ -137,17 +137,36 @@ static int bhi_alloc_pbl_xfer(struct mhi_device_ctxt *mhi_dev_ctxt,
return 0;
}
-/* Load firmware via bhie protocol */
-static int bhi_load_bhie_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
+/* transfer firmware or ramdump via bhie protocol */
+static int bhi_bhie_transfer(struct mhi_device_ctxt *mhi_dev_ctxt,
+ struct bhie_vec_table *vec_table,
+ bool tx_vec_table)
{
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
- struct bhie_vec_table *fw_table = &bhi_ctxt->fw_table;
+ /* last element is the vector table */
const struct bhie_mem_info *bhie_mem_info =
- &fw_table->bhie_mem_info[fw_table->segment_count - 1];
+ &vec_table->bhie_mem_info[vec_table->segment_count - 1];
u32 val;
- const u32 tx_sequence = fw_table->sequence++;
+ const u32 tx_sequence = vec_table->sequence++;
unsigned long timeout;
rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+ unsigned int bhie_vecaddr_high_offs, bhie_vecaddr_low_offs,
+ bhie_vecsize_offs, bhie_vecdb_offs,
+ bhie_vecstatus_offs;
+
+ if (tx_vec_table) {
+ bhie_vecaddr_high_offs = BHIE_TXVECADDR_HIGH_OFFS;
+ bhie_vecaddr_low_offs = BHIE_TXVECADDR_LOW_OFFS;
+ bhie_vecsize_offs = BHIE_TXVECSIZE_OFFS;
+ bhie_vecdb_offs = BHIE_TXVECDB_OFFS;
+ bhie_vecstatus_offs = BHIE_TXVECSTATUS_OFFS;
+ } else {
+ bhie_vecaddr_high_offs = BHIE_RXVECADDR_HIGH_OFFS;
+ bhie_vecaddr_low_offs = BHIE_RXVECADDR_LOW_OFFS;
+ bhie_vecsize_offs = BHIE_RXVECSIZE_OFFS;
+ bhie_vecdb_offs = BHIE_RXVECDB_OFFS;
+ bhie_vecstatus_offs = BHIE_RXVECSTATUS_OFFS;
+ }
/* Program TX/RX Vector table */
read_lock_bh(pm_xfer_lock);
@@ -157,27 +176,17 @@ static int bhi_load_bhie_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
}
val = HIGH_WORD(bhie_mem_info->phys_addr);
- mhi_reg_write(mhi_dev_ctxt,
- bhi_ctxt->bhi_base,
- BHIE_TXVECADDR_HIGH_OFFS,
- val);
+ mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base,
+ bhie_vecaddr_high_offs, val);
val = LOW_WORD(bhie_mem_info->phys_addr);
- mhi_reg_write(mhi_dev_ctxt,
- bhi_ctxt->bhi_base,
- BHIE_TXVECADDR_LOW_OFFS,
- val);
+ mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base,
+ bhie_vecaddr_low_offs, val);
val = (u32)bhie_mem_info->size;
- mhi_reg_write(mhi_dev_ctxt,
- bhi_ctxt->bhi_base,
- BHIE_TXVECSIZE_OFFS,
- val);
+ mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, bhie_vecsize_offs, val);
/* Ring DB to begin Xfer */
- mhi_reg_write_field(mhi_dev_ctxt,
- bhi_ctxt->bhi_base,
- BHIE_TXVECDB_OFFS,
- BHIE_TXVECDB_SEQNUM_BMSK,
- BHIE_TXVECDB_SEQNUM_SHFT,
+ mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base, bhie_vecdb_offs,
+ BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
tx_sequence);
read_unlock_bh(pm_xfer_lock);
@@ -190,10 +199,10 @@ static int bhi_load_bhie_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
read_unlock_bh(pm_xfer_lock);
return -EIO;
}
- val = mhi_reg_read(bhi_ctxt->bhi_base, BHIE_TXVECSTATUS_OFFS);
+ val = mhi_reg_read(bhi_ctxt->bhi_base, bhie_vecstatus_offs);
read_unlock_bh(pm_xfer_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "TXVEC_STATUS:0x%x\n", val);
+ "%sVEC_STATUS:0x%x\n", tx_vec_table ? "TX" : "RX", val);
current_seq = (val & BHIE_TXVECSTATUS_SEQNUM_BMSK) >>
BHIE_TXVECSTATUS_SEQNUM_SHFT;
status = (val & BHIE_TXVECSTATUS_STATUS_BMSK) >>
@@ -201,17 +210,60 @@ static int bhi_load_bhie_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
if ((status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) &&
(current_seq == tx_sequence)) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Image transfer complete\n");
+ "%s transfer complete\n",
+ tx_vec_table ? "image" : "rddm");
return 0;
}
msleep(BHI_POLL_SLEEP_TIME_MS);
}
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Error xfering image via BHIE\n");
+ "Error xfer %s via BHIE\n", tx_vec_table ? "image" : "rddm");
return -EIO;
}
+static int bhi_rddm_graceful(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ int ret;
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ struct bhie_vec_table *rddm_table = &bhi_ctxt->rddm_table;
+ enum MHI_EXEC_ENV exec_env = mhi_dev_ctxt->dev_exec_env;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered with pm_state:0x%x exec_env:0x%x mhi_state:%s\n",
+ mhi_dev_ctxt->mhi_pm_state, exec_env,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ if (exec_env != MHI_EXEC_ENV_RDDM) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Not in RDDM exec env, exec_env:0x%x\n", exec_env);
+ return -EIO;
+ }
+
+ ret = bhi_bhie_transfer(mhi_dev_ctxt, rddm_table, false);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "rddm transfer status:%d\n", ret);
+ return ret;
+}
+
+/* collect ramdump from device using bhie protocol */
+int bhi_rddm(struct mhi_device_ctxt *mhi_dev_ctxt, bool in_panic)
+{
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ struct bhie_vec_table *rddm_table = &bhi_ctxt->rddm_table;
+
+ if (!rddm_table->bhie_mem_info) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "RDDM table == NULL\n");
+ return -ENOMEM;
+ }
+
+ if (!in_panic)
+ return bhi_rddm_graceful(mhi_dev_ctxt);
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "RDDM collection in panic not yet supported\n");
+ return -EINVAL;
+}
+
static int bhi_load_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
{
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
@@ -425,7 +477,8 @@ void bhi_firmware_download(struct work_struct *work)
return;
}
- ret = bhi_load_bhie_firmware(mhi_dev_ctxt);
+ ret = bhi_bhie_transfer(mhi_dev_ctxt, &mhi_dev_ctxt->bhi_ctxt.fw_table,
+ true);
if (ret) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to Load amss firmware\n");
@@ -437,6 +490,7 @@ int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt)
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
struct firmware_info *fw_info = &bhi_ctxt->firmware_info;
struct bhie_vec_table *fw_table = &bhi_ctxt->fw_table;
+ struct bhie_vec_table *rddm_table = &bhi_ctxt->rddm_table;
const struct firmware *firmware;
struct scatterlist *itr;
int ret, i;
@@ -503,7 +557,75 @@ int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt)
fw_table->sequence++;
release_firmware(firmware);
+ /* allocate memory and setup rddm table */
+ if (bhi_ctxt->support_rddm) {
+ ret = bhi_alloc_bhie_xfer(mhi_dev_ctxt, bhi_ctxt->rddm_size,
+ rddm_table);
+ if (!ret) {
+ for (i = 0, itr = &rddm_table->sg_list[1];
+ i < rddm_table->segment_count - 1; i++, itr++) {
+ size_t size = rddm_table->bhie_mem_info[i].size;
+
+ rddm_table->bhi_vec_entry[i].phys_addr =
+ rddm_table->bhie_mem_info[i].phys_addr;
+ rddm_table->bhi_vec_entry[i].size = size;
+ sg_set_buf(itr, rddm_table->
+ bhie_mem_info[i].aligned, size);
+ sg_dma_address(itr) =
+ rddm_table->bhie_mem_info[i].phys_addr;
+ sg_dma_len(itr) = size;
+ }
+ rddm_table->sequence++;
+ } else {
+ /* out of memory for rddm, not fatal error */
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+				"Could not allocate memory for rddm\n");
+ }
+ }
+
/* Schedule a worker thread and wait for BHI Event */
schedule_work(&bhi_ctxt->fw_load_work);
return 0;
}
+
+void bhi_exit(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ struct bhie_vec_table *fw_table = &bhi_ctxt->fw_table;
+ struct bhie_vec_table *rddm_table = &bhi_ctxt->rddm_table;
+ struct device *dev = &mhi_dev_ctxt->plat_dev->dev;
+ struct bhie_mem_info *bhie_mem_info;
+ int i;
+
+ if (bhi_ctxt->manage_boot == false)
+ return;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "freeing firmware and rddm memory\n");
+
+ /* free memory allocated for firmware */
+ kfree(fw_table->sg_list);
+ fw_table->sg_list = NULL;
+ bhie_mem_info = fw_table->bhie_mem_info;
+ for (i = 0; i < fw_table->segment_count; i++, bhie_mem_info++)
+ dma_free_coherent(dev, bhie_mem_info->alloc_size,
+ bhie_mem_info->pre_aligned,
+ bhie_mem_info->dma_handle);
+ fw_table->bhie_mem_info = NULL;
+ /* vector table is the last entry in bhie_mem_info */
+ fw_table->bhi_vec_entry = NULL;
+
+ if (!rddm_table->bhie_mem_info)
+ return;
+
+ /* free memory allocated for rddm */
+ kfree(rddm_table->sg_list);
+ rddm_table->sg_list = NULL;
+ bhie_mem_info = rddm_table->bhie_mem_info;
+ for (i = 0; i < rddm_table->segment_count; i++, bhie_mem_info++)
+ dma_free_coherent(dev, bhie_mem_info->alloc_size,
+ bhie_mem_info->pre_aligned,
+ bhie_mem_info->dma_handle);
+ rddm_table->bhie_mem_info = NULL;
+ rddm_table->bhi_vec_entry = NULL;
+}
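The hunks above generalize the old bhi_load_bhie_firmware() into bhi_bhie_transfer(), which programs either the TX (firmware download) or RX (ramdump collection) BHIE vector registers depending on the tx_vec_table flag. The offset variables (bhie_vecaddr_low_offs, bhie_vecsize_offs, bhie_vecdb_offs, bhie_vecstatus_offs) are chosen before the register writes shown earlier, but that selection sits outside the visible hunk; the snippet below is only a hedged sketch of what it presumably looks like, and the BHIE_RXVEC* macro names are assumptions about mhi_hwio.h rather than something this patch confirms.

	/* assumed offset selection at the top of bhi_bhie_transfer() */
	u32 bhie_vecaddr_low_offs, bhie_vecsize_offs;
	u32 bhie_vecdb_offs, bhie_vecstatus_offs;

	if (tx_vec_table) {
		bhie_vecaddr_low_offs = BHIE_TXVECADDR_LOW_OFFS;
		bhie_vecsize_offs = BHIE_TXVECSIZE_OFFS;
		bhie_vecdb_offs = BHIE_TXVECDB_OFFS;
		bhie_vecstatus_offs = BHIE_TXVECSTATUS_OFFS;
	} else {
		bhie_vecaddr_low_offs = BHIE_RXVECADDR_LOW_OFFS;   /* assumed macro */
		bhie_vecsize_offs = BHIE_RXVECSIZE_OFFS;           /* assumed macro */
		bhie_vecdb_offs = BHIE_RXVECDB_OFFS;               /* assumed macro */
		bhie_vecstatus_offs = BHIE_RXVECSTATUS_OFFS;       /* assumed macro */
	}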
diff --git a/drivers/platform/msm/mhi/mhi_bhi.h b/drivers/platform/msm/mhi/mhi_bhi.h
index 15137ba5dfdf..8f7b3d69347c 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.h
+++ b/drivers/platform/msm/mhi/mhi_bhi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -90,5 +90,6 @@
int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt);
void bhi_firmware_download(struct work_struct *work);
+int bhi_rddm(struct mhi_device_ctxt *mhi_dev_ctxt, bool in_panic);
#endif
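The teardown path bhi_exit() added in mhi_bhi.c above is called from mhi_pm.c (MHI_DEV_CTRL_DE_INIT), so a matching declaration is presumably added to this header as well; only the bhi_rddm() addition is visible in this hunk. A hedged sketch of the assumed prototype:

	/* assumed companion declaration, not visible in the hunk above */
	void bhi_exit(struct mhi_device_ctxt *mhi_dev_ctxt);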
diff --git a/drivers/platform/msm/mhi/mhi_event.c b/drivers/platform/msm/mhi/mhi_event.c
index ae677bae63dc..ea324339eac7 100644
--- a/drivers/platform/msm/mhi/mhi_event.c
+++ b/drivers/platform/msm/mhi/mhi_event.c
@@ -226,8 +226,7 @@ int init_local_ev_ring_by_type(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
if (GET_EV_PROPS(EV_TYPE,
- mhi_dev_ctxt->ev_ring_props[i].flags) == type &&
- !mhi_dev_ctxt->ev_ring_props[i].state) {
+ mhi_dev_ctxt->ev_ring_props[i].flags) == type) {
ret_val = mhi_init_local_event_ring(mhi_dev_ctxt,
mhi_dev_ctxt->ev_ring_props[i].nr_desc,
i);
@@ -292,7 +291,6 @@ int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
break;
}
}
- mhi_dev_ctxt->ev_ring_props[ring_index].state = MHI_EVENT_RING_INIT;
spin_unlock_irqrestore(lock, flags);
return ret_val;
}
@@ -309,6 +307,7 @@ void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
&mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[index];
local_ev_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[index];
+ spin_lock_irq(&local_ev_ctxt->ring_lock);
ev_ctxt->mhi_event_read_ptr = ev_ctxt->mhi_event_ring_base_addr;
ev_ctxt->mhi_event_write_ptr = ev_ctxt->mhi_event_ring_base_addr;
local_ev_ctxt->rp = local_ev_ctxt->base;
@@ -317,6 +316,5 @@ void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[index];
ev_ctxt->mhi_event_read_ptr = ev_ctxt->mhi_event_ring_base_addr;
ev_ctxt->mhi_event_write_ptr = ev_ctxt->mhi_event_ring_base_addr;
- /* Flush writes to MMIO */
- wmb();
+ spin_unlock_irq(&local_ev_ctxt->ring_lock);
}
diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c
index f1c562974816..64a09a2f9fbb 100644
--- a/drivers/platform/msm/mhi/mhi_iface.c
+++ b/drivers/platform/msm/mhi/mhi_iface.c
@@ -189,6 +189,7 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
INIT_WORK(&mhi_dev_ctxt->st_thread_worker, mhi_state_change_worker);
+ INIT_WORK(&mhi_dev_ctxt->process_sys_err_worker, mhi_sys_err_worker);
mutex_init(&mhi_dev_ctxt->pm_lock);
rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
diff --git a/drivers/platform/msm/mhi/mhi_isr.c b/drivers/platform/msm/mhi/mhi_isr.c
index 9aa9aeb7e646..70e4393f2f59 100644
--- a/drivers/platform/msm/mhi/mhi_isr.c
+++ b/drivers/platform/msm/mhi/mhi_isr.c
@@ -23,16 +23,18 @@ static int mhi_process_event_ring(
union mhi_event_pkt *local_rp = NULL;
union mhi_event_pkt *device_rp = NULL;
union mhi_event_pkt event_to_process;
- int ret_val = 0;
+ int count = 0;
struct mhi_event_ctxt *ev_ctxt = NULL;
unsigned long flags;
struct mhi_ring *local_ev_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
- mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "enter ev_index:%u\n", ev_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Enter ev_index:%u\n", ev_index);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Invalid MHI PM State\n");
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_dev_ctxt->mhi_pm_state))) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "No event access, PM_STATE:0x%x\n",
+ mhi_dev_ctxt->mhi_pm_state);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
return -EIO;
}
@@ -98,6 +100,7 @@ static int mhi_process_event_ring(
{
u32 chan;
struct mhi_ring *ring;
+ unsigned long flags;
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
chan = MHI_EV_READ_CHID(EV_CHID, &event_to_process);
@@ -107,12 +110,12 @@ static int mhi_process_event_ring(
break;
}
ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
- spin_lock_bh(&ring->ring_lock);
+ spin_lock_irqsave(&ring->ring_lock, flags);
if (ring->ch_state == MHI_CHAN_STATE_ENABLED)
parse_xfer_event(mhi_dev_ctxt,
&event_to_process,
ev_index);
- spin_unlock_bh(&ring->ring_lock);
+ spin_unlock_irqrestore(&ring->ring_lock, flags);
__pm_relax(&mhi_dev_ctxt->w_lock);
event_quota--;
break;
@@ -136,18 +139,41 @@ static int mhi_process_event_ring(
mhi_dev_ctxt->mhi_state =
mhi_get_m_state(mhi_dev_ctxt);
if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1;
- mhi_dev_ctxt->counters.m0_m1++;
- schedule_work(&mhi_dev_ctxt->
- process_m1_worker);
+ enum MHI_PM_STATE state;
+
+ state = mhi_tryset_pm_state
+ (mhi_dev_ctxt, MHI_PM_M1);
+ if (state == MHI_PM_M1) {
+ mhi_dev_ctxt->counters.m0_m1++;
+ schedule_work
+ (&mhi_dev_ctxt->
+ process_m1_worker);
+ }
}
write_unlock_irqrestore(&mhi_dev_ctxt->
- pm_xfer_lock,
- flags);
+ pm_xfer_lock, flags);
break;
case STATE_TRANSITION_M3:
process_m3_transition(mhi_dev_ctxt);
break;
+ case STATE_TRANSITION_SYS_ERR:
+ {
+ enum MHI_PM_STATE new_state;
+ unsigned long flags;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI System Error Detected\n");
+ write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock,
+ flags);
+ new_state = mhi_tryset_pm_state
+ (mhi_dev_ctxt, MHI_PM_SYS_ERR_DETECT);
+ write_unlock_irqrestore
+ (&mhi_dev_ctxt->pm_xfer_lock, flags);
+ if (new_state == MHI_PM_SYS_ERR_DETECT)
+ schedule_work(&mhi_dev_ctxt->
+ process_sys_err_worker);
+ break;
+ }
default:
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Unsupported STE received ring 0x%x State:%s\n",
@@ -158,28 +184,36 @@ static int mhi_process_event_ring(
}
case MHI_PKT_TYPE_EE_EVENT:
{
- enum STATE_TRANSITION new_state;
+ enum STATE_TRANSITION new_state = 0;
+ enum MHI_EXEC_ENV event =
+ MHI_READ_EXEC_ENV(&event_to_process);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "MHI EEE received ring 0x%x\n", ev_index);
+ "MHI EE received ring 0x%x event:0x%x\n",
+ ev_index, event);
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
- switch (MHI_READ_EXEC_ENV(&event_to_process)) {
+ switch (event) {
case MHI_EXEC_ENV_SBL:
new_state = STATE_TRANSITION_SBL;
- mhi_init_state_transition(mhi_dev_ctxt,
- new_state);
break;
case MHI_EXEC_ENV_AMSS:
new_state = STATE_TRANSITION_AMSS;
- mhi_init_state_transition(mhi_dev_ctxt,
- new_state);
break;
case MHI_EXEC_ENV_BHIE:
new_state = STATE_TRANSITION_BHIE;
+ break;
+ case MHI_EXEC_ENV_RDDM:
+ new_state = STATE_TRANSITION_RDDM;
+ break;
+ default:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Invalid EE Event 0x%x received\n",
+ event);
+ }
+ if (new_state)
mhi_init_state_transition(mhi_dev_ctxt,
new_state);
- }
break;
}
case MHI_PKT_TYPE_STALE_EVENT:
@@ -187,11 +221,6 @@ static int mhi_process_event_ring(
"Stale Event received for chan:%u\n",
MHI_EV_READ_CHID(EV_CHID, local_rp));
break;
- case MHI_PKT_TYPE_SYS_ERR_EVENT:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "MHI System Error Detected. Triggering Reset\n");
- BUG();
- break;
default:
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Unsupported packet type code 0x%x\n",
@@ -207,13 +236,13 @@ static int mhi_process_event_ring(
ev_index,
ev_ctxt->mhi_event_read_ptr);
spin_unlock_irqrestore(&local_ev_ctxt->ring_lock, flags);
- ret_val = 0;
+ count++;
}
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "exit ev_index:%u\n", ev_index);
- return ret_val;
+ return count;
}
void mhi_ev_task(unsigned long data)
@@ -222,10 +251,40 @@ void mhi_ev_task(unsigned long data)
struct mhi_device_ctxt *mhi_dev_ctxt =
mhi_ring->mhi_dev_ctxt;
int ev_index = mhi_ring->index;
+ const int CTRL_EV = 0; /* event ring for ctrl events */
+ int ret;
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Enter\n");
+
/* Process event ring */
- mhi_process_event_ring(mhi_dev_ctxt, ev_index, U32_MAX);
+ ret = mhi_process_event_ring(mhi_dev_ctxt, ev_index, U32_MAX);
+ /*
+	 * If we received an MSI for the primary event ring with no events to
+	 * process, check the status register to see if the device entered the
+	 * SYSERR state
+ */
+ if (ev_index == CTRL_EV && !ret) {
+ bool in_sys_err = false;
+ unsigned long flags;
+ enum MHI_PM_STATE new_state;
+
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ in_sys_err = mhi_in_sys_err(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ if (in_sys_err) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI System Error Detected\n");
+ write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ new_state = mhi_tryset_pm_state(mhi_dev_ctxt,
+ MHI_PM_SYS_ERR_DETECT);
+ write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock,
+ flags);
+ if (new_state == MHI_PM_SYS_ERR_DETECT)
+ schedule_work(&mhi_dev_ctxt->
+ process_sys_err_worker);
+ }
+ }
enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, ev_index));
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Exit\n");
@@ -258,7 +317,7 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
ret_val = mhi_process_event_ring(client_config->mhi_dev_ctxt,
client_config->event_ring_index,
1);
- if (ret_val)
+ if (ret_val < 0)
mhi_log(client_config->mhi_dev_ctxt, MHI_MSG_INFO,
"NAPI failed to process event ring\n");
return &(client_config->result);
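With mhi_process_event_ring() now returning the number of processed events instead of a bare 0, callers can tell "nothing to do" apart from a real failure: mhi_poll() above only warns on a negative return, and mhi_ev_task() treats a zero count on the control ring as a hint to poll MHISTATUS for SYSERR. A minimal, hedged sketch of that caller-side contract follows; it omits the pm_xfer_lock handling and the mhi_tryset_pm_state() step shown above for brevity.

	int processed;

	processed = mhi_process_event_ring(mhi_dev_ctxt, ev_index, U32_MAX);
	if (processed < 0)
		/* event access is not permitted in the current PM state */
		mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
			"ring %d returned %d\n", ev_index, processed);
	else if (!processed && ev_index == CTRL_EV && mhi_in_sys_err(mhi_dev_ctxt))
		/* spurious MSI on the control ring with SYSERR set */
		schedule_work(&mhi_dev_ctxt->process_sys_err_worker);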
diff --git a/drivers/platform/msm/mhi/mhi_main.c b/drivers/platform/msm/mhi/mhi_main.c
index 644004672cd2..46baf7332900 100644
--- a/drivers/platform/msm/mhi/mhi_main.c
+++ b/drivers/platform/msm/mhi/mhi_main.c
@@ -30,11 +30,6 @@
#include "mhi_bhi.h"
#include "mhi_trace.h"
-static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
- union mhi_cmd_pkt *cmd_pkt);
-static void disable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
- struct mhi_ring *bb_ctxt);
-
static int enable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_ring *bb_ctxt,
int nr_el,
@@ -306,6 +301,47 @@ static int populate_tre_ring(struct mhi_client_config *client_config)
return 0;
}
+void mhi_notify_client(struct mhi_client_handle *client_handle,
+ enum MHI_CB_REASON reason)
+{
+ struct mhi_cb_info cb_info = {0};
+ struct mhi_result result = {0};
+ struct mhi_client_config *client_config;
+
+ cb_info.result = NULL;
+ cb_info.cb_reason = reason;
+
+ if (client_handle == NULL)
+ return;
+
+ client_config = client_handle->client_config;
+
+ if (client_config->client_info.mhi_client_cb) {
+ result.user_data = client_config->user_data;
+ cb_info.chan = client_config->chan_info.chan_nr;
+ cb_info.result = &result;
+ mhi_log(client_config->mhi_dev_ctxt, MHI_MSG_INFO,
+ "Calling back for chan %d, reason %d\n",
+ cb_info.chan,
+ reason);
+ client_config->client_info.mhi_client_cb(&cb_info);
+ }
+}
+
+void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
+ enum MHI_CB_REASON reason)
+{
+ int i;
+ struct mhi_client_handle *client_handle = NULL;
+
+ for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
+ if (VALID_CHAN_NR(i)) {
+ client_handle = mhi_dev_ctxt->client_handle_list[i];
+ mhi_notify_client(client_handle, reason);
+ }
+ }
+}
+
int mhi_open_channel(struct mhi_client_handle *client_handle)
{
int ret_val = 0;
@@ -389,10 +425,10 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
ret_val = 0;
}
- spin_lock(&cfg->event_lock);
+ spin_lock_irq(&cfg->event_lock);
cmd_event_pkt = cfg->cmd_event_pkt;
cmd_pkt = cfg->cmd_pkt;
- spin_unlock(&cfg->event_lock);
+ spin_unlock_irq(&cfg->event_lock);
ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
((union mhi_event_pkt *)&cmd_event_pkt));
@@ -628,10 +664,7 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
}
error_completion:
- ret_val = reset_chan_cmd(mhi_dev_ctxt, &cmd_pkt);
- if (ret_val)
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Error resetting cmd ret:%d\n", ret_val);
+ mhi_reset_chan(mhi_dev_ctxt, chan);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
@@ -1391,11 +1424,8 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
}
-static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
- union mhi_cmd_pkt *cmd_pkt)
+void mhi_reset_chan(struct mhi_device_ctxt *mhi_dev_ctxt, int chan)
{
- u32 chan = 0;
- int ret_val = 0;
struct mhi_ring *local_chan_ctxt;
struct mhi_ring *ev_ring;
struct mhi_chan_ctxt *chan_ctxt;
@@ -1405,14 +1435,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_event_pkt *local_rp = NULL;
union mhi_event_pkt *device_rp = NULL;
- MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-
- if (!VALID_CHAN_NR(chan)) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Bad channel number for CCE\n");
- return -EINVAL;
- }
-
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
ev_ring = &mhi_dev_ctxt->
@@ -1420,7 +1442,7 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
ev_ctxt = &mhi_dev_ctxt->
dev_space.ring_ctxt.ec_list[chan_ctxt->mhi_event_ring_index];
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Processed cmd reset event\n");
+ "Marking all events for chan:%d as stale\n", chan);
/* Clear all stale events related to Channel */
spin_lock_irqsave(&ev_ring->ring_lock, flags);
@@ -1483,7 +1505,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Reset complete.\n");
- return ret_val;
}
enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -1510,11 +1531,11 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
struct mhi_tx_pkt *pending_trb = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
struct mhi_ring *local_chan_ctxt = NULL;
- struct mhi_chan_cfg *cfg;
struct mhi_ring *bb_ctxt = NULL;
struct mhi_buf_info *bb = NULL;
struct mhi_client_config *client_config;
- int chan = 0, r = 0;
+ int chan = 0, r = -EIO;
+ unsigned long flags;
if (!client_handle || !result)
return -EINVAL;
@@ -1525,36 +1546,38 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
chan = client_config->chan_info.chan_nr;
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
- cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
- mutex_lock(&cfg->chan_lock);
- if (bb_ctxt->rp != bb_ctxt->ack_rp) {
- pending_trb = (struct mhi_tx_pkt *)(local_chan_ctxt->ack_rp);
- result->flags = pending_trb->info;
- bb = bb_ctxt->ack_rp;
- if (bb->bb_active) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
- "Bounce buffer active chan %d, copying data\n",
- chan);
+ spin_lock_irqsave(&local_chan_ctxt->ring_lock, flags);
+ if (local_chan_ctxt->ch_state == MHI_CHAN_STATE_ENABLED) {
+ if (bb_ctxt->rp != bb_ctxt->ack_rp) {
+ pending_trb =
+ (struct mhi_tx_pkt *)(local_chan_ctxt->ack_rp);
+ result->flags = pending_trb->info;
+ bb = bb_ctxt->ack_rp;
+ if (bb->bb_active) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Bounce buffer active chan %d, copying data\n",
+ chan);
+ }
+ result->buf_addr = bb->client_buf;
+ result->bytes_xferd = bb->filled_size;
+ result->transaction_status = 0;
+ r = delete_element(local_chan_ctxt,
+ &local_chan_ctxt->ack_rp,
+ &local_chan_ctxt->rp, NULL);
+ WARN_ON(r);
+ r = delete_element(bb_ctxt,
+ &bb_ctxt->ack_rp,
+ &bb_ctxt->rp, NULL);
+ WARN_ON(r);
+ } else {
+ result->buf_addr = 0;
+ result->bytes_xferd = 0;
+ r = -ENODATA;
}
- result->buf_addr = bb->client_buf;
- result->bytes_xferd = bb->filled_size;
- result->transaction_status = 0;
- r = delete_element(local_chan_ctxt,
- &local_chan_ctxt->ack_rp,
- &local_chan_ctxt->rp, NULL);
- BUG_ON(r);
- r = delete_element(bb_ctxt,
- &bb_ctxt->ack_rp,
- &bb_ctxt->rp, NULL);
- BUG_ON(r);
- } else {
- result->buf_addr = 0;
- result->bytes_xferd = 0;
- r = -ENODATA;
}
- mutex_unlock(&cfg->chan_lock);
+ spin_unlock_irqrestore(&local_chan_ctxt->ring_lock, flags);
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Exited Result: Buf addr: 0x%p Bytes xfed 0x%zx chan %d\n",
result->buf_addr, result->bytes_xferd, chan);
@@ -1647,9 +1670,10 @@ void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
if (unlikely(force_set)) {
spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
atomic_inc(&mhi_dev_ctxt->counters.device_wake);
- mhi_write_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.chan_db_addr,
- MHI_DEV_WAKE_DB, 1);
+ if (MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ mhi_write_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ MHI_DEV_WAKE_DB, 1);
spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
} else {
if (likely(atomic_add_unless(&mhi_dev_ctxt->
@@ -1744,7 +1768,7 @@ EXPORT_SYMBOL(mhi_deregister_channel);
int mhi_register_device(struct mhi_device *mhi_device,
const char *node_name,
- unsigned long user_data)
+ void *user_data)
{
const struct device_node *of_node;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL, *itr;
@@ -1793,6 +1817,7 @@ int mhi_register_device(struct mhi_device *mhi_device,
mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
INIT_WORK(&mhi_dev_ctxt->st_thread_worker, mhi_state_change_worker);
+ INIT_WORK(&mhi_dev_ctxt->process_sys_err_worker, mhi_sys_err_worker);
mutex_init(&mhi_dev_ctxt->pm_lock);
rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
@@ -1828,11 +1853,15 @@ int mhi_register_device(struct mhi_device *mhi_device,
if (!core_info->bar0_base || !core_info->irq_base)
return -EINVAL;
+ if (mhi_device->support_rddm && !mhi_device->rddm_size)
+ return -EINVAL;
mhi_dev_ctxt->bus_master_rt_get = mhi_device->pm_runtime_get;
- mhi_dev_ctxt->bus_master_rt_put = mhi_device->pm_runtime_noidle;
- if (!mhi_dev_ctxt->bus_master_rt_get ||
- !mhi_dev_ctxt->bus_master_rt_put)
+ mhi_dev_ctxt->bus_master_rt_put = mhi_device->pm_runtime_put_noidle;
+ mhi_dev_ctxt->status_cb = mhi_device->status_cb;
+ mhi_dev_ctxt->priv_data = user_data;
+ if (!mhi_dev_ctxt->bus_master_rt_get || !mhi_dev_ctxt->bus_master_rt_put
+ || !mhi_dev_ctxt->status_cb)
return -EINVAL;
ret = mhi_ctxt_init(mhi_dev_ctxt);
@@ -1849,12 +1878,44 @@ int mhi_register_device(struct mhi_device *mhi_device,
mhi_dev_ctxt->runtime_get = mhi_slave_mode_runtime_get;
mhi_dev_ctxt->runtime_put = mhi_slave_mode_runtime_put;
mhi_device->mhi_dev_ctxt = mhi_dev_ctxt;
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit success\n");
+ /* Store RDDM information */
+ if (mhi_device->support_rddm) {
+ mhi_dev_ctxt->bhi_ctxt.support_rddm = true;
+ mhi_dev_ctxt->bhi_ctxt.rddm_size = mhi_device->rddm_size;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+			"Device supports rddm of size:0x%lx bytes\n",
+ mhi_dev_ctxt->bhi_ctxt.rddm_size);
+ }
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit success\n");
return 0;
}
EXPORT_SYMBOL(mhi_register_device);
+int mhi_xfer_rddm(struct mhi_device *mhi_device, enum mhi_rddm_segment seg,
+ struct scatterlist **sg_list)
+{
+ struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->mhi_dev_ctxt;
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ int segments = 0;
+
+ *sg_list = NULL;
+ switch (seg) {
+ case MHI_RDDM_FW_SEGMENT:
+ *sg_list = bhi_ctxt->fw_table.sg_list;
+ segments = bhi_ctxt->fw_table.segment_count;
+ break;
+ case MHI_RDDM_RD_SEGMENT:
+ *sg_list = bhi_ctxt->rddm_table.sg_list;
+ segments = bhi_ctxt->rddm_table.segment_count;
+ break;
+ }
+ return segments;
+}
+EXPORT_SYMBOL(mhi_xfer_rddm);
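mhi_xfer_rddm() hands the caller the pre-built scatterlist for either the firmware image or the ramdump segments and returns the segment count. Below is a hedged usage sketch from a bus-master client; the surrounding logic (summing the dump size before handing the list to a ramdump device) is illustrative only and not part of this patch.

	struct scatterlist *sg_list, *itr;
	size_t total = 0;
	int nr_seg, i;

	nr_seg = mhi_xfer_rddm(mhi_device, MHI_RDDM_RD_SEGMENT, &sg_list);
	if (!nr_seg || !sg_list)
		return -EINVAL;

	for_each_sg(sg_list, itr, nr_seg, i)
		total += sg_dma_len(itr);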
+
void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
uintptr_t chan,
diff --git a/drivers/platform/msm/mhi/mhi_pm.c b/drivers/platform/msm/mhi/mhi_pm.c
index d7a4f7aa93ef..caa34eadf8ea 100644
--- a/drivers/platform/msm/mhi/mhi_pm.c
+++ b/drivers/platform/msm/mhi/mhi_pm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -62,6 +62,7 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
bool force_m3)
{
int r = 0;
+ enum MHI_PM_STATE new_state;
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
@@ -79,13 +80,20 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
}
if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake) &&
- force_m3 == false)){
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Busy, Aborting M3\n");
+ force_m3 == false)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Busy, Aborting M3\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
return -EBUSY;
}
+ if (unlikely(!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error, no register access, PM_STATE:0x%x\n",
+ mhi_dev_ctxt->mhi_pm_state);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ return -EIO;
+ }
+
mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
@@ -93,7 +101,7 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
if (!r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to get M0||M1 event, timeout, current state:%s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return -EIO;
@@ -102,7 +110,14 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Allowing M3 State\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_ENTER;
+ new_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M3_ENTER);
+ if (unlikely(new_state != MHI_PM_M3_ENTER)) {
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error setting PM_STATE from 0x%x to 0x%x\n",
+ new_state, MHI_PM_M3_ENTER);
+ return -EIO;
+ }
mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Waiting for M3 completion.\n");
@@ -110,7 +125,7 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
if (!r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to get M3 event, timeout, current state:%s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return -EIO;
@@ -122,6 +137,7 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
static int mhi_pm_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r;
+ enum MHI_PM_STATE cur_state;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State:0x%x %s\n",
@@ -129,11 +145,16 @@ static int mhi_pm_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M3_EXIT);
+ if (unlikely(cur_state != MHI_PM_M3_EXIT)) {
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error setting PM_STATE from 0x%x to 0x%x\n",
+ cur_state, MHI_PM_M3_EXIT);
+ return -EAGAIN;
+ }
/* Set and wait for M0 Event */
- write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
@@ -164,7 +185,7 @@ int mhi_runtime_suspend(struct device *dev)
mutex_unlock(&mhi_dev_ctxt->pm_lock);
return r;
}
- r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
+ r = mhi_turn_off_pcie_link(mhi_dev_ctxt, true);
if (r) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to Turn off link ret:%d\n", r);
@@ -294,6 +315,21 @@ unlock_pm_lock:
return ret_val;
}
+static void mhi_pm_slave_mode_power_off(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered with pm_state:0x%x MHI_STATE:%s\n",
+ mhi_dev_ctxt->mhi_pm_state,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI already in disabled state\n");
+ return;
+ }
+ process_disable_transition(MHI_PM_SHUTDOWN_PROCESS, mhi_dev_ctxt);
+}
+
static int mhi_pm_slave_mode_suspend(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r;
@@ -367,7 +403,7 @@ ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr,
return count;
}
-int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
+int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt, bool graceful)
{
struct pci_dev *pcie_dev;
int r = 0;
@@ -376,22 +412,23 @@ int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
pcie_dev = mhi_dev_ctxt->pcie_device;
if (0 == mhi_dev_ctxt->flags.link_up) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Link already marked as down, nothing to do\n");
goto exit;
}
- r = pci_save_state(pcie_dev);
- if (r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
- "Failed to save pcie state ret: %d\n", r);
- }
- mhi_dev_ctxt->core.pcie_state = pci_store_saved_state(pcie_dev);
- pci_disable_device(pcie_dev);
- r = pci_set_power_state(pcie_dev, PCI_D3hot);
- if (r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Failed to set pcie power state to D3hot ret:%d\n", r);
+ if (graceful) {
+ r = pci_save_state(pcie_dev);
+ if (r)
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to save pcie state ret: %d\n", r);
+ mhi_dev_ctxt->core.pcie_state = pci_store_saved_state(pcie_dev);
+ pci_disable_device(pcie_dev);
+ r = pci_set_power_state(pcie_dev, PCI_D3hot);
+ if (r)
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to set pcie power state to D3hot ret:%d\n",
+ r);
}
r = msm_pcie_pm_control(MSM_PCIE_SUSPEND,
@@ -430,21 +467,26 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Could not set bus frequency ret: %d\n", r);
- r = msm_pcie_pm_control(MSM_PCIE_RESUME,
- pcie_dev->bus->number,
- pcie_dev,
- NULL,
- 0);
+ r = msm_pcie_pm_control(MSM_PCIE_RESUME, pcie_dev->bus->number,
+ pcie_dev, NULL, 0);
if (r) {
mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to resume pcie bus ret %d\n", r);
goto exit;
}
+ r = pci_set_power_state(pcie_dev, PCI_D0);
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to set PCI_D0 state ret:%d\n", r);
+ goto exit;
+ }
r = pci_enable_device(pcie_dev);
- if (r)
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Failed to enable device ret:%d\n", r);
+ goto exit;
+ }
pci_load_and_free_saved_state(pcie_dev,
&mhi_dev_ctxt->core.pcie_state);
@@ -457,6 +499,44 @@ exit:
return r;
}
+void mhi_link_state_cb(struct msm_pcie_notify *notify)
+{
+ struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
+
+ if (!notify || !notify->data) {
+ pr_err("%s: incomplete handle received\n", __func__);
+ return;
+ }
+
+ mhi_dev_ctxt = notify->data;
+ switch (notify->event) {
+ case MSM_PCIE_EVENT_LINKDOWN:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Received MSM_PCIE_EVENT_LINKDOWN\n");
+ break;
+ case MSM_PCIE_EVENT_LINKUP:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Received MSM_PCIE_EVENT_LINKUP\n");
+ mhi_dev_ctxt->counters.link_up_cntr++;
+ break;
+ case MSM_PCIE_EVENT_WAKEUP:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Received MSM_PCIE_EVENT_WAKE\n");
+ __pm_stay_awake(&mhi_dev_ctxt->w_lock);
+ __pm_relax(&mhi_dev_ctxt->w_lock);
+
+ if (mhi_dev_ctxt->flags.mhi_initialized) {
+ mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
+ }
+ break;
+ default:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Received bad link event\n");
+ return;
+ }
+}
+
int mhi_pm_control_device(struct mhi_device *mhi_device,
enum mhi_dev_ctrl ctrl)
{
@@ -477,9 +557,34 @@ int mhi_pm_control_device(struct mhi_device *mhi_device,
return mhi_pm_slave_mode_suspend(mhi_dev_ctxt);
case MHI_DEV_CTRL_RESUME:
return mhi_pm_slave_mode_resume(mhi_dev_ctxt);
- default:
+ case MHI_DEV_CTRL_POWER_OFF:
+ mhi_pm_slave_mode_power_off(mhi_dev_ctxt);
+ break;
+ case MHI_DEV_CTRL_RDDM:
+ return bhi_rddm(mhi_dev_ctxt, false);
+ case MHI_DEV_CTRL_DE_INIT:
+ if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_DISABLE)
+ process_disable_transition(MHI_PM_SHUTDOWN_PROCESS,
+ mhi_dev_ctxt);
+ bhi_exit(mhi_dev_ctxt);
+ break;
+ case MHI_DEV_CTRL_NOTIFY_LINK_ERROR:
+ {
+ enum MHI_PM_STATE cur_state;
+
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt,
+ MHI_PM_LD_ERR_FATAL_DETECT);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(cur_state != MHI_PM_LD_ERR_FATAL_DETECT))
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_LD_ERR_FATAL_DETECT, cur_state);
break;
}
- return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
EXPORT_SYMBOL(mhi_pm_control_device);
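A hedged sketch of how a slave-mode bus master might drive the new control codes handled above after a device crash; the trigger and the surrounding error handling are assumptions, not part of this patch.

	int ret;

	/* device signalled SYS_ERR and entered the RDDM exec env: pull the dump */
	ret = mhi_pm_control_device(mhi_device, MHI_DEV_CTRL_RDDM);
	if (ret)
		pr_err("rddm collection failed: %d\n", ret);

	/* gracefully shut MHI down, then free BHI firmware/rddm memory */
	mhi_pm_control_device(mhi_device, MHI_DEV_CTRL_POWER_OFF);
	mhi_pm_control_device(mhi_device, MHI_DEV_CTRL_DE_INIT);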
diff --git a/drivers/platform/msm/mhi/mhi_ssr.c b/drivers/platform/msm/mhi/mhi_ssr.c
index 22481dede21a..9f18b1e7ef85 100644
--- a/drivers/platform/msm/mhi/mhi_ssr.c
+++ b/drivers/platform/msm/mhi/mhi_ssr.c
@@ -13,12 +13,8 @@
#include <linux/pm_runtime.h>
#include <mhi_sys.h>
#include <mhi.h>
-#include <mhi_bhi.h>
-#include <mhi_hwio.h>
-
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
-
#include <linux/esoc_client.h>
static int mhi_ssr_notify_cb(struct notifier_block *nb,
@@ -26,35 +22,45 @@ static int mhi_ssr_notify_cb(struct notifier_block *nb,
{
struct mhi_device_ctxt *mhi_dev_ctxt =
container_of(nb, struct mhi_device_ctxt, mhi_ssr_nb);
+ enum MHI_PM_STATE cur_state;
+ struct notif_data *notif_data = (struct notif_data *)data;
+ bool crashed = notif_data->crashed;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+		"Received ESOC notification:%lu crashed:%d\n", action, crashed);
switch (action) {
- case SUBSYS_BEFORE_POWERUP:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event BEFORE_POWERUP\n");
- break;
- case SUBSYS_AFTER_POWERUP:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event AFTER_POWERUP\n");
- break;
- case SUBSYS_POWERUP_FAILURE:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event POWERUP_FAILURE\n");
- break;
case SUBSYS_BEFORE_SHUTDOWN:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event BEFORE_SHUTDOWN\n");
+ /*
+		 * Update internal state only; we'll clean up the MHI context
+		 * after the device has shut down completely.
+ */
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt,
+ MHI_PM_LD_ERR_FATAL_DETECT);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(cur_state != MHI_PM_LD_ERR_FATAL_DETECT))
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_LD_ERR_FATAL_DETECT, cur_state);
break;
case SUBSYS_AFTER_SHUTDOWN:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event AFTER_SHUTDOWN\n");
- break;
- case SUBSYS_RAMDUMP_NOTIFICATION:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event RAMDUMP\n");
+ if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_DISABLE)
+ process_disable_transition(MHI_PM_SHUTDOWN_PROCESS,
+ mhi_dev_ctxt);
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt,
+ MHI_PM_SSR_PENDING);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ if (unlikely(cur_state != MHI_PM_SSR_PENDING))
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_SSR_PENDING, cur_state);
break;
default:
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received ESOC notifcation %d, NOT handling\n",
- (int)action);
+ "Not handling esoc notification:%lu\n", action);
break;
}
return NOTIFY_OK;
@@ -91,128 +97,242 @@ int mhi_esoc_register(struct mhi_device_ctxt *mhi_dev_ctxt)
return ret_val;
}
-void mhi_notify_client(struct mhi_client_handle *client_handle,
- enum MHI_CB_REASON reason)
+/* handles sys_err and shutdown transitions */
+void process_disable_transition(enum MHI_PM_STATE transition_state,
+ struct mhi_device_ctxt *mhi_dev_ctxt)
{
- struct mhi_cb_info cb_info = {0};
- struct mhi_result result = {0};
- struct mhi_client_config *client_config;
+ enum MHI_PM_STATE cur_state, prev_state;
+ struct mhi_client_handle *client_handle;
+ struct mhi_ring *ch_ring, *bb_ring, *cmd_ring;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_chan_cfg *chan_cfg;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+ enum MHI_CB_REASON reason;
+ u32 timeout = mhi_dev_ctxt->poll_reset_timeout_ms;
+ int i;
+ int ret;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Enter with pm_state:0x%x MHI_STATE:%s transition_state:0x%x\n",
+ mhi_dev_ctxt->mhi_pm_state,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+ transition_state);
- cb_info.result = NULL;
- cb_info.cb_reason = reason;
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ write_lock_irq(pm_xfer_lock);
+ prev_state = mhi_dev_ctxt->mhi_pm_state;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, transition_state);
+ if (cur_state == transition_state) {
+ mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_DISABLE_TRANSITION;
+ mhi_dev_ctxt->flags.mhi_initialized = false;
+ }
+ write_unlock_irq(pm_xfer_lock);
- if (client_handle == NULL)
+	/* Not handling sys_err; could be in the middle of a shutdown */
+ if (unlikely(cur_state != transition_state)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ transition_state, cur_state);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
return;
+ }
- client_config = client_handle->client_config;
-
- if (client_config->client_info.mhi_client_cb) {
- result.user_data = client_config->user_data;
- cb_info.chan = client_config->chan_info.chan_nr;
- cb_info.result = &result;
- mhi_log(client_config->mhi_dev_ctxt, MHI_MSG_INFO,
- "Calling back for chan %d, reason %d\n",
- cb_info.chan,
- reason);
- client_config->client_info.mhi_client_cb(&cb_info);
+ /*
+	 * If we're shutting down, trigger a device MHI reset so we can
+	 * guarantee the device will not access host DDR during reset
+ */
+ if (cur_state == MHI_PM_SHUTDOWN_PROCESS &&
+ MHI_REG_ACCESS_VALID(prev_state)) {
+ read_lock_bh(pm_xfer_lock);
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_RESET);
+ read_unlock_bh(pm_xfer_lock);
+ mhi_test_for_device_reset(mhi_dev_ctxt);
}
-}
-void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
- enum MHI_CB_REASON reason)
-{
- int i;
- struct mhi_client_handle *client_handle = NULL;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Waiting for all pending event ring processing to complete\n");
+ for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
+ tasklet_kill(&mhi_dev_ctxt->mhi_local_event_ctxt[i].ev_task);
+ flush_work(&mhi_dev_ctxt->mhi_local_event_ctxt[i].ev_worker);
+ }
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Notifying all clients and resetting channels\n");
- for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
- if (VALID_CHAN_NR(i)) {
- client_handle = mhi_dev_ctxt->client_handle_list[i];
+ if (cur_state == MHI_PM_SHUTDOWN_PROCESS)
+ reason = MHI_CB_MHI_SHUTDOWN;
+ else
+ reason = MHI_CB_SYS_ERROR;
+ ch_ring = mhi_dev_ctxt->mhi_local_chan_ctxt;
+ chan_cfg = mhi_dev_ctxt->mhi_chan_cfg;
+ bb_ring = mhi_dev_ctxt->chan_bb_list;
+ for (i = 0; i < MHI_MAX_CHANNELS;
+ i++, ch_ring++, chan_cfg++, bb_ring++) {
+ enum MHI_CHAN_STATE ch_state;
+
+ client_handle = mhi_dev_ctxt->client_handle_list[i];
+ if (client_handle)
mhi_notify_client(client_handle, reason);
+
+ mutex_lock(&chan_cfg->chan_lock);
+ spin_lock_irq(&ch_ring->ring_lock);
+ ch_state = ch_ring->ch_state;
+ ch_ring->ch_state = MHI_CHAN_STATE_DISABLED;
+ spin_unlock_irq(&ch_ring->ring_lock);
+
+ /* Reset channel and free ring */
+ if (ch_state == MHI_CHAN_STATE_ENABLED) {
+ mhi_reset_chan(mhi_dev_ctxt, i);
+ free_tre_ring(mhi_dev_ctxt, i);
+ bb_ring->rp = bb_ring->base;
+ bb_ring->wp = bb_ring->base;
+ bb_ring->ack_rp = bb_ring->base;
}
+ mutex_unlock(&chan_cfg->chan_lock);
}
-}
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Finished notifying clients\n");
-int set_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- u32 pcie_word_val = 0;
- int r = 0;
+ /* Release lock and wait for all pending threads to complete */
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Waiting for all pending threads to complete\n");
+ complete(&mhi_dev_ctxt->cmd_complete);
+ flush_work(&mhi_dev_ctxt->process_m1_worker);
+ flush_work(&mhi_dev_ctxt->st_thread_worker);
+ if (mhi_dev_ctxt->bhi_ctxt.manage_boot)
+ flush_work(&mhi_dev_ctxt->bhi_ctxt.fw_load_work);
+ if (cur_state == MHI_PM_SHUTDOWN_PROCESS)
+ flush_work(&mhi_dev_ctxt->process_sys_err_worker);
- mhi_dev_ctxt->bhi_ctxt.bhi_base = mhi_dev_ctxt->core.bar0_base;
- pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base, BHIOFF);
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
- /* confirm it's a valid reading */
- if (unlikely(pcie_word_val == U32_MAX)) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Invalid BHI Offset:0x%x\n", pcie_word_val);
- return -EIO;
- }
- mhi_dev_ctxt->bhi_ctxt.bhi_base += pcie_word_val;
- pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base,
- BHI_EXECENV);
- mhi_dev_ctxt->dev_exec_env = pcie_word_val;
- if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
- mhi_dev_ctxt->base_state = STATE_TRANSITION_RESET;
- } else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
- mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
- } else {
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Invalid EXEC_ENV: 0x%x\n",
- pcie_word_val);
- r = -EIO;
+ /*
+	 * Shutdown has higher priority than sys_err and can be invoked in the
+	 * middle of sys_err processing; check the current state to confirm it
+	 * has not changed.
+ */
+ if (mhi_dev_ctxt->mhi_pm_state != cur_state) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "PM State transitioned to 0x%x while processing 0x%x\n",
+ mhi_dev_ctxt->mhi_pm_state, transition_state);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return;
}
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "EXEC_ENV: %d Base state %d\n",
- pcie_word_val, mhi_dev_ctxt->base_state);
- return r;
-}
-void mhi_link_state_cb(struct msm_pcie_notify *notify)
-{
- struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
+	/* Check all counts to make sure they are 0 */
+ WARN_ON(atomic_read(&mhi_dev_ctxt->counters.device_wake));
+ WARN_ON(atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
+ if (mhi_dev_ctxt->core.pci_master)
+ WARN_ON(atomic_read(&mhi_dev_ctxt->pcie_device->dev.
+ power.usage_count));
- if (!notify || !notify->data) {
- pr_err("%s: incomplete handle received\n", __func__);
- return;
+ /* Reset Event rings and CMD rings */
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Resetting ev ctxt and cmd ctxt\n");
+
+ cmd_ring = mhi_dev_ctxt->mhi_local_cmd_ctxt;
+ cmd_ctxt = mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, cmd_ring++) {
+ cmd_ring->rp = cmd_ring->base;
+ cmd_ring->wp = cmd_ring->base;
+ cmd_ctxt->mhi_cmd_ring_read_ptr =
+ cmd_ctxt->mhi_cmd_ring_base_addr;
+ cmd_ctxt->mhi_cmd_ring_write_ptr =
+ cmd_ctxt->mhi_cmd_ring_base_addr;
}
+ for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++)
+ mhi_reset_ev_ctxt(mhi_dev_ctxt, i);
- mhi_dev_ctxt = notify->data;
- switch (notify->event) {
- case MSM_PCIE_EVENT_LINKDOWN:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received MSM_PCIE_EVENT_LINKDOWN\n");
- break;
- case MSM_PCIE_EVENT_LINKUP:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received MSM_PCIE_EVENT_LINKUP\n");
- mhi_dev_ctxt->counters.link_up_cntr++;
- break;
- case MSM_PCIE_EVENT_WAKEUP:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received MSM_PCIE_EVENT_WAKE\n");
- __pm_stay_awake(&mhi_dev_ctxt->w_lock);
- __pm_relax(&mhi_dev_ctxt->w_lock);
+ /*
+	 * If we're the bus master, disable runtime suspend; we will
+	 * re-enable it during the AMSS transition
+ */
+ if (mhi_dev_ctxt->core.pci_master)
+ pm_runtime_forbid(&mhi_dev_ctxt->pcie_device->dev);
+
+ if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
+ bool trigger_reset = false;
- if (mhi_dev_ctxt->flags.mhi_initialized) {
- mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
- mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
- }
- break;
- default:
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received bad link event\n");
- return;
+ "Triggering device reset\n");
+ reinit_completion(&mhi_dev_ctxt->cmd_complete);
+ write_lock_irq(pm_xfer_lock);
+ /* Link can go down while processing SYS_ERR */
+ if (MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_RESET);
+ mhi_init_state_transition(mhi_dev_ctxt,
+ STATE_TRANSITION_RESET);
+ trigger_reset = true;
}
+ write_unlock_irq(pm_xfer_lock);
+
+ if (trigger_reset) {
+ /*
+			 * Keep the MHI state in Active (M0) until the host
+			 * enters the AMSS/RDDM state. Otherwise the modem
+			 * would error fatal if the host tried to enter M1
+			 * before reaching the AMSS/RDDM state.
+ */
+ read_lock_bh(pm_xfer_lock);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(pm_xfer_lock);
+
+			/* Wait until we enter the AMSS/RDDM exec env */
+ ret = wait_for_completion_timeout
+ (&mhi_dev_ctxt->cmd_complete,
+ msecs_to_jiffies(timeout));
+ if (!ret || (mhi_dev_ctxt->dev_exec_env !=
+ MHI_EXEC_ENV_AMSS &&
+ mhi_dev_ctxt->dev_exec_env !=
+ MHI_EXEC_ENV_RDDM)) {
+
+ /*
+				 * Device did not reset properly; notify the
+				 * bus master
+ */
+ if (!mhi_dev_ctxt->core.pci_master) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Notifying bus master Sys Error Status\n");
+ mhi_dev_ctxt->status_cb(
+ MHI_CB_SYS_ERROR,
+ mhi_dev_ctxt->priv_data);
+ }
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
+ }
+ }
+ } else {
+ write_lock_irq(pm_xfer_lock);
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_DISABLE);
+ write_unlock_irq(pm_xfer_lock);
+ if (unlikely(cur_state != MHI_PM_DISABLE))
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error transition from state:0x%x to 0x%x\n",
+ cur_state, MHI_PM_DISABLE);
+
+ if (mhi_dev_ctxt->core.pci_master &&
+ cur_state == MHI_PM_DISABLE)
+ mhi_turn_off_pcie_link(mhi_dev_ctxt,
+ MHI_REG_ACCESS_VALID(prev_state));
+ }
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Exit with pm_state:0x%x exec_env:0x%x mhi_state:%s\n",
+ mhi_dev_ctxt->mhi_pm_state, mhi_dev_ctxt->dev_exec_env,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
}
-int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+void mhi_sys_err_worker(struct work_struct *work)
{
- int r = 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ container_of(work, struct mhi_device_ctxt,
+ process_sys_err_worker);
- r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
- if (r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
- "Failed to start state change event, to %d\n",
- mhi_dev_ctxt->base_state);
- }
- return r;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Enter with pm_state:0x%x MHI_STATE:%s\n",
+ mhi_dev_ctxt->mhi_pm_state,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ process_disable_transition(MHI_PM_SYS_ERR_PROCESS, mhi_dev_ctxt);
}
diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c
index a4da6c21b50d..c0c23c4e0756 100644
--- a/drivers/platform/msm/mhi/mhi_states.c
+++ b/drivers/platform/msm/mhi/mhi_states.c
@@ -13,6 +13,7 @@
#include "mhi_sys.h"
#include "mhi_hwio.h"
#include "mhi_trace.h"
+#include "mhi_bhi.h"
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -33,6 +34,7 @@ const char *state_transition_str(enum STATE_TRANSITION state)
[STATE_TRANSITION_LINK_DOWN] = "LINK_DOWN",
[STATE_TRANSITION_WAKE] = "WAKE",
[STATE_TRANSITION_BHIE] = "BHIE",
+ [STATE_TRANSITION_RDDM] = "RDDM",
[STATE_TRANSITION_SYS_ERR] = "SYS_ERR",
};
@@ -40,6 +42,53 @@ const char *state_transition_str(enum STATE_TRANSITION state)
mhi_states_transition_str[state] : "Invalid";
}
+int set_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ u32 pcie_word_val = 0;
+ int r = 0;
+
+ mhi_dev_ctxt->bhi_ctxt.bhi_base = mhi_dev_ctxt->core.bar0_base;
+ pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base, BHIOFF);
+
+ /* confirm it's a valid reading */
+ if (unlikely(pcie_word_val == U32_MAX)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid BHI Offset:0x%x\n", pcie_word_val);
+ return -EIO;
+ }
+ mhi_dev_ctxt->bhi_ctxt.bhi_base += pcie_word_val;
+ pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base,
+ BHI_EXECENV);
+ mhi_dev_ctxt->dev_exec_env = pcie_word_val;
+ if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
+ mhi_dev_ctxt->base_state = STATE_TRANSITION_RESET;
+ } else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
+ mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
+ } else {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid EXEC_ENV: 0x%x\n",
+ pcie_word_val);
+ r = -EIO;
+ }
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "EXEC_ENV: %d Base state %d\n",
+ pcie_word_val, mhi_dev_ctxt->base_state);
+ return r;
+}
+
+int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ int r = 0;
+
+ r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to start state change event, to %d\n",
+ mhi_dev_ctxt->base_state);
+ }
+ return r;
+}
+
enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 state = mhi_reg_read_field(mhi_dev_ctxt->mmio_info.mmio_addr,
@@ -47,7 +96,16 @@ enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt)
MHISTATUS_MHISTATE_MASK,
MHISTATUS_MHISTATE_SHIFT);
- return (state >= MHI_STATE_LIMIT) ? MHI_STATE_LIMIT : state;
+ return state;
+}
+
+bool mhi_in_sys_err(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ u32 state = mhi_reg_read_field(mhi_dev_ctxt->mmio_info.mmio_addr,
+ MHISTATUS, MHISTATUS_SYSERR_MASK,
+ MHISTATUS_SYSERR_SHIFT);
+
+ return (state) ? true : false;
}
void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -69,6 +127,140 @@ void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL);
}
+/*
+ * Not all MHI state transitions are synchronous. Linkdown, SSR, and
+ * shutdown can happen asynchronously at any time. This function moves to the
+ * new state only if the transition is valid.
+ *
+ * Priority increases as we go down: for example, while in any L0 state, a
+ * state from L1, L2, or L3 can be set. A notable exception is the DISABLE
+ * state, from which we can only transition to the POR or SSR_PENDING state.
+ * Likewise, while in an L2 state, we cannot jump back to L1 or L0 states.
+ * Valid transitions:
+ * L0: DISABLE <--> POR
+ * DISABLE <--> SSR_PENDING
+ * POR <--> POR
+ * POR -> M0 -> M1 -> M1_M2 -> M2 --> M0
+ * M1_M2 -> M0 (Device can trigger it)
+ * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
+ * M1 -> M3_ENTER --> M3
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L2: SHUTDOWN_PROCESS -> DISABLE -> SSR_PENDING (via SSR Notification only)
+ * L3: LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ */
+static const struct mhi_pm_transitions mhi_state_transitions[] = {
+ /* L0 States */
+ {
+ MHI_PM_DISABLE,
+ MHI_PM_POR | MHI_PM_SSR_PENDING
+ },
+ {
+ MHI_PM_POR,
+ MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M0,
+ MHI_PM_M1 | MHI_PM_M3_ENTER | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M1,
+ MHI_PM_M1_M2_TRANSITION | MHI_PM_M3_ENTER |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M1_M2_TRANSITION,
+ MHI_PM_M2 | MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M2,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_ENTER,
+ MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3,
+ MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_EXIT,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L1 States */
+ {
+ MHI_PM_SYS_ERR_DETECT,
+ MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_PROCESS,
+ MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L2 States */
+ {
+ MHI_PM_SHUTDOWN_PROCESS,
+ MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L3 States */
+ {
+ MHI_PM_LD_ERR_FATAL_DETECT,
+ MHI_PM_SHUTDOWN_PROCESS
+ },
+ /* From SSR notification only */
+ {
+ MHI_PM_SSR_PENDING,
+ MHI_PM_DISABLE
+ }
+};
+
+enum MHI_PM_STATE __must_check mhi_tryset_pm_state(
+ struct mhi_device_ctxt *mhi_dev_ctxt,
+ enum MHI_PM_STATE state)
+{
+ unsigned long cur_state = mhi_dev_ctxt->mhi_pm_state;
+ int index = find_last_bit(&cur_state, 32);
+
+ if (unlikely(index >= ARRAY_SIZE(mhi_state_transitions))) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+			"cur_state:0x%lx outside of mhi_state_transitions\n",
+ cur_state);
+ return cur_state;
+ }
+
+ if (unlikely(mhi_state_transitions[index].from_state != cur_state)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "index:%u cur_state:0x%lx != actual_state: 0x%x\n",
+ index, cur_state,
+ mhi_state_transitions[index].from_state);
+ return cur_state;
+ }
+
+ if (unlikely(!(mhi_state_transitions[index].to_states & state))) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Not allowing pm state transition from:0x%lx to:0x%x state\n",
+ cur_state, state);
+ return cur_state;
+ }
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Transition to pm state from:0x%lx to:0x%x\n",
+ cur_state, state);
+ mhi_dev_ctxt->mhi_pm_state = state;
+ return mhi_dev_ctxt->mhi_pm_state;
+}
+
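mhi_tryset_pm_state() indexes mhi_state_transitions[] with find_last_bit() on the current state, which only works if every MHI_PM_STATE value is a distinct single bit and the table rows are ordered by bit position (the from_state check above guards against a mismatch). The enum values in the sketch below are assumptions for illustration only.

	/*
	 * Hedged illustration: with one-hot states, find_last_bit(&cur_state, 32)
	 * returns the bit index, which doubles as the row number in
	 * mhi_state_transitions[]. Values here are assumed, not from this patch.
	 */
	enum MHI_PM_STATE {
		MHI_PM_DISABLE	= BIT(0),	/* index 0 -> first table row  */
		MHI_PM_POR	= BIT(1),	/* index 1 -> second table row */
		MHI_PM_M0	= BIT(2),	/* index 2 -> third table row  */
		/* ... remaining states follow the same ordering ... */
	};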
static void conditional_chan_db_write(
struct mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
{
@@ -158,20 +350,10 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
}
}
-static int process_bhie_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
-{
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
- mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_BHIE;
- wake_up(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
-
- return 0;
-}
-
int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
{
unsigned long flags;
+ enum MHI_PM_STATE cur_state;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered With State %s\n",
@@ -190,8 +372,14 @@ int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M0;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M0);
write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ if (unlikely(cur_state != MHI_PM_M0)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_M0, cur_state);
+ return -EIO;
+ }
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, true);
@@ -212,6 +400,7 @@ int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
void process_m1_transition(struct work_struct *work)
{
struct mhi_device_ctxt *mhi_dev_ctxt;
+ enum MHI_PM_STATE cur_state;
mhi_dev_ctxt = container_of(work,
struct mhi_device_ctxt,
@@ -224,15 +413,18 @@ void process_m1_transition(struct work_struct *work)
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
/* We either Entered M3 or we did M3->M0 Exit */
- if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_M1) {
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mutex_unlock(&mhi_dev_ctxt->pm_lock);
- return;
- }
+ if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_M1)
+ goto invalid_pm_state;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Transitioning to M2 Transition\n");
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1_M2_TRANSITION;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M1_M2_TRANSITION);
+ if (unlikely(cur_state != MHI_PM_M1_M2_TRANSITION)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_M1_M2_TRANSITION, cur_state);
+ goto invalid_pm_state;
+ }
mhi_dev_ctxt->counters.m1_m2++;
mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
@@ -245,7 +437,13 @@ void process_m1_transition(struct work_struct *work)
if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_M1_M2_TRANSITION) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered M2 State\n");
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M2;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M2);
+ if (unlikely(cur_state != MHI_PM_M2)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_M2, cur_state);
+ goto invalid_pm_state;
+ }
}
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
@@ -263,11 +461,17 @@ void process_m1_transition(struct work_struct *work)
pm_request_autosuspend(&mhi_dev_ctxt->pcie_device->dev);
}
mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return;
+
+invalid_pm_state:
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
}
int process_m3_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
{
unsigned long flags;
+ enum MHI_PM_STATE cur_state;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
@@ -285,25 +489,18 @@ int process_m3_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M3);
write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ if (unlikely(cur_state != MHI_PM_M3)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_M3, cur_state);
+ return -EIO;
+ }
wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
return 0;
}
-static int process_bhi_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
-{
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
- write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
- return 0;
-}
-
static int process_ready_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
@@ -313,15 +510,12 @@ static int process_ready_transition(
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Processing READY state transition\n");
- r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
- if (r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Failed to reset thread queues\n");
- return r;
- }
-
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ return -EIO;
+ }
r = mhi_init_mmio(mhi_dev_ctxt);
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
/* Initialize MMIO */
@@ -341,6 +535,10 @@ static int process_ready_transition(
}
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ return -EIO;
+ }
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
MHICTRL_MHISTATE_MASK,
@@ -350,30 +548,25 @@ static int process_ready_transition(
return r;
}
-static void mhi_reset_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
- int chan)
-{
- struct mhi_chan_ctxt *chan_ctxt =
- &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
- struct mhi_ring *local_chan_ctxt =
- &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
- chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
- chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
- local_chan_ctxt->rp = local_chan_ctxt->base;
- local_chan_ctxt->wp = local_chan_ctxt->base;
- local_chan_ctxt->ack_rp = local_chan_ctxt->base;
-}
-
static int process_reset_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0, i = 0;
+ int r = 0;
+ enum MHI_PM_STATE cur_state;
+
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Processing RESET state transition\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_POR);
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(cur_state != MHI_PM_POR)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error transitining from state:0x%x to:0x%x\n",
+ cur_state, MHI_PM_POR);
+ return -EIO;
+ }
mhi_dev_ctxt->counters.mhi_reset_cntr++;
r = mhi_test_for_device_reset(mhi_dev_ctxt);
@@ -387,25 +580,6 @@ static int process_reset_transition(
return r;
}
- for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
- mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
- mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
- mhi_dev_ctxt->mhi_local_cmd_ctxt[i].wp =
- mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
- mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt[i].
- mhi_cmd_ring_read_ptr =
- mhi_v2p_addr(mhi_dev_ctxt,
- MHI_RING_TYPE_CMD_RING,
- i,
- (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp);
- }
- for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
- mhi_reset_ev_ctxt(mhi_dev_ctxt, i);
-
- for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
- if (VALID_CHAN_NR(i))
- mhi_reset_chan_ctxt(mhi_dev_ctxt, i);
- }
r = mhi_init_state_transition(mhi_dev_ctxt,
STATE_TRANSITION_READY);
if (0 != r)
@@ -441,19 +615,6 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Done.\n");
}
-static int process_sbl_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
-{
-
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Enabled\n");
- write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
- return 0;
-}
-
static int process_amss_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
@@ -465,26 +626,19 @@ static int process_amss_transition(
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_AMSS;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->flags.mhi_initialized = true;
+ complete(&mhi_dev_ctxt->cmd_complete);
- if (!mhi_dev_ctxt->flags.mhi_initialized) {
- r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
+ r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
cur_work_item);
- mhi_dev_ctxt->flags.mhi_initialized = 1;
- if (r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
- "Failed to set local chan state ret %d\n", r);
- mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
- return r;
- }
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Notifying clients that MHI is enabled\n");
- enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
- } else {
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "MHI is initialized\n");
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to set local chan state ret %d\n", r);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
+ return r;
}
+ enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
- complete(&mhi_dev_ctxt->cmd_complete);
/*
* runtime_allow will decrement usage_count, counts were
@@ -508,7 +662,7 @@ static int process_amss_transition(
return 0;
}
-static int process_stt_work_item(
+void process_stt_work_item(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
@@ -520,7 +674,10 @@ static int process_stt_work_item(
trace_mhi_state(cur_work_item);
switch (cur_work_item) {
case STATE_TRANSITION_BHI:
- r = process_bhi_transition(mhi_dev_ctxt, cur_work_item);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
break;
case STATE_TRANSITION_RESET:
r = process_reset_transition(mhi_dev_ctxt, cur_work_item);
@@ -529,13 +686,34 @@ static int process_stt_work_item(
r = process_ready_transition(mhi_dev_ctxt, cur_work_item);
break;
case STATE_TRANSITION_SBL:
- r = process_sbl_transition(mhi_dev_ctxt, cur_work_item);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
break;
case STATE_TRANSITION_AMSS:
r = process_amss_transition(mhi_dev_ctxt, cur_work_item);
break;
case STATE_TRANSITION_BHIE:
- r = process_bhie_transition(mhi_dev_ctxt, cur_work_item);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_BHIE;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ wake_up(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
+ break;
+ case STATE_TRANSITION_RDDM:
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_RDDM;
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ complete(&mhi_dev_ctxt->cmd_complete);
+
+ /* Notify bus master device entered rddm mode */
+ if (!mhi_dev_ctxt->core.pci_master) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Notifying bus master RDDM Status\n");
+ mhi_dev_ctxt->status_cb(MHI_CB_RDDM,
+ mhi_dev_ctxt->priv_data);
+ }
break;
default:
mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
@@ -543,12 +721,11 @@ static int process_stt_work_item(
state_transition_str(cur_work_item));
break;
}
- return r;
}
void mhi_state_change_worker(struct work_struct *work)
{
- int r = 0;
+ int r;
struct mhi_device_ctxt *mhi_dev_ctxt = container_of(work,
struct mhi_device_ctxt,
st_thread_worker);
@@ -564,7 +741,7 @@ void mhi_state_change_worker(struct work_struct *work)
MHI_ASSERT(r == 0,
"Failed to delete element from STT workqueue\n");
spin_unlock_irq(work_q->q_lock);
- r = process_stt_work_item(mhi_dev_ctxt, cur_work_item);
+ process_stt_work_item(mhi_dev_ctxt, cur_work_item);
}
}
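
The hunks above convert bare assignments of mhi_pm_state into mhi_tryset_pm_state() calls and treat any mismatch between the requested and returned state as a failed transition (logged, then -EIO or a jump to the common unlock path). A minimal standalone sketch of that try-set idiom follows; the transition table and all names here are illustrative, not the driver's actual state machine.

#include <stdio.h>

enum pm_state { PM_POR, PM_M0, PM_M1, PM_M2, PM_M3, PM_NR_STATES };

/* valid_next[from] is a bitmask of states reachable from 'from' (illustrative) */
static const unsigned int valid_next[PM_NR_STATES] = {
	[PM_POR] = 1u << PM_M0,
	[PM_M0]  = (1u << PM_M1) | (1u << PM_M3),
	[PM_M1]  = (1u << PM_M2) | (1u << PM_M0),
	[PM_M2]  = 1u << PM_M0,
	[PM_M3]  = 1u << PM_M0,
};

static enum pm_state cur_state = PM_POR;

/* Attempt the transition; return whichever state is actually in effect afterwards. */
static enum pm_state tryset_pm_state(enum pm_state to)
{
	if (valid_next[cur_state] & (1u << to))
		cur_state = to;
	return cur_state;
}

int main(void)
{
	enum pm_state got = tryset_pm_state(PM_M0);

	if (got != PM_M0) {
		fprintf(stderr, "failed to enter M0, still in %d\n", cur_state);
		return 1;
	}
	printf("entered M0\n");
	return 0;
}

The caller-side check ("did I get back the state I asked for?") is what the patch repeats after every transition attempt.
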
diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c
index 3389de2f95b3..1d9282627d4e 100644
--- a/drivers/platform/msm/mhi/mhi_sys.c
+++ b/drivers/platform/msm/mhi/mhi_sys.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,15 +35,15 @@ module_param(mhi_ipc_log_lvl, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mhi_ipc_log_lvl, "dbg lvl");
const char * const mhi_states_str[MHI_STATE_LIMIT] = {
- "RESET",
- "READY",
- "M0",
- "M1",
- "M2",
- "M3",
+ [MHI_STATE_RESET] = "RESET",
+ [MHI_STATE_READY] = "READY",
+ [MHI_STATE_M0] = "M0",
+ [MHI_STATE_M1] = "M1",
+ [MHI_STATE_M2] = "M2",
+ [MHI_STATE_M3] = "M3",
"Reserved: 0x06",
- "BHI",
- "SYS_ERR",
+ [MHI_STATE_BHI] = "BHI",
+ [MHI_STATE_SYS_ERR] = "SYS_ERR",
};
static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
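
The mhi_states_str change above switches the table to designated initializers so each string is pinned to its enumerator; a reordered or newly inserted enum value can no longer silently shift every following entry. A tiny illustration with made-up names:

enum demo_state { DEMO_RESET, DEMO_READY, DEMO_M0, DEMO_NR_STATES };

/*
 * Designated initializers: each slot is tied to its enumerator, and any
 * index not listed (e.g. a reserved value) is implicitly NULL.
 */
static const char * const demo_state_str[DEMO_NR_STATES] = {
	[DEMO_RESET] = "RESET",
	[DEMO_READY] = "READY",
	[DEMO_M0]    = "M0",
};

static const char *demo_state_to_str(enum demo_state s)
{
	if (s >= DEMO_NR_STATES || !demo_state_str[s])
		return "UNKNOWN";
	return demo_state_str[s];
}
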
diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c
index 0e28ebdd8fea..ab3c3503c2fc 100644
--- a/drivers/platform/msm/mhi_uci/mhi_uci.c
+++ b/drivers/platform/msm/mhi_uci/mhi_uci.c
@@ -33,7 +33,6 @@
#define MHI_SOFTWARE_CLIENT_LIMIT 23
#define MHI_UCI_IPC_LOG_PAGES (25)
-#define MAX_NR_TRBS_PER_CHAN 10
#define DEVICE_NAME "mhi"
#define MHI_UCI_DRIVER_NAME "mhi_uci"
#define CTRL_MAGIC 0x4C525443
@@ -95,33 +94,35 @@ struct chan_attr {
u32 nr_trbs;
enum MHI_CHAN_DIR dir;
u32 uci_ownership;
+ bool enabled;
+ struct mhi_client_handle *mhi_handle;
+ wait_queue_head_t wq;
+ struct list_head buf_head;
+ struct mutex chan_lock;
+ atomic_t avail_pkts; /* no. avail tre to read or space avail for tx */
+ u64 pkt_count;
+};
+
+struct uci_buf {
+ void *data;
+ u64 pkt_id;
+ struct list_head node;
};
struct uci_client {
- u32 out_chan;
- u32 in_chan;
- u32 out_chan_state;
- u32 in_chan_state;
struct chan_attr in_attr;
struct chan_attr out_attr;
- struct mhi_client_handle *out_handle;
- struct mhi_client_handle *in_handle;
- size_t pending_data;
- wait_queue_head_t read_wq;
- wait_queue_head_t write_wq;
- atomic_t avail_pkts;
struct device *dev;
u8 local_tiocm;
- atomic_t ref_count;
- int mhi_status;
- void *pkt_loc;
+ struct mutex client_lock; /* sync open and close */
+ int ref_count;
+ struct uci_buf *cur_buf; /* current buffer read processing */
size_t pkt_size;
- void **in_buf_list;
+ struct work_struct outbound_worker; /* clean up outbound pkts */
atomic_t out_pkt_pend_ack;
- atomic_t mhi_disabled;
+ atomic_t completion_ack;
struct mhi_uci_ctxt_t *uci_ctxt;
- struct mutex in_chan_lock;
- struct mutex out_chan_lock;
+ bool enabled;
void *uci_ipc_log;
};
@@ -133,8 +134,6 @@ struct mhi_uci_ctxt_t {
struct mutex ctrl_mutex;
struct cdev cdev[MHI_SOFTWARE_CLIENT_LIMIT];
struct uci_client *ctrl_client;
- atomic_t mhi_disabled;
- atomic_t mhi_enable_notif_wq_active;
};
struct mhi_uci_drv_ctxt {
@@ -250,6 +249,35 @@ static long mhi_uci_ctl_ioctl(struct file *file, unsigned int cmd,
static struct mhi_uci_drv_ctxt mhi_uci_drv_ctxt;
+static void mhi_uci_clean_acked_tre(struct work_struct *work)
+{
+ struct uci_client *uci_client;
+ int i = 0;
+
+ uci_client = container_of(work, struct uci_client, outbound_worker);
+ while (atomic_read(&uci_client->completion_ack)) {
+ struct uci_buf *uci_buf;
+
+ /* acquire lock per tre so we won't block other uci threads */
+ mutex_lock(&uci_client->out_attr.chan_lock);
+ uci_buf = list_first_entry_or_null(
+ &uci_client->out_attr.buf_head,
+ struct uci_buf, node);
+ if (unlikely(!uci_buf)) {
+ mutex_unlock(&uci_client->out_attr.chan_lock);
+ break;
+ }
+ list_del(&uci_buf->node);
+ kfree(uci_buf->data);
+ atomic_dec(&uci_client->completion_ack);
+ mutex_unlock(&uci_client->out_attr.chan_lock);
+ i++;
+ }
+ uci_log(uci_client->uci_ipc_log, UCI_DBG_VERBOSE,
+ "freed %d tres for chan %d\n",
+ i, uci_client->out_attr.chan_id);
+}
+
static int mhi_init_inbound(struct uci_client *client_handle)
{
int ret_val = 0;
@@ -257,37 +285,32 @@ static int mhi_init_inbound(struct uci_client *client_handle)
struct chan_attr *chan_attributes = &client_handle->in_attr;
void *data_loc = NULL;
size_t buf_size = chan_attributes->max_packet_size;
+ struct uci_buf *uci_buf;
- if (client_handle == NULL) {
- uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log,
- UCI_DBG_ERROR,
- "Bad Input data, quitting\n");
- return -EINVAL;
- }
chan_attributes->nr_trbs =
- mhi_get_free_desc(client_handle->in_handle);
- client_handle->in_buf_list =
- kmalloc(sizeof(void *) * chan_attributes->nr_trbs,
- GFP_KERNEL);
- if (!client_handle->in_buf_list)
- return -ENOMEM;
+ mhi_get_free_desc(client_handle->in_attr.mhi_handle);
uci_log(client_handle->uci_ipc_log,
UCI_DBG_INFO, "Channel %d supports %d desc\n",
chan_attributes->chan_id,
chan_attributes->nr_trbs);
for (i = 0; i < chan_attributes->nr_trbs; ++i) {
- data_loc = kmalloc(buf_size, GFP_KERNEL);
- uci_log(client_handle->uci_ipc_log,
- UCI_DBG_INFO,
- "Allocated buffer %p size %zd\n",
- data_loc,
- buf_size);
+ data_loc = kmalloc(buf_size + sizeof(*uci_buf), GFP_KERNEL);
+
+ /*
+ * previously allocated memory will be freed after
+ * channel close
+ */
if (data_loc == NULL)
return -ENOMEM;
- client_handle->in_buf_list[i] = data_loc;
- ret_val = mhi_queue_xfer(client_handle->in_handle,
- data_loc, buf_size, MHI_EOT);
+ uci_buf = data_loc + buf_size;
+ uci_buf->data = data_loc;
+ uci_buf->pkt_id = chan_attributes->pkt_count++;
+ uci_log(client_handle->uci_ipc_log, UCI_DBG_INFO,
+ "Allocated buffer %llu size %ld for chan:%d\n",
+ uci_buf->pkt_id, buf_size, chan_attributes->chan_id);
+ ret_val = mhi_queue_xfer(client_handle->in_attr.mhi_handle,
+ data_loc, buf_size, MHI_EOT);
if (0 != ret_val) {
kfree(data_loc);
uci_log(client_handle->uci_ipc_log,
@@ -297,139 +320,138 @@ static int mhi_init_inbound(struct uci_client *client_handle)
ret_val);
break;
}
+ list_add_tail(&uci_buf->node, &client_handle->in_attr.buf_head);
}
return ret_val;
}
static int mhi_uci_send_packet(struct mhi_client_handle **client_handle,
- void *buf, u32 size, u32 is_uspace_buf)
+ void *buf,
+ u32 size)
{
u32 nr_avail_trbs = 0;
u32 i = 0;
void *data_loc = NULL;
- uintptr_t memcpy_result = 0;
+ unsigned long memcpy_result = 0;
int data_left_to_insert = 0;
size_t data_to_insert_now = 0;
u32 data_inserted_so_far = 0;
int ret_val = 0;
- enum MHI_FLAGS flags;
struct uci_client *uci_handle;
- uci_handle = container_of(client_handle, struct uci_client,
- out_handle);
+ struct uci_buf *uci_buf;
- if (client_handle == NULL || buf == NULL ||
- !size || uci_handle == NULL)
- return -EINVAL;
-
- nr_avail_trbs = mhi_get_free_desc(*client_handle);
+ uci_handle = container_of(client_handle, struct uci_client,
+ out_attr.mhi_handle);
+ nr_avail_trbs = atomic_read(&uci_handle->out_attr.avail_pkts);
data_left_to_insert = size;
- if (0 == nr_avail_trbs)
- return 0;
-
for (i = 0; i < nr_avail_trbs; ++i) {
data_to_insert_now = min_t(size_t, data_left_to_insert,
uci_handle->out_attr.max_packet_size);
- if (is_uspace_buf) {
- data_loc = kmalloc(data_to_insert_now, GFP_KERNEL);
- if (NULL == data_loc) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "Failed to allocate memory 0x%zx\n",
- data_to_insert_now);
+ data_loc = kmalloc(data_to_insert_now + sizeof(*uci_buf),
+ GFP_KERNEL);
+ if (!data_loc) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
+ "Failed to allocate memory 0x%zx\n",
+ data_to_insert_now);
return -ENOMEM;
- }
- memcpy_result = copy_from_user(data_loc,
- buf + data_inserted_so_far,
- data_to_insert_now);
-
- if (0 != memcpy_result)
- goto error_memcpy;
- } else {
- data_loc = buf;
}
-
- flags = MHI_EOT;
- if (data_left_to_insert - data_to_insert_now > 0)
- flags |= MHI_CHAIN | MHI_EOB;
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "At trb i = %d/%d, chain = %d, eob = %d, addr 0x%p chan %d\n",
- i,
- nr_avail_trbs,
- flags & MHI_CHAIN,
- flags & MHI_EOB,
- data_loc,
- uci_handle->out_chan);
- ret_val = mhi_queue_xfer(*client_handle, data_loc,
- data_to_insert_now, flags);
-
- if (0 != ret_val) {
- goto error_queue;
+ uci_buf = data_loc + data_to_insert_now;
+ uci_buf->data = data_loc;
+ uci_buf->pkt_id = uci_handle->out_attr.pkt_count++;
+ memcpy_result = copy_from_user(uci_buf->data,
+ buf + data_inserted_so_far,
+ data_to_insert_now);
+ if (memcpy_result)
+ goto error_xfer;
+
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
+ "At trb i = %d/%d, size = %lu, id %llu chan %d\n",
+ i, nr_avail_trbs, data_to_insert_now, uci_buf->pkt_id,
+ uci_handle->out_attr.chan_id);
+ ret_val = mhi_queue_xfer(*client_handle, uci_buf->data,
+ data_to_insert_now, MHI_EOT);
+ if (ret_val) {
+ goto error_xfer;
} else {
data_left_to_insert -= data_to_insert_now;
data_inserted_so_far += data_to_insert_now;
atomic_inc(&uci_handle->out_pkt_pend_ack);
+ atomic_dec(&uci_handle->out_attr.avail_pkts);
+ list_add_tail(&uci_buf->node,
+ &uci_handle->out_attr.buf_head);
}
- if (0 == data_left_to_insert)
+ if (!data_left_to_insert)
break;
}
return data_inserted_so_far;
-error_queue:
-error_memcpy:
- kfree(data_loc);
+error_xfer:
+ kfree(uci_buf->data);
return data_inserted_so_far;
}
static int mhi_uci_send_status_cmd(struct uci_client *client)
{
+ void *buf = NULL;
struct rs232_ctrl_msg *rs232_pkt = NULL;
+ struct uci_buf *uci_buf = NULL;
struct uci_client *uci_ctrl_handle;
struct mhi_uci_ctxt_t *uci_ctxt = client->uci_ctxt;
int ret_val = 0;
- size_t pkt_size = sizeof(struct rs232_ctrl_msg);
- u32 amount_sent;
if (!uci_ctxt->ctrl_client) {
- uci_log(client->uci_ipc_log,
- UCI_DBG_INFO,
+ uci_log(client->uci_ipc_log, UCI_DBG_INFO,
"Control channel is not defined\n");
return -EIO;
}
uci_ctrl_handle = uci_ctxt->ctrl_client;
- mutex_lock(&uci_ctrl_handle->out_chan_lock);
+ mutex_lock(&uci_ctrl_handle->out_attr.chan_lock);
- if (!atomic_read(&uci_ctrl_handle->mhi_disabled) &&
- !uci_ctrl_handle->out_chan_state) {
- uci_log(uci_ctrl_handle->uci_ipc_log,
- UCI_DBG_INFO,
+ if (!uci_ctrl_handle->enabled) {
+ uci_log(uci_ctrl_handle->uci_ipc_log, UCI_DBG_INFO,
"Opening outbound control channel %d for chan:%d\n",
- uci_ctrl_handle->out_chan,
- client->out_chan);
- ret_val = mhi_open_channel(uci_ctrl_handle->out_handle);
- if (0 != ret_val) {
- uci_log(uci_ctrl_handle->uci_ipc_log,
- UCI_DBG_CRITICAL,
+ uci_ctrl_handle->out_attr.chan_id,
+ client->out_attr.chan_id);
+ if (!uci_ctrl_handle->out_attr.enabled) {
+ uci_log(uci_ctrl_handle->uci_ipc_log, UCI_DBG_CRITICAL,
+ "Channel %d is not enable\n",
+ uci_ctrl_handle->out_attr.chan_id);
+ ret_val = -EIO;
+ goto error_open;
+ }
+ ret_val = mhi_open_channel(uci_ctrl_handle->
+ out_attr.mhi_handle);
+ if (ret_val) {
+ uci_log(uci_ctrl_handle->uci_ipc_log, UCI_DBG_CRITICAL,
"Could not open chan %d, for sideband ctrl\n",
- uci_ctrl_handle->out_chan);
+ uci_ctrl_handle->out_attr.chan_id);
ret_val = -EIO;
goto error_open;
}
- uci_ctrl_handle->out_chan_state = 1;
+ uci_ctrl_handle->enabled = true;
+ }
+
+ if (mhi_get_free_desc(uci_ctrl_handle->out_attr.mhi_handle) <= 0) {
+ ret_val = -EIO;
+ goto error_open;
}
- rs232_pkt = kzalloc(sizeof(struct rs232_ctrl_msg), GFP_KERNEL);
- if (rs232_pkt == NULL) {
+ buf = kzalloc(sizeof(*rs232_pkt) + sizeof(*uci_buf), GFP_KERNEL);
+ if (!buf) {
ret_val = -ENOMEM;
goto error_open;
}
- uci_log(uci_ctrl_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
+
+
+ uci_log(uci_ctrl_handle->uci_ipc_log, UCI_DBG_VERBOSE,
"Received request to send msg for chan %d\n",
- client->out_chan);
+ client->out_attr.chan_id);
+ uci_buf = buf + sizeof(*rs232_pkt);
+ uci_buf->data = buf;
+ rs232_pkt = (struct rs232_ctrl_msg *)uci_buf->data;
rs232_pkt->preamble = CTRL_MAGIC;
if (client->local_tiocm & TIOCM_DTR)
MHI_SET_CTRL_MSG(CTRL_MSG_DTR, rs232_pkt, 1);
@@ -438,26 +460,27 @@ static int mhi_uci_send_status_cmd(struct uci_client *client)
MHI_SET_CTRL_MSG_ID(CTRL_MSG_ID, rs232_pkt, MHI_CTRL_LINE_STATE_ID);
MHI_SET_CTRL_MSG_SIZE(CTRL_MSG_SIZE, rs232_pkt, sizeof(u32));
- MHI_SET_CTRL_DEST_ID(CTRL_DEST_ID, rs232_pkt, client->out_chan);
+ MHI_SET_CTRL_DEST_ID(CTRL_DEST_ID, rs232_pkt, client->out_attr.chan_id);
- amount_sent = mhi_uci_send_packet(&uci_ctrl_handle->out_handle,
- rs232_pkt,
- pkt_size, 0);
-
- if (pkt_size != amount_sent) {
- uci_log(uci_ctrl_handle->uci_ipc_log,
- UCI_DBG_INFO,
+ ret_val = mhi_queue_xfer(uci_ctrl_handle->out_attr.mhi_handle,
+ uci_buf->data, sizeof(*rs232_pkt), MHI_EOT);
+ if (ret_val) {
+ uci_log(uci_ctrl_handle->uci_ipc_log, UCI_DBG_INFO,
"Failed to send signal for chan %d, ret : %d\n",
- client->out_chan,
- ret_val);
- goto error;
+ client->out_attr.chan_id, ret_val);
+ goto error_queue;
}
-error_open:
- mutex_unlock(&uci_ctrl_handle->out_chan_lock);
+ list_add_tail(&uci_buf->node, &uci_ctrl_handle->out_attr.buf_head);
+
+ mutex_unlock(&uci_ctrl_handle->out_attr.chan_lock);
+ return 0;
+
+ mutex_unlock(&uci_ctrl_handle->out_attr.chan_lock);
return ret_val;
-error:
- kfree(rs232_pkt);
- mutex_unlock(&uci_ctrl_handle->out_chan_lock);
+error_queue:
+ kfree(buf);
+error_open:
+ mutex_unlock(&uci_ctrl_handle->out_attr.chan_lock);
return ret_val;
}
@@ -466,6 +489,7 @@ static int mhi_uci_tiocm_set(struct uci_client *client_ctxt, u32 set, u32 clear)
u8 status_set = 0;
u8 status_clear = 0;
u8 old_status = 0;
+ int ret = 0;
mutex_lock(&client_ctxt->uci_ctxt->ctrl_mutex);
@@ -475,23 +499,21 @@ static int mhi_uci_tiocm_set(struct uci_client *client_ctxt, u32 set, u32 clear)
client_ctxt->local_tiocm |= status_set;
client_ctxt->local_tiocm &= ~status_clear;
- uci_log(client_ctxt->uci_ipc_log,
- UCI_DBG_VERBOSE,
+ uci_log(client_ctxt->uci_ipc_log, UCI_DBG_VERBOSE,
"Old TIOCM0x%x for chan %d, Current TIOCM 0x%x\n",
- old_status,
- client_ctxt->out_chan,
+ old_status, client_ctxt->out_attr.chan_id,
client_ctxt->local_tiocm);
- mutex_unlock(&client_ctxt->uci_ctxt->ctrl_mutex);
if (client_ctxt->local_tiocm != old_status) {
- uci_log(client_ctxt->uci_ipc_log,
- UCI_DBG_VERBOSE,
+ uci_log(client_ctxt->uci_ipc_log, UCI_DBG_VERBOSE,
"Setting TIOCM to 0x%x for chan %d\n",
client_ctxt->local_tiocm,
- client_ctxt->out_chan);
- return mhi_uci_send_status_cmd(client_ctxt);
+ client_ctxt->out_attr.chan_id);
+ ret = mhi_uci_send_status_cmd(client_ctxt);
}
- return 0;
+
+ mutex_unlock(&client_ctxt->uci_ctxt->ctrl_mutex);
+ return ret;
}
static long mhi_uci_ctl_ioctl(struct file *file, unsigned int cmd,
@@ -503,35 +525,26 @@ static long mhi_uci_ctl_ioctl(struct file *file, unsigned int cmd,
uci_handle = file->private_data;
if (uci_handle == NULL) {
- uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log,
- UCI_DBG_VERBOSE,
+ uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, UCI_DBG_VERBOSE,
"Invalid handle for client\n");
return -ENODEV;
}
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
"Attempting to dtr cmd 0x%x arg 0x%lx for chan %d\n",
- cmd,
- arg,
- uci_handle->out_chan);
+ cmd, arg, uci_handle->out_attr.chan_id);
switch (cmd) {
case TIOCMGET:
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "Returning 0x%x mask\n",
- uci_handle->local_tiocm);
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
+ "Returning 0x%x mask\n", uci_handle->local_tiocm);
ret_val = uci_handle->local_tiocm;
break;
case TIOCMSET:
if (0 != copy_from_user(&set_val, (void *)arg, sizeof(set_val)))
return -ENOMEM;
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
"Attempting to set cmd 0x%x arg 0x%x for chan %d\n",
- cmd,
- set_val,
- uci_handle->out_chan);
+ cmd, set_val, uci_handle->out_attr.chan_id);
ret_val = mhi_uci_tiocm_set(uci_handle, set_val, ~set_val);
break;
default:
@@ -551,29 +564,32 @@ static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait)
return -ENODEV;
uci_ctxt = uci_handle->uci_ctxt;
- poll_wait(file, &uci_handle->read_wq, wait);
- poll_wait(file, &uci_handle->write_wq, wait);
- if (atomic_read(&uci_handle->avail_pkts) > 0) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
+ poll_wait(file, &uci_handle->in_attr.wq, wait);
+ poll_wait(file, &uci_handle->out_attr.wq, wait);
+ mutex_lock(&uci_handle->in_attr.chan_lock);
+ if (!uci_handle->in_attr.enabled || !uci_handle->enabled)
+ mask = POLLERR;
+ else if (atomic_read(&uci_handle->in_attr.avail_pkts) ||
+ uci_handle->cur_buf) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
"Client can read chan %d\n",
- uci_handle->in_chan);
+ uci_handle->in_attr.chan_id);
mask |= POLLIN | POLLRDNORM;
}
- if (!atomic_read(&uci_ctxt->mhi_disabled) &&
- (mhi_get_free_desc(uci_handle->out_handle) > 0)) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
+ mutex_unlock(&uci_handle->in_attr.chan_lock);
+ mutex_lock(&uci_handle->out_attr.chan_lock);
+ if (!uci_handle->out_attr.enabled || !uci_handle->enabled)
+ mask |= POLLERR;
+ else if (atomic_read(&uci_handle->out_attr.avail_pkts) > 0) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
"Client can write chan %d\n",
- uci_handle->out_chan);
+ uci_handle->out_attr.chan_id);
mask |= POLLOUT | POLLWRNORM;
}
-
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
+ mutex_unlock(&uci_handle->out_attr.chan_lock);
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
"Client attempted to poll chan %d, returning mask 0x%x\n",
- uci_handle->in_chan,
- mask);
+ uci_handle->in_attr.chan_id, mask);
return mask;
}
@@ -581,66 +597,67 @@ static int open_client_mhi_channels(struct uci_client *uci_client)
{
int ret_val = 0;
int r = 0;
+ struct uci_buf *itr, *tmp;
- uci_log(uci_client->uci_ipc_log,
- UCI_DBG_INFO,
+ uci_log(uci_client->uci_ipc_log, UCI_DBG_INFO,
"Starting channels %d %d\n",
- uci_client->out_chan,
- uci_client->in_chan);
- mutex_lock(&uci_client->out_chan_lock);
- mutex_lock(&uci_client->in_chan_lock);
- ret_val = mhi_open_channel(uci_client->out_handle);
+ uci_client->out_attr.chan_id,
+ uci_client->in_attr.chan_id);
+
+ ret_val = mhi_open_channel(uci_client->out_attr.mhi_handle);
if (ret_val != 0) {
if (ret_val == -ENOTCONN)
- r = -EAGAIN;
+ return -EAGAIN;
else
- r = -EIO;
- goto handle_not_rdy_err;
+ return -EIO;
}
- uci_client->out_chan_state = 1;
+ ret_val = mhi_get_free_desc(uci_client->out_attr.mhi_handle);
+ if (ret_val >= 0)
+ atomic_set(&uci_client->out_attr.avail_pkts, ret_val);
- ret_val = mhi_open_channel(uci_client->in_handle);
+ ret_val = mhi_open_channel(uci_client->in_attr.mhi_handle);
if (ret_val != 0) {
- uci_log(uci_client->uci_ipc_log,
- UCI_DBG_ERROR,
+ uci_log(uci_client->uci_ipc_log, UCI_DBG_ERROR,
"Failed to open chan %d, ret 0x%x\n",
- uci_client->out_chan,
- ret_val);
- goto handle_in_err;
+ uci_client->out_attr.chan_id, ret_val);
+ goto error_inbound_open;
}
- uci_log(uci_client->uci_ipc_log,
- UCI_DBG_INFO,
+ uci_log(uci_client->uci_ipc_log, UCI_DBG_INFO,
"Initializing inbound chan %d\n",
- uci_client->in_chan);
- uci_client->in_chan_state = 1;
+ uci_client->in_attr.chan_id);
ret_val = mhi_init_inbound(uci_client);
if (0 != ret_val) {
- uci_log(uci_client->uci_ipc_log,
- UCI_DBG_ERROR,
+ uci_log(uci_client->uci_ipc_log, UCI_DBG_ERROR,
"Failed to init inbound 0x%x, ret 0x%x\n",
- uci_client->in_chan,
- ret_val);
- }
+ uci_client->in_attr.chan_id, ret_val);
+ goto error_init_inbound;
- mutex_unlock(&uci_client->in_chan_lock);
- mutex_unlock(&uci_client->out_chan_lock);
+ }
+ atomic_set(&uci_client->completion_ack, 0);
+ uci_client->enabled = true;
return 0;
-handle_in_err:
- mhi_close_channel(uci_client->out_handle);
- uci_client->out_chan_state = 0;
-handle_not_rdy_err:
- mutex_unlock(&uci_client->in_chan_lock);
- mutex_unlock(&uci_client->out_chan_lock);
+error_init_inbound:
+ mhi_close_channel(uci_client->in_attr.mhi_handle);
+ list_for_each_entry_safe(itr, tmp, &uci_client->in_attr.buf_head,
+ node) {
+ list_del(&itr->node);
+ kfree(itr->data);
+ }
+ INIT_LIST_HEAD(&uci_client->in_attr.buf_head);
+
+error_inbound_open:
+ mhi_close_channel(uci_client->out_attr.mhi_handle);
return r;
}
static int mhi_uci_client_open(struct inode *inode,
- struct file *file_handle)
+ struct file *file_handle)
{
struct uci_client *uci_handle = NULL;
struct mhi_uci_ctxt_t *uci_ctxt = NULL, *itr;
+ const long timeout = msecs_to_jiffies(1000);
int r = 0;
int client_id = iminor(inode);
int major = imajor(inode);
@@ -654,100 +671,136 @@ static int mhi_uci_client_open(struct inode *inode,
}
}
mutex_unlock(&mhi_uci_drv_ctxt.list_lock);
+
if (!uci_ctxt || client_id >= MHI_SOFTWARE_CLIENT_LIMIT)
return -EINVAL;
uci_handle = &uci_ctxt->client_handles[client_id];
- if (atomic_read(&uci_handle->mhi_disabled)) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_INFO,
+ r = wait_event_interruptible_timeout(uci_handle->out_attr.wq,
+ uci_handle->out_attr.enabled,
+ timeout);
+ if (r < 0)
+ return -EAGAIN;
+ r = wait_event_interruptible_timeout(uci_handle->in_attr.wq,
+ uci_handle->in_attr.enabled,
+ timeout);
+ if (r < 0)
+ return -EAGAIN;
+ r = 0;
+ mutex_lock(&uci_handle->client_lock);
+ mutex_lock(&uci_handle->out_attr.chan_lock);
+ mutex_lock(&uci_handle->in_attr.chan_lock);
+ if (!uci_handle->out_attr.enabled || !uci_handle->in_attr.enabled) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
"MHI channel still disable for, client %d\n",
client_id);
- msleep(500);
+ mutex_unlock(&uci_handle->in_attr.chan_lock);
+ mutex_unlock(&uci_handle->out_attr.chan_lock);
+ mutex_unlock(&uci_handle->client_lock);
return -EAGAIN;
}
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_INFO,
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
"Client opened device node 0x%x, ref count 0x%x\n",
- client_id,
- atomic_read(&uci_handle->ref_count));
+ client_id, uci_handle->ref_count);
- if (1 == atomic_add_return(1, &uci_handle->ref_count)) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_INFO,
- "Opening channels client %d\n",
+ if (++uci_handle->ref_count == 1) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
+ "Opening channels client %d for first time\n",
client_id);
r = open_client_mhi_channels(uci_handle);
- if (r)
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_INFO,
- "Failed to open channels ret %d\n",
- r);
+ if (r) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
+ "Failed to open channels ret %d\n", r);
+ uci_handle->ref_count--;
+ }
}
+ mutex_unlock(&uci_handle->in_attr.chan_lock);
+ mutex_unlock(&uci_handle->out_attr.chan_lock);
+ mutex_unlock(&uci_handle->client_lock);
file_handle->private_data = uci_handle;
return r;
}
static int mhi_uci_client_release(struct inode *mhi_inode,
- struct file *file_handle)
+ struct file *file_handle)
{
struct uci_client *uci_handle = file_handle->private_data;
u32 nr_in_bufs = 0;
int in_chan = 0;
- int i = 0;
u32 buf_size = 0;
- if (uci_handle == NULL)
- return -EINVAL;
-
+ mutex_lock(&uci_handle->client_lock);
in_chan = uci_handle->in_attr.chan_id;
nr_in_bufs = uci_handle->in_attr.nr_trbs;
buf_size = uci_handle->in_attr.max_packet_size;
+ uci_handle->ref_count--;
+ if (!uci_handle->ref_count) {
+ struct uci_buf *itr, *tmp;
- if (atomic_sub_return(1, &uci_handle->ref_count) == 0) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_ERROR,
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_ERROR,
"Last client left, closing channel 0x%x\n",
in_chan);
+ mutex_lock(&uci_handle->in_attr.chan_lock);
+ mutex_lock(&uci_handle->out_attr.chan_lock);
+
if (atomic_read(&uci_handle->out_pkt_pend_ack))
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_CRITICAL,
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
"Still waiting on %d acks!, chan %d\n",
atomic_read(&uci_handle->out_pkt_pend_ack),
uci_handle->out_attr.chan_id);
- mhi_close_channel(uci_handle->out_handle);
- mhi_close_channel(uci_handle->in_handle);
- uci_handle->out_chan_state = 0;
- uci_handle->in_chan_state = 0;
+ atomic_set(&uci_handle->in_attr.avail_pkts, 0);
+ if (uci_handle->in_attr.enabled)
+ mhi_close_channel(uci_handle->in_attr.mhi_handle);
+ list_for_each_entry_safe(itr, tmp,
+ &uci_handle->in_attr.buf_head, node) {
+ list_del(&itr->node);
+ kfree(itr->data);
+ }
+ if (uci_handle->cur_buf)
+ kfree(uci_handle->cur_buf->data);
+ uci_handle->cur_buf = NULL;
+ INIT_LIST_HEAD(&uci_handle->in_attr.buf_head);
+ atomic_set(&uci_handle->out_attr.avail_pkts, 0);
atomic_set(&uci_handle->out_pkt_pend_ack, 0);
- for (i = 0; i < nr_in_bufs; ++i) {
- kfree((void *)uci_handle->in_buf_list[i]);
+ if (uci_handle->out_attr.enabled)
+ mhi_close_channel(uci_handle->out_attr.mhi_handle);
+ list_for_each_entry_safe(itr, tmp,
+ &uci_handle->out_attr.buf_head, node) {
+ list_del(&itr->node);
+ kfree(itr->data);
}
- kfree(uci_handle->in_buf_list);
- atomic_set(&uci_handle->avail_pkts, 0);
+ INIT_LIST_HEAD(&uci_handle->out_attr.buf_head);
+ uci_handle->enabled = false;
+ mutex_unlock(&uci_handle->out_attr.chan_lock);
+ flush_work(&uci_handle->outbound_worker);
+ atomic_set(&uci_handle->completion_ack, 0);
+ mutex_unlock(&uci_handle->in_attr.chan_lock);
} else {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_ERROR,
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
"Client close chan %d, ref count 0x%x\n",
iminor(mhi_inode),
- atomic_read(&uci_handle->ref_count));
+ uci_handle->ref_count);
}
+ mutex_unlock(&uci_handle->client_lock);
+
return 0;
}
-static ssize_t mhi_uci_client_read(struct file *file, char __user *buf,
- size_t uspace_buf_size, loff_t *bytes_pending)
+static ssize_t mhi_uci_client_read(struct file *file,
+ char __user *buf,
+ size_t uspace_buf_size,
+ loff_t *bytes_pending)
{
struct uci_client *uci_handle = NULL;
struct mhi_client_handle *client_handle = NULL;
int ret_val = 0;
size_t buf_size = 0;
- struct mutex *mutex;
+ struct mutex *chan_lock;
u32 chan = 0;
- ssize_t bytes_copied = 0;
+ size_t bytes_copied = 0;
u32 addr_offset = 0;
struct mhi_result result;
@@ -756,213 +809,194 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *buf,
return -EINVAL;
uci_handle = file->private_data;
- client_handle = uci_handle->in_handle;
- mutex = &uci_handle->in_chan_lock;
- chan = uci_handle->in_chan;
- mutex_lock(mutex);
+ client_handle = uci_handle->in_attr.mhi_handle;
+ chan_lock = &uci_handle->in_attr.chan_lock;
+ chan = uci_handle->in_attr.chan_id;
buf_size = uci_handle->in_attr.max_packet_size;
result.buf_addr = NULL;
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "Client attempted read on chan %d\n",
- chan);
- do {
- if (!uci_handle->pkt_loc &&
- !atomic_read(&uci_handle->uci_ctxt->mhi_disabled)) {
- ret_val = mhi_poll_inbound(client_handle, &result);
- if (ret_val) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_ERROR,
- "Failed to poll inbound ret %d avail pkt %d\n",
- ret_val,
- atomic_read(&uci_handle->avail_pkts));
- }
- if (result.buf_addr)
- uci_handle->pkt_loc = result.buf_addr;
- else
- uci_handle->pkt_loc = 0;
- uci_handle->pkt_size = result.bytes_xferd;
- *bytes_pending = uci_handle->pkt_size;
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "Got pkt size 0x%zx at addr 0x%lx, chan %d\n",
- uci_handle->pkt_size,
- (uintptr_t)result.buf_addr,
- chan);
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
+ "Client attempted read on chan %d\n", chan);
+
+ mutex_lock(chan_lock);
+
+ /* confirm channel is active */
+ if (!uci_handle->in_attr.enabled || !uci_handle->enabled) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
+ "chan:%d is disabled\n", chan);
+ ret_val = -ERESTARTSYS;
+ goto read_error;
+ }
+
+ /* No data available to read, wait */
+ if (!uci_handle->cur_buf &&
+ !atomic_read(&uci_handle->in_attr.avail_pkts)) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
+ "No data available to read for chan:%d waiting\n",
+ chan);
+ mutex_unlock(chan_lock);
+ ret_val = wait_event_interruptible(uci_handle->in_attr.wq,
+ (atomic_read(&uci_handle->in_attr.avail_pkts) ||
+ !uci_handle->in_attr.enabled));
+ mutex_lock(chan_lock);
+ if (ret_val == -ERESTARTSYS) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
+ "Exit signal caught for chan:%d\n", chan);
+ goto read_error;
+
}
- if ((*bytes_pending == 0 || uci_handle->pkt_loc == 0) &&
- (atomic_read(&uci_handle->avail_pkts) <= 0)) {
- /* If nothing was copied yet, wait for data */
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "No data avail_pkts %d, chan %d\n",
- atomic_read(&uci_handle->avail_pkts),
- chan);
- ret_val = wait_event_interruptible(
- uci_handle->read_wq,
- (atomic_read(&uci_handle->avail_pkts) > 0));
- if (ret_val == -ERESTARTSYS) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_ERROR,
- "Exit signal caught\n");
- goto error;
- }
- /* A pending reset exists */
- } else if ((atomic_read(&uci_handle->avail_pkts) > 0) &&
- 0 == uci_handle->pkt_size &&
- 0 == uci_handle->pkt_loc &&
- uci_handle->mhi_status == -ENETRESET) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_ERROR,
- "Detected pending reset, reporting chan %d\n",
- chan);
- atomic_dec(&uci_handle->avail_pkts);
- uci_handle->mhi_status = 0;
- mutex_unlock(mutex);
- return -ENETRESET;
- /* A valid packet was returned from MHI */
- } else if (atomic_read(&uci_handle->avail_pkts) &&
- uci_handle->pkt_size != 0 &&
- uci_handle->pkt_loc != 0) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "Got packet: avail pkts %d phy_adr 0x%p, chan %d\n",
- atomic_read(&uci_handle->avail_pkts),
- result.buf_addr,
- chan);
- break;
- /*
- * MHI did not return a valid packet, but we have one
- * which we did not finish returning to user
- */
- } else {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_CRITICAL,
- "chan %d err: avail pkts %d mhi_stat%d\n",
- chan,
- atomic_read(&uci_handle->avail_pkts),
- uci_handle->mhi_status);
- return -EIO;
+ if (!uci_handle->in_attr.enabled || !uci_handle->enabled) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
+ "chan:%d is disabled\n", chan);
+ ret_val = -ERESTARTSYS;
+ goto read_error;
}
- } while (!uci_handle->pkt_loc);
+ }
- if (uspace_buf_size >= *bytes_pending) {
- addr_offset = uci_handle->pkt_size - *bytes_pending;
- if (0 != copy_to_user(buf,
- uci_handle->pkt_loc + addr_offset,
- *bytes_pending)) {
+ /* new read, get the data from MHI */
+ if (!uci_handle->cur_buf) {
+ struct uci_buf *cur_buf;
+
+ ret_val = mhi_poll_inbound(client_handle, &result);
+ if (unlikely(ret_val || !result.buf_addr)) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_ERROR,
+ "Failed to poll inbound ret %d avail_pkt %d\n",
+ ret_val,
+ atomic_read(&uci_handle->in_attr.avail_pkts));
+ goto read_error;
+ }
+ cur_buf = list_first_entry_or_null(
+ &uci_handle->in_attr.buf_head,
+ struct uci_buf, node);
+ if (unlikely(!cur_buf)) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_ERROR,
+ "Received completion cb but no packets queued, avail_pkt:%d\n",
+ atomic_read(&uci_handle->in_attr.avail_pkts));
ret_val = -EIO;
- goto error;
+ goto read_error;
}
- bytes_copied = *bytes_pending;
- *bytes_pending = 0;
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "Copied 0x%zx of 0x%x, chan %d\n",
- bytes_copied,
- (u32)*bytes_pending,
- chan);
- } else {
- addr_offset = uci_handle->pkt_size - *bytes_pending;
- if (copy_to_user(buf,
- (void *)(uintptr_t)uci_handle->pkt_loc +
- addr_offset,
- uspace_buf_size) != 0) {
+ if (unlikely(cur_buf->data != result.buf_addr)) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_ERROR,
+ "Receive out of order packet id:%llu\n",
+ cur_buf->pkt_id);
ret_val = -EIO;
- goto error;
+ goto read_error;
}
- bytes_copied = uspace_buf_size;
- *bytes_pending -= uspace_buf_size;
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "Copied 0x%zx of 0x%x,chan %d\n",
- bytes_copied,
- (u32)*bytes_pending,
- chan);
+
+ list_del(&cur_buf->node);
+ uci_handle->cur_buf = cur_buf;
+ *bytes_pending = result.bytes_xferd;
+ uci_handle->pkt_size = result.bytes_xferd;
+ atomic_dec(&uci_handle->in_attr.avail_pkts);
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
+ "Got pkt @ %llu size:%llu for chan:%d\n",
+ uci_handle->cur_buf->pkt_id, *bytes_pending, chan);
}
+
+ /* Copy the buffer to user space */
+ bytes_copied = min_t(size_t, uspace_buf_size, *bytes_pending);
+ addr_offset = uci_handle->pkt_size - *bytes_pending;
+ ret_val = copy_to_user(buf, uci_handle->cur_buf->data + addr_offset,
+ bytes_copied);
+ if (ret_val != 0)
+ goto read_error;
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
+ "Copied %lu of %llu bytes for chan:%d\n",
+ bytes_copied, *bytes_pending, chan);
+ *bytes_pending -= bytes_copied;
+
/* We finished with this buffer, map it back */
if (*bytes_pending == 0) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "Pkt loc %p ,chan %d\n",
- uci_handle->pkt_loc,
- chan);
- memset(uci_handle->pkt_loc, 0, buf_size);
- atomic_dec(&uci_handle->avail_pkts);
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "Decremented avail pkts avail 0x%x\n",
- atomic_read(&uci_handle->avail_pkts));
- ret_val = mhi_queue_xfer(client_handle, uci_handle->pkt_loc,
+ struct uci_buf *uci_buf = uci_handle->cur_buf;
+
+ uci_handle->cur_buf = NULL;
+ uci_buf->pkt_id = uci_handle->in_attr.pkt_count++;
+ memset(uci_buf->data, 0xdeadbeef, buf_size);
+ ret_val = mhi_queue_xfer(client_handle, uci_buf->data,
buf_size, MHI_EOT);
if (0 != ret_val) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_ERROR,
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_ERROR,
"Failed to recycle element\n");
- ret_val = -EIO;
- goto error;
+ kfree(uci_buf->data);
+ goto read_error;
}
- uci_handle->pkt_loc = 0;
+ list_add_tail(&uci_buf->node, &uci_handle->in_attr.buf_head);
}
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "Returning 0x%zx bytes, 0x%x bytes left\n",
- bytes_copied,
- (u32)*bytes_pending);
- mutex_unlock(mutex);
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
+ "Returning %lu bytes, %llu bytes left\n",
+ bytes_copied, *bytes_pending);
+ mutex_unlock(chan_lock);
return bytes_copied;
-error:
- mutex_unlock(mutex);
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_ERROR,
- "Returning %d\n",
- ret_val);
+
+read_error:
+ mutex_unlock(chan_lock);
return ret_val;
}
static ssize_t mhi_uci_client_write(struct file *file,
- const char __user *buf,
- size_t count, loff_t *offp)
+ const char __user *buf,
+ size_t count,
+ loff_t *offp)
{
struct uci_client *uci_handle = NULL;
+ struct chan_attr *chan_attr;
+ size_t bytes_transferrd = 0;
int ret_val = 0;
u32 chan = 0xFFFFFFFF;
- if (file == NULL || buf == NULL ||
- !count || file->private_data == NULL)
+ if (file == NULL || buf == NULL || !count ||
+ file->private_data == NULL)
return -EINVAL;
else
uci_handle = file->private_data;
- chan = uci_handle->out_chan;
- mutex_lock(&uci_handle->out_chan_lock);
-
- while (ret_val == 0) {
- ret_val = mhi_uci_send_packet(&uci_handle->out_handle,
- (void *)buf, count, 1);
- if (!ret_val) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
- "No descriptors available, did we poll, chan %d?\n",
- chan);
- mutex_unlock(&uci_handle->out_chan_lock);
- ret_val =
- wait_event_interruptible(
- uci_handle->write_wq,
- mhi_get_free_desc(uci_handle->out_handle) > 0);
- mutex_lock(&uci_handle->out_chan_lock);
- if (-ERESTARTSYS == ret_val) {
- goto sys_interrupt;
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_WARNING,
- "Waitqueue cancelled by system\n");
- }
+ chan_attr = &uci_handle->out_attr;
+ chan = chan_attr->chan_id;
+ mutex_lock(&chan_attr->chan_lock);
+
+ if (!chan_attr->enabled || !uci_handle->enabled) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
+ "Link is disabled\n");
+ ret_val = -ERESTARTSYS;
+ goto sys_interrupt;
+ }
+
+ while (bytes_transferrd != count) {
+ ret_val = mhi_uci_send_packet(&chan_attr->mhi_handle,
+ (void *)buf, count);
+ if (ret_val < 0)
+ goto sys_interrupt;
+
+ bytes_transferrd += ret_val;
+ if (bytes_transferrd == count)
+ break;
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
+ "No descriptors available, did we poll, chan %d?\n",
+ chan);
+ mutex_unlock(&chan_attr->chan_lock);
+ ret_val = wait_event_interruptible(chan_attr->wq,
+ (atomic_read(&chan_attr->avail_pkts) ||
+ !chan_attr->enabled));
+ mutex_lock(&chan_attr->chan_lock);
+ if (-ERESTARTSYS == ret_val) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
+ "Waitqueue cancelled by system\n");
+ goto sys_interrupt;
+ }
+ if (!chan_attr->enabled || !uci_handle->enabled) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
+ "Link is disabled\n");
+ ret_val = -ERESTARTSYS;
+ goto sys_interrupt;
}
}
+
+ mutex_unlock(&chan_attr->chan_lock);
+ return bytes_transferrd;
+
sys_interrupt:
- mutex_unlock(&uci_handle->out_chan_lock);
+ mutex_unlock(&chan_attr->chan_lock);
return ret_val;
}
@@ -1022,7 +1056,12 @@ static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt,
if (chan_attrib->chan_id == ctrl_chan)
uci_ctxt->ctrl_client = client;
+
+ INIT_LIST_HEAD(&chan_attrib->buf_head);
+ mutex_init(&chan_attrib->chan_lock);
+ atomic_set(&chan_attrib->avail_pkts, 0);
}
+ INIT_WORK(&client->outbound_worker, mhi_uci_clean_acked_tre);
}
error_dts:
@@ -1030,35 +1069,6 @@ error_dts:
return ret_val;
}
-static int process_mhi_disabled_notif_sync(struct uci_client *uci_handle)
-{
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_INFO,
- "Entered.\n");
- if (uci_handle->mhi_status != -ENETRESET) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_CRITICAL,
- "Setting reset for chan %d\n",
- uci_handle->out_chan);
- uci_handle->pkt_size = 0;
- uci_handle->pkt_loc = NULL;
- uci_handle->mhi_status = -ENETRESET;
- atomic_set(&uci_handle->avail_pkts, 1);
- mhi_close_channel(uci_handle->out_handle);
- mhi_close_channel(uci_handle->in_handle);
- uci_handle->out_chan_state = 0;
- uci_handle->in_chan_state = 0;
- wake_up(&uci_handle->read_wq);
- } else {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_CRITICAL,
- "Chan %d state already reset\n",
- uci_handle->out_chan);
- }
- uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO, "Exited\n");
- return 0;
-}
-
static void process_rs232_state(struct uci_client *ctrl_client,
struct mhi_result *result)
{
@@ -1071,15 +1081,13 @@ static void process_rs232_state(struct uci_client *ctrl_client,
mutex_lock(&uci_ctxt->ctrl_mutex);
if (result->transaction_status != 0) {
- uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log,
- UCI_DBG_ERROR,
+ uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, UCI_DBG_ERROR,
"Non successful transfer code 0x%x\n",
result->transaction_status);
goto error_bad_xfer;
}
if (result->bytes_xferd != sizeof(struct rs232_ctrl_msg)) {
- uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log,
- UCI_DBG_ERROR,
+ uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, UCI_DBG_ERROR,
"Buffer is of wrong size is: 0x%zx: expected 0x%zx\n",
result->bytes_xferd,
sizeof(struct rs232_ctrl_msg));
@@ -1088,8 +1096,8 @@ static void process_rs232_state(struct uci_client *ctrl_client,
rs232_pkt = result->buf_addr;
MHI_GET_CTRL_DEST_ID(CTRL_DEST_ID, rs232_pkt, chan);
for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++)
- if (chan == uci_ctxt->client_handles[i].out_chan ||
- chan == uci_ctxt->client_handles[i].in_chan) {
+ if (chan == uci_ctxt->client_handles[i].out_attr.chan_id ||
+ chan == uci_ctxt->client_handles[i].in_attr.chan_id) {
client = &uci_ctxt->client_handles[i];
break;
}
@@ -1114,13 +1122,12 @@ static void process_rs232_state(struct uci_client *ctrl_client,
error_bad_xfer:
error_size:
memset(rs232_pkt, 0, sizeof(struct rs232_ctrl_msg));
- ret_val = mhi_queue_xfer(ctrl_client->in_handle,
+ ret_val = mhi_queue_xfer(ctrl_client->in_attr.mhi_handle,
result->buf_addr,
result->bytes_xferd,
result->flags);
if (0 != ret_val) {
- uci_log(ctrl_client->uci_ipc_log,
- UCI_DBG_ERROR,
+ uci_log(ctrl_client->uci_ipc_log, UCI_DBG_ERROR,
"Failed to recycle ctrl msg buffer\n");
}
mutex_unlock(&uci_ctxt->ctrl_mutex);
@@ -1129,13 +1136,12 @@ error_size:
static void parse_inbound_ack(struct uci_client *uci_handle,
struct mhi_result *result)
{
- atomic_inc(&uci_handle->avail_pkts);
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
+ atomic_inc(&uci_handle->in_attr.avail_pkts);
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
"Received cb on chan %d, avail pkts: 0x%x\n",
- uci_handle->in_chan,
- atomic_read(&uci_handle->avail_pkts));
- wake_up(&uci_handle->read_wq);
+ uci_handle->in_attr.chan_id,
+ atomic_read(&uci_handle->in_attr.avail_pkts));
+ wake_up(&uci_handle->in_attr.wq);
if (uci_handle == uci_handle->uci_ctxt->ctrl_client)
process_rs232_state(uci_handle, result);
}
@@ -1143,25 +1149,25 @@ static void parse_inbound_ack(struct uci_client *uci_handle,
static void parse_outbound_ack(struct uci_client *uci_handle,
struct mhi_result *result)
{
- kfree(result->buf_addr);
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
"Received ack on chan %d, pending acks: 0x%x\n",
- uci_handle->out_chan,
+ uci_handle->out_attr.chan_id,
atomic_read(&uci_handle->out_pkt_pend_ack));
atomic_dec(&uci_handle->out_pkt_pend_ack);
- if (mhi_get_free_desc(uci_handle->out_handle))
- wake_up(&uci_handle->write_wq);
+ atomic_inc(&uci_handle->out_attr.avail_pkts);
+ atomic_inc(&uci_handle->completion_ack);
+ wake_up(&uci_handle->out_attr.wq);
+ schedule_work(&uci_handle->outbound_worker);
}
static void uci_xfer_cb(struct mhi_cb_info *cb_info)
{
struct uci_client *uci_handle = NULL;
struct mhi_result *result;
+ struct chan_attr *chan_attr;
if (!cb_info || !cb_info->result) {
- uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log,
- UCI_DBG_CRITICAL,
+ uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, UCI_DBG_CRITICAL,
"Bad CB info from MHI\n");
return;
}
@@ -1169,22 +1175,23 @@ static void uci_xfer_cb(struct mhi_cb_info *cb_info)
uci_handle = cb_info->result->user_data;
switch (cb_info->cb_reason) {
case MHI_CB_MHI_ENABLED:
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_INFO,
- "MHI enabled CB received.\n");
- atomic_set(&uci_handle->mhi_disabled, 0);
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
+ "MHI enabled CB received for chan %d\n",
+ cb_info->chan);
+ chan_attr = (cb_info->chan % 2) ? &uci_handle->in_attr :
+ &uci_handle->out_attr;
+ mutex_lock(&chan_attr->chan_lock);
+ chan_attr->enabled = true;
+ mutex_unlock(&chan_attr->chan_lock);
+ wake_up(&chan_attr->wq);
break;
case MHI_CB_MHI_DISABLED:
- atomic_set(&uci_handle->mhi_disabled, 1);
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_INFO,
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
"MHI disabled CB received\n");
- process_mhi_disabled_notif_sync(uci_handle);
break;
case MHI_CB_XFER:
if (!cb_info->result) {
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_CRITICAL,
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_CRITICAL,
"Failed to obtain mhi result from CB\n");
return;
}
@@ -1195,8 +1202,7 @@ static void uci_xfer_cb(struct mhi_cb_info *cb_info)
parse_outbound_ack(uci_handle, result);
break;
default:
- uci_log(uci_handle->uci_ipc_log,
- UCI_DBG_VERBOSE,
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
"Cannot handle cb reason 0x%x\n",
cb_info->cb_reason);
}
@@ -1208,48 +1214,40 @@ static int mhi_register_client(struct uci_client *mhi_client,
int ret_val = 0;
struct mhi_client_info_t client_info;
- uci_log(mhi_client->uci_ipc_log,
- UCI_DBG_INFO,
+ uci_log(mhi_client->uci_ipc_log, UCI_DBG_INFO,
"Setting up workqueues\n");
- init_waitqueue_head(&mhi_client->read_wq);
- init_waitqueue_head(&mhi_client->write_wq);
- mhi_client->out_chan = mhi_client->out_attr.chan_id;
- mhi_client->in_chan = mhi_client->in_attr.chan_id;
-
- mutex_init(&mhi_client->in_chan_lock);
- mutex_init(&mhi_client->out_chan_lock);
- atomic_set(&mhi_client->mhi_disabled, 1);
+ init_waitqueue_head(&mhi_client->in_attr.wq);
+ init_waitqueue_head(&mhi_client->out_attr.wq);
- uci_log(mhi_client->uci_ipc_log,
- UCI_DBG_INFO,
+ uci_log(mhi_client->uci_ipc_log, UCI_DBG_INFO,
"Registering chan %d\n",
- mhi_client->out_chan);
+ mhi_client->out_attr.chan_id);
client_info.dev = dev;
client_info.node_name = "qcom,mhi";
client_info.user_data = mhi_client;
client_info.mhi_client_cb = uci_xfer_cb;
- client_info.chan = mhi_client->out_chan;
+ client_info.chan = mhi_client->out_attr.chan_id;
client_info.max_payload = mhi_client->out_attr.max_packet_size;
- ret_val = mhi_register_channel(&mhi_client->out_handle, &client_info);
+ ret_val = mhi_register_channel(&mhi_client->out_attr.mhi_handle,
+ &client_info);
if (0 != ret_val)
uci_log(mhi_client->uci_ipc_log,
UCI_DBG_ERROR,
"Failed to init outbound chan 0x%x, ret 0x%x\n",
- mhi_client->out_chan,
+ mhi_client->out_attr.chan_id,
ret_val);
- uci_log(mhi_client->uci_ipc_log,
- UCI_DBG_INFO,
+ uci_log(mhi_client->uci_ipc_log, UCI_DBG_INFO,
"Registering chan %d\n",
- mhi_client->in_chan);
+ mhi_client->in_attr.chan_id);
client_info.max_payload = mhi_client->in_attr.max_packet_size;
- client_info.chan = mhi_client->in_chan;
- ret_val = mhi_register_channel(&mhi_client->in_handle, &client_info);
+ client_info.chan = mhi_client->in_attr.chan_id;
+ ret_val = mhi_register_channel(&mhi_client->in_attr.mhi_handle,
+ &client_info);
if (0 != ret_val)
- uci_log(mhi_client->uci_ipc_log,
- UCI_DBG_ERROR,
+ uci_log(mhi_client->uci_ipc_log, UCI_DBG_ERROR,
"Failed to init inbound chan 0x%x, ret 0x%x\n",
- mhi_client->in_chan,
+ mhi_client->in_attr.chan_id,
ret_val);
return 0;
}
@@ -1313,6 +1311,7 @@ static int mhi_uci_probe(struct platform_device *pdev)
struct uci_client *uci_client = &uci_ctxt->client_handles[i];
uci_client->uci_ctxt = uci_ctxt;
+ mutex_init(&uci_client->client_lock);
if (uci_client->in_attr.uci_ownership) {
ret_val = mhi_register_client(uci_client,
&pdev->dev);
@@ -1328,10 +1327,10 @@ static int mhi_uci_probe(struct platform_device *pdev)
snprintf(node_name,
sizeof(node_name),
"mhi_uci_%04x_%02u.%02u.%02u_%d",
- uci_client->out_handle->dev_id,
- uci_client->out_handle->domain,
- uci_client->out_handle->bus,
- uci_client->out_handle->slot,
+ uci_client->out_attr.mhi_handle->dev_id,
+ uci_client->out_attr.mhi_handle->domain,
+ uci_client->out_attr.mhi_handle->bus,
+ uci_client->out_attr.mhi_handle->slot,
uci_client->out_attr.chan_id);
uci_client->uci_ipc_log = ipc_log_context_create
(MHI_UCI_IPC_LOG_PAGES,
@@ -1380,12 +1379,12 @@ static int mhi_uci_probe(struct platform_device *pdev)
uci_ctxt->dev_t + i,
NULL,
DEVICE_NAME "_%04x_%02u.%02u.%02u%s%d",
- uci_client->out_handle->dev_id,
- uci_client->out_handle->domain,
- uci_client->out_handle->bus,
- uci_client->out_handle->slot,
+ uci_client->out_attr.mhi_handle->dev_id,
+ uci_client->out_attr.mhi_handle->domain,
+ uci_client->out_attr.mhi_handle->bus,
+ uci_client->out_attr.mhi_handle->slot,
"_pipe_",
- uci_client->out_chan);
+ uci_client->out_attr.chan_id);
if (IS_ERR(uci_client->dev)) {
uci_log(uci_client->uci_ipc_log,
UCI_DBG_ERROR,
@@ -1427,8 +1426,8 @@ static int mhi_uci_remove(struct platform_device *pdev)
uci_client->uci_ctxt = uci_ctxt;
if (uci_client->in_attr.uci_ownership) {
- mhi_deregister_channel(uci_client->out_handle);
- mhi_deregister_channel(uci_client->in_handle);
+ mhi_deregister_channel(uci_client->out_attr.mhi_handle);
+ mhi_deregister_channel(uci_client->in_attr.mhi_handle);
cdev_del(&uci_ctxt->cdev[i]);
device_destroy(mhi_uci_drv_ctxt.mhi_uci_class,
MKDEV(MAJOR(uci_ctxt->dev_t), i));
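
The mhi_uci rework above drops the in_buf_list array in favour of per-packet struct uci_buf nodes carved out of the same allocation as the payload: kmalloc(buf_size + sizeof(*uci_buf)) with the node placed at data + buf_size, so a single kfree(buf->data) releases both and the nodes can sit on a per-channel list. A hedged sketch of that layout, using illustrative names rather than the driver's own:

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>

struct demo_buf {
	void *data;             /* start of payload; also the pointer passed to kfree() */
	u64 pkt_id;
	struct list_head node;  /* would be linked into a per-channel buf_head list */
};

/* Allocate payload + tracking node in one kmalloc(); the node sits after the payload. */
static struct demo_buf *demo_buf_alloc(size_t buf_size, gfp_t gfp)
{
	void *data = kmalloc(buf_size + sizeof(struct demo_buf), gfp);
	struct demo_buf *buf;

	if (!data)
		return NULL;

	buf = data + buf_size;  /* bookkeeping lives right behind the payload */
	buf->data = data;
	INIT_LIST_HEAD(&buf->node);
	return buf;
}

static void demo_buf_free(struct demo_buf *buf)
{
	kfree(buf->data);       /* one allocation, so this frees the node as well */
}
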
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 77e21a7976ff..cb7a85068233 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -1140,6 +1140,9 @@ static int smb2_init_vconn_regulator(struct smb2 *chip)
struct regulator_config cfg = {};
int rc = 0;
+ if (chg->micro_usb_mode)
+ return 0;
+
chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
GFP_KERNEL);
if (!chg->vconn_vreg)
@@ -1351,9 +1354,10 @@ static int smb2_disable_typec(struct smb_charger *chg)
int rc;
/* Move to typeC mode */
- /* configure FSM in idle state */
+ /* configure FSM in idle state and disable UFP_ENABLE bit */
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
- TYPEC_DISABLE_CMD_BIT, TYPEC_DISABLE_CMD_BIT);
+ TYPEC_DISABLE_CMD_BIT | UFP_EN_CMD_BIT,
+ TYPEC_DISABLE_CMD_BIT);
if (rc < 0) {
dev_err(chg->dev, "Couldn't put FSM in idle rc=%d\n", rc);
return rc;
@@ -2148,7 +2152,7 @@ static int smb2_probe(struct platform_device *pdev)
rc = smb2_init_vconn_regulator(chip);
if (rc < 0) {
pr_err("Couldn't initialize vconn regulator rc=%d\n",
- rc);
+ rc);
goto cleanup;
}
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 5e1300ce1897..379280d3eb33 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -4094,6 +4094,9 @@ static void smblib_vconn_oc_work(struct work_struct *work)
int rc, i;
u8 stat;
+ if (chg->micro_usb_mode)
+ return;
+
smblib_err(chg, "over-current detected on VCONN\n");
if (!chg->vconn_vreg || !chg->vconn_vreg->rdev)
return;
diff --git a/drivers/scsi/ufs/ufs_quirks.c b/drivers/scsi/ufs/ufs_quirks.c
index 176c3888aa34..7a501d6d7c84 100644
--- a/drivers/scsi/ufs/ufs_quirks.c
+++ b/drivers/scsi/ufs/ufs_quirks.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,6 +30,20 @@ static struct ufs_card_fix ufs_fixups[] = {
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_HYNIX, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hB8aL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hC8aL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hD8aL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hC8aM1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "h08aM1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hC8GL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hC8HL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
END_FIX
};
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index b8ab5948f12d..3102517e841c 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -139,6 +139,14 @@ struct ufs_card_fix {
*/
#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 7)
+/*
+ * Some UFS devices may stop responding after switching from HS-G1 to HS-G3.
+ * Also, it is found that these devices work fine if we do a two-step switch:
+ * HS-G1 to HS-G2 followed by HS-G2 to HS-G3. Enabling this quirk for such
+ * devices applies this two-step gear switch workaround.
+ */
+#define UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH (1 << 8)
+
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
#endif /* UFS_QUIRKS_H_ */
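
The new UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH bit follows the same (1 << n) flag convention as the existing quirks. A small sketch, assuming a quirks bitmask on a hypothetical device structure, of how such a flag typically gates the two-step behaviour:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative quirk bits in the same (1 << n) style as above. */
#define QUIRK_HOST_PA_SAVECONFIGTIME	(1u << 7)
#define QUIRK_HS_G1_TO_HS_G3_SWITCH	(1u << 8)

/* Hypothetical device view, not the real struct ufs_hba. */
struct ufs_dev {
	uint32_t quirks;
};

/* Decide whether a G1 -> G3 transition must go through G2 first. */
static bool needs_two_step_switch(const struct ufs_dev *dev,
				  int cur_gear, int new_gear)
{
	return (dev->quirks & QUIRK_HS_G1_TO_HS_G3_SWITCH) &&
	       cur_gear == 1 && new_gear == 3;
}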
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 16f130dd3c76..3e858015813f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -509,7 +509,7 @@ out:
/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
- if (!val)
+ if (!val || !*val)
return;
if (*val < 0x20 || *val > 0x7e)
@@ -3651,7 +3651,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
goto out;
}
- buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
+ buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
if (!buff_ascii) {
dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
__func__, ascii_len);
@@ -9207,6 +9207,31 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
if (scale_up) {
memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
sizeof(struct ufs_pa_layer_attr));
+ /*
+ * Some UFS devices may stop responding after switching from
+ * HS-G1 to HS-G3. Also, it is found that these devices work
+ * fine if we do a two-step switch: HS-G1 to HS-G2 followed by
+ * HS-G2 to HS-G3. If the UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH
+ * quirk is enabled for such devices, this two-step gear switch
+ * workaround is applied.
+ */
+ if ((hba->dev_quirks & UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
+ && (hba->pwr_info.gear_tx == UFS_HS_G1)
+ && (new_pwr_info.gear_tx == UFS_HS_G3)) {
+ /* scale up to G2 first */
+ new_pwr_info.gear_tx = UFS_HS_G2;
+ new_pwr_info.gear_rx = UFS_HS_G2;
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+ if (ret)
+ goto out;
+
+ /* scale up to G3 now */
+ new_pwr_info.gear_tx = UFS_HS_G3;
+ new_pwr_info.gear_rx = UFS_HS_G3;
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+ if (ret)
+ goto out;
+ }
} else {
memcpy(&new_pwr_info, &hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
@@ -9226,10 +9251,10 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
new_pwr_info.pwr_rx = FASTAUTO_MODE;
}
}
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
}
- ret = ufshcd_change_power_mode(hba, &new_pwr_info);
-
+out:
if (ret)
dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
__func__, ret,
@@ -9292,10 +9317,29 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
goto clk_scaling_unprepare;
}
+ /*
+ * If auto hibern8 is supported, put the link into hibern8
+ * manually to avoid auto hibern8 racing with the clock
+ * frequency scaling sequence.
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (ret)
+ /* link is in a bad state, so no need to go to scale_up_gear */
+ return ret;
+ }
+
ret = ufshcd_scale_clks(hba, scale_up);
if (ret)
goto scale_up_gear;
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (ret)
+ /* link is in a bad state, so no need to go to scale_up_gear */
+ return ret;
+ }
+
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
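
The ufshcd_devfreq_scale() hunks above wrap the clock frequency change in a manual hibern8 enter/exit and only raise the gear afterwards. A simplified user-space sketch of that ordering for the scale-up direction, with stub functions standing in for the ufshcd_* helpers and the error-unwind paths omitted:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the ufshcd_* helpers; illustration only. */
static bool auto_hibern8_supported(void) { return true; }
static int hibern8_enter(void) { puts("enter hibern8"); return 0; }
static int hibern8_exit(void)  { puts("exit hibern8");  return 0; }
static int scale_clks(bool up) { printf("clks %s\n", up ? "up" : "down"); return 0; }
static int scale_gear(bool up) { printf("gear %s\n", up ? "up" : "down"); return 0; }

static int devfreq_scale(bool scale_up)
{
	int ret;

	/* Park the link so auto hibern8 cannot race with the change. */
	if (auto_hibern8_supported()) {
		ret = hibern8_enter();
		if (ret)
			return ret;	/* link unusable, do not touch the gear */
	}

	ret = scale_clks(scale_up);
	if (ret)
		return ret;

	if (auto_hibern8_supported()) {
		ret = hibern8_exit();
		if (ret)
			return ret;
	}

	/* The gear goes up only after the clocks have been raised. */
	return scale_up ? scale_gear(true) : 0;
}

int main(void) { return devfreq_scale(true); }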
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index d11ffdde23be..3a6d84140bc9 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -98,8 +98,7 @@ static int __apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
unsigned long flags;
spin_lock_irqsave(&apr_ch->w_lock, flags);
- rc = glink_tx(apr_ch->handle, pkt_priv, data, len,
- GLINK_TX_REQ_INTENT | GLINK_TX_ATOMIC);
+ rc = glink_tx(apr_ch->handle, pkt_priv, data, len, GLINK_TX_ATOMIC);
spin_unlock_irqrestore(&apr_ch->w_lock, flags);
if (rc)
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 5ac2a58899f4..0625f75de373 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -266,8 +266,10 @@ static int service_locator_send_msg(struct pd_qmi_client_data *pd)
if (!domains_read) {
db_rev_count = pd->db_rev_count = resp->db_rev_count;
pd->total_domains = resp->total_domains;
- if (!resp->total_domains)
- pr_info("No matching domains found\n");
+ if (!resp->total_domains) {
+ pr_err("No matching domains found\n");
+ goto out;
+ }
pd->domain_list = kmalloc(
sizeof(struct servreg_loc_entry_v01) *
diff --git a/drivers/thermal/msm_thermal-dev.c b/drivers/thermal/msm_thermal-dev.c
index e6af6b884e99..ead9765666c8 100644
--- a/drivers/thermal/msm_thermal-dev.c
+++ b/drivers/thermal/msm_thermal-dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@ static unsigned int freq_table_len[NR_CPUS], freq_table_set[NR_CPUS];
static unsigned int voltage_table_set[NR_CPUS];
static unsigned int *freq_table_ptr[NR_CPUS];
static uint32_t *voltage_table_ptr[NR_CPUS];
+static DEFINE_MUTEX(ioctl_access_mutex);
static int msm_thermal_ioctl_open(struct inode *node, struct file *filep)
{
@@ -291,8 +292,9 @@ static long msm_thermal_ioctl_process(struct file *filep, unsigned int cmd,
ret = validate_and_copy(&cmd, &arg, &query);
if (ret)
- goto process_exit;
+ return ret;
+ mutex_lock(&ioctl_access_mutex);
switch (cmd) {
case MSM_THERMAL_SET_CPU_MAX_FREQUENCY:
ret = msm_thermal_set_frequency(query.cpu_freq.cpu_num,
@@ -321,6 +323,7 @@ static long msm_thermal_ioctl_process(struct file *filep, unsigned int cmd,
goto process_exit;
}
process_exit:
+ mutex_unlock(&ioctl_access_mutex);
return ret;
}
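
The msm_thermal ioctl hunks above validate and copy the argument first, then hold ioctl_access_mutex across the whole command dispatch and release it at process_exit. A kernel-flavoured sketch of that serialization pattern, with hypothetical validate()/handle_cmd() stubs in place of the driver's own helpers:

#include <linux/mutex.h>

static DEFINE_MUTEX(ioctl_access_mutex);

/* Hypothetical stubs; the real driver validates/copies the user argument
 * and then dispatches on the ioctl command. */
static long validate(unsigned int cmd, unsigned long arg) { return 0; }
static long handle_cmd(unsigned int cmd, unsigned long arg) { return 0; }

static long example_ioctl(unsigned int cmd, unsigned long arg)
{
	long ret;

	ret = validate(cmd, arg);
	if (ret)
		return ret;		/* nothing locked yet, plain return */

	mutex_lock(&ioctl_access_mutex);
	ret = handle_cmd(cmd, arg);	/* only one caller in here at a time */
	mutex_unlock(&ioctl_access_mutex);

	return ret;
}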
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 5b648460e621..c2eba06f2ace 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -670,6 +670,16 @@ int dwc3_core_init(struct dwc3 *dwc)
}
}
+ /*
+ * Workaround for STAR 9000961433 which affects only version
+ * 3.00a of the DWC_usb3 core. This prevents the controller
+ * interrupt from being masked while handling events. IMOD
+ * allows us to work around this issue. Enable it for the
+ * affected version.
+ */
+ if (!dwc->imod_interval && (dwc->revision == DWC3_REVISION_300A))
+ dwc->imod_interval = 1;
+
ret = dwc3_core_reset(dwc);
if (ret)
goto err0;
@@ -1000,6 +1010,15 @@ err0:
#define DWC3_ALIGN_MASK (16 - 1)
+/* check whether the core supports IMOD */
+bool dwc3_has_imod(struct dwc3 *dwc)
+{
+ return ((dwc3_is_usb3(dwc) &&
+ dwc->revision >= DWC3_REVISION_300A) ||
+ (dwc3_is_usb31(dwc) &&
+ dwc->revision >= DWC3_USB31_REVISION_120A));
+}
+
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1040,8 +1059,8 @@ static int dwc3_probe(struct platform_device *pdev)
/* will be enabled in dwc3_msm_resume() */
irq_set_status_flags(irq, IRQ_NOAUTOEN);
- ret = devm_request_threaded_irq(dev, irq, NULL, dwc3_interrupt,
- IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
+ ret = devm_request_irq(dev, irq, dwc3_interrupt, IRQF_SHARED, "dwc3",
+ dwc);
if (ret) {
dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
irq, ret);
@@ -1219,6 +1238,14 @@ static int dwc3_probe(struct platform_device *pdev)
dev->dma_parms = dev->parent->dma_parms;
dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
+ dwc->dwc_wq = alloc_ordered_workqueue("dwc_wq", WQ_HIGHPRI);
+ if (!dwc->dwc_wq) {
+ pr_err("%s: Unable to create workqueue dwc_wq\n", __func__);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&dwc->bh_work, dwc3_bh_work);
+
pm_runtime_no_callbacks(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -1284,6 +1311,7 @@ err0:
* memory region the next time probe is called.
*/
res->start -= DWC3_GLOBALS_REGS_START;
+ destroy_workqueue(dwc->dwc_wq);
return ret;
}
@@ -1313,6 +1341,8 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
+ destroy_workqueue(dwc->dwc_wq);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
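
The dwc3_probe()/dwc3_remove() hunks above allocate an ordered, high-priority workqueue for the event bottom half and destroy it on both the probe error path and removal. A minimal sketch of that lifecycle, with a hypothetical example_dev structure in place of struct dwc3:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
	struct workqueue_struct *wq;
	struct work_struct bh_work;
};

/* The bottom half runs in process context; an ordered workqueue executes
 * at most one item at a time, preserving event ordering. */
static void example_bh_work(struct work_struct *w)
{
	struct example_dev *ed = container_of(w, struct example_dev, bh_work);

	(void)ed;	/* ... drain and process events here ... */
}

static int example_setup(struct example_dev *ed)
{
	ed->wq = alloc_ordered_workqueue("example_wq", WQ_HIGHPRI);
	if (!ed->wq)
		return -ENOMEM;

	INIT_WORK(&ed->bh_work, example_bh_work);
	return 0;
}

static void example_teardown(struct example_dev *ed)
{
	/* Flushes pending work and frees the queue. */
	destroy_workqueue(ed->wq);
}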
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index f6e2bea7b9aa..453eee734b23 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -66,6 +66,7 @@
#define DWC3_DEVICE_EVENT_OVERFLOW 11
#define DWC3_GEVNTCOUNT_MASK 0xfffc
+#define DWC3_GEVNTCOUNT_EHB (1 << 31)
#define DWC3_GSNPSID_MASK 0xffff0000
#define DWC3_GSNPSREV_MASK 0xffff
@@ -149,6 +150,8 @@
#define DWC3_DEPCMDPAR0(n) (0xc808 + (n * 0x10))
#define DWC3_DEPCMD(n) (0xc80c + (n * 0x10))
+#define DWC3_DEV_IMOD(n) (0xca00 + (n * 0x4))
+
/* OTG Registers */
#define DWC3_OCFG 0xcc00
#define DWC3_OCTL 0xcc04
@@ -433,6 +436,11 @@
#define DWC3_DEPCMD_TYPE_BULK 2
#define DWC3_DEPCMD_TYPE_INTR 3
+#define DWC3_DEV_IMOD_COUNT_SHIFT 16
+#define DWC3_DEV_IMOD_COUNT_MASK (0xffff << 16)
+#define DWC3_DEV_IMOD_INTERVAL_SHIFT 0
+#define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0)
+
/* Structures */
struct dwc3_trb;
@@ -837,6 +845,8 @@ struct dwc3_scratchpad_array {
* @bh_dbg_index: index for capturing bh_completion_time and bh_handled_evt_cnt
* @wait_linkstate: waitqueue for waiting LINK to move into required state
* @vbus_draw: current to be drawn from USB
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ * increments or 0 to disable.
*/
struct dwc3 {
struct usb_ctrlrequest *ctrl_req;
@@ -920,6 +930,7 @@ struct dwc3 {
#define DWC3_REVISION_260A 0x5533260a
#define DWC3_REVISION_270A 0x5533270a
#define DWC3_REVISION_280A 0x5533280a
+#define DWC3_REVISION_300A 0x5533300a
#define DWC3_REVISION_310A 0x5533310a
/*
@@ -928,6 +939,7 @@ struct dwc3 {
*/
#define DWC3_REVISION_IS_DWC31 0x80000000
#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_USB31)
+#define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31)
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
@@ -1008,6 +1020,11 @@ struct dwc3 {
bool b_suspend;
unsigned vbus_draw;
+ u16 imod_interval;
+
+ struct workqueue_struct *dwc_wq;
+ struct work_struct bh_work;
+
/* IRQ timing statistics */
int irq;
unsigned long irq_cnt;
@@ -1175,6 +1192,20 @@ struct dwc3_gadget_ep_cmd_params {
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc);
+/* check whether we are on the DWC_usb3 core */
+static inline bool dwc3_is_usb3(struct dwc3 *dwc)
+{
+ return !(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
+
+/* check whether we are on the DWC_usb31 core */
+static inline bool dwc3_is_usb31(struct dwc3 *dwc)
+{
+ return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
+
+bool dwc3_has_imod(struct dwc3 *dwc);
+
#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_host_init(struct dwc3 *dwc);
void dwc3_host_exit(struct dwc3 *dwc);
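
Per the DWC3_DEV_IMOD_* masks added above, DWC3_DEV_IMOD(n) packs a down-counter in bits [31:16] and the moderation interval (in 250 ns units, per the imod_interval documentation) in bits [15:0]. A small stand-alone sketch of composing such a register value; the helper name is made up:

#include <stdint.h>
#include <stdio.h>

#define IMOD_COUNT_SHIFT	16
#define IMOD_COUNT_MASK		(0xffffu << 16)
#define IMOD_INTERVAL_SHIFT	0
#define IMOD_INTERVAL_MASK	(0xffffu << 0)

static uint32_t imod_encode(uint16_t count, uint16_t interval)
{
	return (((uint32_t)count << IMOD_COUNT_SHIFT) & IMOD_COUNT_MASK) |
	       (((uint32_t)interval << IMOD_INTERVAL_SHIFT) & IMOD_INTERVAL_MASK);
}

int main(void)
{
	/* imod_interval = 1 corresponds to 250 ns of interrupt moderation. */
	printf("DEV_IMOD = 0x%08x\n", imod_encode(0, 1));
	return 0;
}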
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 623f3ce211aa..a80fb34cdce8 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -2053,6 +2053,9 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
if (dwc->irq)
disable_irq(dwc->irq);
+ if (work_busy(&dwc->bh_work))
+ dbg_event(0xFF, "pend evt", 0);
+
/* disable power event irq, hs and ss phy irq is used as wake up src */
disable_irq(mdwc->pwr_event_irq);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e1284b6cc2ef..9608a79cbe40 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2011,6 +2011,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
int ret = 0;
u32 reg;
+ /*
+ * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
+ * the core supports IMOD, disable it.
+ */
+ if (dwc->imod_interval) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+ } else if (dwc3_has_imod(dwc)) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
+ }
+
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
@@ -3362,8 +3373,6 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
*/
evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
left -= 4;
-
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
}
dwc->bh_handled_evt_cnt[dwc->bh_dbg_index] += (evt->count / 4);
@@ -3377,9 +3386,22 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
reg &= ~DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+ if (dwc->imod_interval)
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf),
+ DWC3_GEVNTCOUNT_EHB);
+
return ret;
}
+void dwc3_bh_work(struct work_struct *w)
+{
+ struct dwc3 *dwc = container_of(w, struct dwc3, bh_work);
+
+ pm_runtime_get_sync(dwc->dev);
+ dwc3_thread_interrupt(dwc->irq, dwc);
+ pm_runtime_put(dwc->dev);
+}
+
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
{
struct dwc3 *dwc = _dwc;
@@ -3434,6 +3456,8 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
reg |= DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), count);
+
return IRQ_WAKE_THREAD;
}
@@ -3469,7 +3493,7 @@ irqreturn_t dwc3_interrupt(int irq, void *_dwc)
dwc->irq_dbg_index = (dwc->irq_dbg_index + 1) % MAX_INTR_STATS;
if (ret == IRQ_WAKE_THREAD)
- dwc3_thread_interrupt(dwc->irq, dwc);
+ queue_work(dwc->dwc_wq, &dwc->bh_work);
return IRQ_HANDLED;
}
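
With the hunks above, the hard IRQ handler acknowledges the event count and queues bh_work instead of calling the threaded handler inline, and the work item holds a runtime-PM reference while it drains events. A condensed sketch of that split, assuming a hypothetical example_dev wrapper with a struct device pointer:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

struct example_dev {
	struct device *dev;
	struct workqueue_struct *wq;
	struct work_struct bh_work;
};

/* Deferred half: keep the device powered while events are processed. */
static void example_bh_work(struct work_struct *w)
{
	struct example_dev *ed = container_of(w, struct example_dev, bh_work);

	pm_runtime_get_sync(ed->dev);
	/* ... process the event buffer, may sleep ... */
	pm_runtime_put(ed->dev);
}

/* Top half: ack what was seen and defer the heavy lifting. */
static irqreturn_t example_interrupt(int irq, void *data)
{
	struct example_dev *ed = data;

	queue_work(ed->wq, &ed->bh_work);
	return IRQ_HANDLED;
}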
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 99ff6df063a7..baa83cf9638b 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -105,6 +105,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
irqreturn_t dwc3_interrupt(int irq, void *_dwc);
+void dwc3_bh_work(struct work_struct *w);
static inline dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
struct dwc3_trb *trb)
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index ed0ff7b1fc15..c31aaf7a9880 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -141,21 +141,28 @@ struct gadget_config_name {
struct list_head list;
};
+#define MAX_USB_STRING_LEN 126
+#define MAX_USB_STRING_WITH_NULL_LEN (MAX_USB_STRING_LEN+1)
+
static int usb_string_copy(const char *s, char **s_copy)
{
int ret;
char *str;
char *copy = *s_copy;
ret = strlen(s);
- if (ret > 126)
+ if (ret > MAX_USB_STRING_LEN)
return -EOVERFLOW;
- str = kstrdup(s, GFP_KERNEL);
- if (!str)
- return -ENOMEM;
+ if (copy) {
+ str = copy;
+ } else {
+ str = kmalloc(MAX_USB_STRING_WITH_NULL_LEN, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+ }
+ strncpy(str, s, MAX_USB_STRING_WITH_NULL_LEN);
if (str[ret - 1] == '\n')
str[ret - 1] = '\0';
- kfree(copy);
*s_copy = str;
return 0;
}
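
usb_string_copy() above now keeps one fixed-size buffer per string and overwrites it in place instead of calling kstrdup()/kfree() on every update. A user-space sketch of the same pattern; the extra length check before stripping the trailing newline is a hardening added here, not part of the hunk:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define MAX_STR_LEN		126
#define MAX_STR_WITH_NULL_LEN	(MAX_STR_LEN + 1)

static int string_copy(const char *s, char **s_copy)
{
	size_t len = strlen(s);
	char *str = *s_copy;

	if (len > MAX_STR_LEN)
		return -EOVERFLOW;

	/* Allocate once; later calls reuse the same buffer. */
	if (!str) {
		str = malloc(MAX_STR_WITH_NULL_LEN);
		if (!str)
			return -ENOMEM;
	}

	/* len <= 126, so the 127-byte copy is always NUL terminated. */
	strncpy(str, s, MAX_STR_WITH_NULL_LEN);
	if (len && str[len - 1] == '\n')
		str[len - 1] = '\0';

	*s_copy = str;
	return 0;
}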
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 7423645c204c..1ddf882fb607 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -245,7 +245,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto put_usb3_hcd;
}
- ret = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_ONESHOT);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto disable_usb_phy;
@@ -254,7 +254,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
- ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED | IRQF_ONESHOT);
+ ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto dealloc_usb2_hcd;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 40943af749a1..8eb12d764be3 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -48,6 +48,12 @@
#define BUF_POOL_SIZE 32
+#define DFPS_DATA_MAX_HFP 8192
+#define DFPS_DATA_MAX_HBP 8192
+#define DFPS_DATA_MAX_HPW 8192
+#define DFPS_DATA_MAX_FPS 0x7fffffff
+#define DFPS_DATA_MAX_CLK_RATE 250000
+
static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd);
static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd);
static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd);
@@ -3516,6 +3522,13 @@ static ssize_t dynamic_fps_sysfs_wta_dfps(struct device *dev,
return count;
}
+ if (data.hfp > DFPS_DATA_MAX_HFP || data.hbp > DFPS_DATA_MAX_HBP ||
+ data.hpw > DFPS_DATA_MAX_HPW || data.fps > DFPS_DATA_MAX_FPS ||
+ data.clk_rate > DFPS_DATA_MAX_CLK_RATE) {
+ pr_err("Data values out of bounds\n");
+ return -EINVAL;
+ }
+
rc = mdss_mdp_dfps_update_params(mfd, pdata, &data);
if (rc) {
pr_err("failed to set dfps params\n");
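
The new DFPS_DATA_MAX_* limits above bound every field of the user-supplied dfps data before it reaches mdss_mdp_dfps_update_params(). A stand-alone sketch of that validation step, with a hypothetical dfps_data mirror of the sysfs input:

#include <errno.h>
#include <stdint.h>

#define MAX_HFP		8192
#define MAX_HBP		8192
#define MAX_HPW		8192
#define MAX_FPS		0x7fffffffu
#define MAX_CLK_RATE	250000

struct dfps_data {
	uint32_t hfp, hbp, hpw, fps, clk_rate;
};

/* Reject out-of-range values before any hardware state is touched. */
static int dfps_validate(const struct dfps_data *d)
{
	if (d->hfp > MAX_HFP || d->hbp > MAX_HBP || d->hpw > MAX_HPW ||
	    d->fps > MAX_FPS || d->clk_rate > MAX_CLK_RATE)
		return -EINVAL;

	return 0;
}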
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 1c87478b5fc0..31cc6f40baa5 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -432,9 +432,10 @@ struct mmc_card {
struct mmc_wr_pack_stats wr_pack_stats; /* packed commands stats*/
struct notifier_block reboot_notify;
enum mmc_pon_type pon_type;
- u8 *cached_ext_csd;
bool cmdq_init;
struct mmc_bkops_info bkops;
+ bool err_in_sdr104;
+ bool sdr104_blocked;
};
/*
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 1068953943d8..2a1a6fec179f 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -169,6 +169,7 @@ extern int __mmc_switch_cmdq_mode(struct mmc_command *cmd, u8 set, u8 index,
extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
extern int mmc_set_auto_bkops(struct mmc_card *card, bool enable);
+extern int mmc_suspend_clk_scaling(struct mmc_host *host);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 055b879dfa6b..d9e12c1b1748 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -595,6 +595,7 @@ struct mmc_host {
struct io_latency_state io_lat_s;
#endif
+ bool sdr104_wa;
unsigned long private[0] ____cacheline_aligned;
};
@@ -728,6 +729,16 @@ static inline int mmc_host_uhs(struct mmc_host *host)
MMC_CAP_UHS_DDR50);
}
+static inline void mmc_host_clear_sdr104(struct mmc_host *host)
+{
+ host->caps &= ~MMC_CAP_UHS_SDR104;
+}
+
+static inline void mmc_host_set_sdr104(struct mmc_host *host)
+{
+ host->caps |= MMC_CAP_UHS_SDR104;
+}
+
static inline int mmc_host_packed_wr(struct mmc_host *host)
{
return host->caps2 & MMC_CAP2_PACKED_WR;
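
mmc_host_clear_sdr104()/mmc_host_set_sdr104() simply toggle MMC_CAP_UHS_SDR104 in host->caps, and the new sdr104_wa, err_in_sdr104 and sdr104_blocked flags track the workaround state. One plausible way a recovery path could use them; the surrounding error-handling flow here is an assumption, not the driver's actual logic:

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

/* Hypothetical error hook: drop SDR104 so the next re-initialization
 * settles on a slower UHS mode, unless the card is already blocked. */
static void example_sdr104_error(struct mmc_host *host, struct mmc_card *card)
{
	if (host->sdr104_wa && !card->sdr104_blocked) {
		card->err_in_sdr104 = true;
		mmc_host_clear_sdr104(host);
	}
}

/* Hypothetical restore hook once the card re-initializes cleanly. */
static void example_sdr104_restore(struct mmc_host *host, struct mmc_card *card)
{
	card->err_in_sdr104 = false;
	mmc_host_set_sdr104(host);
}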
diff --git a/include/linux/msm_mhi.h b/include/linux/msm_mhi.h
index b9fd610f92da..2b50ce59406e 100644
--- a/include/linux/msm_mhi.h
+++ b/include/linux/msm_mhi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,7 @@
#define MSM_MHI_H
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/scatterlist.h>
#define MHI_DMA_MASK 0xFFFFFFFFFFULL
#define MHI_MAX_MTU 0xFFFF
@@ -77,6 +78,7 @@ enum MHI_CB_REASON {
MHI_CB_MHI_ENABLED,
MHI_CB_MHI_SHUTDOWN,
MHI_CB_SYS_ERROR,
+ MHI_CB_RDDM,
};
enum MHI_FLAGS {
@@ -128,16 +130,22 @@ struct __packed bhi_vec_entry {
* @dev: device node points to of_node
* @pdev: pci device node
* @resource: bar memory space and IRQ resources
+ * @support_rddm: this device supports ramdump collection
+ * @rddm_size: size of ramdump buffer in bytes to allocate
* @pm_runtime_get: fp for bus masters rpm pm_runtime_get
* @pm_runtime_noidle: fp for bus masters rpm pm_runtime_noidle
+ * @status_cb: fp for MHI status change notifications
* @mhi_dev_ctxt: private data for host
*/
struct mhi_device {
struct device *dev;
struct pci_dev *pci_dev;
struct resource resources[2];
+ bool support_rddm;
+ size_t rddm_size;
int (*pm_runtime_get)(struct pci_dev *pci_dev);
- void (*pm_runtime_noidle)(struct pci_dev *pci_dev);
+ void (*pm_runtime_put_noidle)(struct pci_dev *pci_dev);
+ void (*status_cb)(enum MHI_CB_REASON, void *priv);
struct mhi_device_ctxt *mhi_dev_ctxt;
};
@@ -148,10 +156,16 @@ enum mhi_dev_ctrl {
MHI_DEV_CTRL_RESUME,
MHI_DEV_CTRL_POWER_OFF,
MHI_DEV_CTRL_POWER_ON,
- MHI_DEV_CTRL_RAM_DUMP,
+ MHI_DEV_CTRL_RDDM,
+ MHI_DEV_CTRL_RDDM_KERNEL_PANIC,
MHI_DEV_CTRL_NOTIFY_LINK_ERROR,
};
+enum mhi_rddm_segment {
+ MHI_RDDM_FW_SEGMENT,
+ MHI_RDDM_RD_SEGMENT,
+};
+
/**
* mhi_is_device_ready - Check if MHI is ready to register clients
*
@@ -173,7 +187,7 @@ bool mhi_is_device_ready(const struct device * const dev,
*/
int mhi_register_device(struct mhi_device *mhi_device,
const char *node_name,
- unsigned long user_data);
+ void *user_data);
/**
* mhi_pm_control_device - power management control api
@@ -185,6 +199,15 @@ int mhi_pm_control_device(struct mhi_device *mhi_device,
enum mhi_dev_ctrl ctrl);
/**
+ * mhi_xfer_rddm - transfer rddm segment to bus master
+ * @mhi_device: registered device structure
+ * @seg: rddm segment type to transfer
+ * @sg_list: filled with a scatterlist pointing to the segments
+ * @Return: # of segments, 0 if no segment available
+ */
+int mhi_xfer_rddm(struct mhi_device *mhi_device, enum mhi_rddm_segment seg,
+ struct scatterlist **sg_list);
+
+/**
* mhi_deregister_channel - de-register callbacks from MHI
*
* @client_handle: Handle populated by MHI, opaque to client
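
mhi_xfer_rddm() hands the bus master a scatterlist for the requested rddm segment type and returns the number of entries. A hypothetical consumer that just totals the dump size, purely to show the calling convention declared above:

#include <linux/msm_mhi.h>
#include <linux/scatterlist.h>

static size_t example_rddm_size(struct mhi_device *mhi_dev)
{
	struct scatterlist *sg_list, *sg;
	size_t total = 0;
	int nseg, i;

	nseg = mhi_xfer_rddm(mhi_dev, MHI_RDDM_RD_SEGMENT, &sg_list);
	if (!nseg)
		return 0;	/* no segment available yet */

	for_each_sg(sg_list, sg, nseg, i)
		total += sg->length;

	return total;
}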
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index 4b98d1ee0ecd..7f9ad8ebcd3d 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -53,7 +53,7 @@
#define WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS 50
#define ANC_DETECT_RETRY_CNT 7
-#define WCD_MBHC_SPL_HS_CNT 2
+#define WCD_MBHC_SPL_HS_CNT 1
static int det_extn_cable_en;
module_param(det_extn_cable_en, int,
@@ -1162,7 +1162,7 @@ static void wcd_correct_swch_plug(struct work_struct *work)
bool wrk_complete = false;
int pt_gnd_mic_swap_cnt = 0;
int no_gnd_mic_swap_cnt = 0;
- bool is_pa_on = false, spl_hs = false;
+ bool is_pa_on = false, spl_hs = false, spl_hs_reported = false;
bool micbias2 = false;
bool micbias1 = false;
int ret = 0;
@@ -1368,6 +1368,16 @@ correct_plug_type:
plug_type);
if (!(plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
plug_type = MBHC_PLUG_TYPE_HEADSET;
+ if (!spl_hs_reported &&
+ spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
+ spl_hs_reported = true;
+ WCD_MBHC_RSC_LOCK(mbhc);
+ wcd_mbhc_find_plug_and_report(mbhc,
+ plug_type);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ continue;
+ } else if (spl_hs_reported)
+ continue;
/*
* Report headset only if not already reported
* and if there is not button press without
@@ -1442,6 +1452,29 @@ exit:
!mbhc->micbias_enable)
mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
MICB_DISABLE);
+
+ /*
+ * If plug type is corrected from special headset to headphone,
+ * clear the micbias enable flag, set micbias back to 1.8V and
+ * disable micbias.
+ */
+ if (plug_type == MBHC_PLUG_TYPE_HEADPHONE &&
+ mbhc->micbias_enable) {
+ if (mbhc->mbhc_cb->mbhc_micbias_control)
+ mbhc->mbhc_cb->mbhc_micbias_control(
+ codec, MIC_BIAS_2,
+ MICB_DISABLE);
+ if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+ codec,
+ MIC_BIAS_2, false);
+ if (mbhc->mbhc_cb->set_micbias_value) {
+ mbhc->mbhc_cb->set_micbias_value(codec);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 0);
+ }
+ mbhc->micbias_enable = false;
+ }
+
if (mbhc->mbhc_cb->micbias_enable_status) {
micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
MIC_BIAS_1);
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 192d9291a8f3..cc8e45d77fcd 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -798,11 +798,13 @@ int wcd934x_bringup(struct wcd9xxx *wcd9xxx)
regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01);
regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19);
regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15);
+ /* Add 1msec delay for VOUT to settle */
+ usleep_range(1000, 1100);
regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
- regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3);
regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7);
+ regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
return 0;
}
@@ -8277,6 +8279,9 @@ static int __tavil_cdc_mclk_enable(struct tavil_priv *tavil,
WCD9XXX_V2_BG_CLK_LOCK(tavil->resmgr);
ret = __tavil_cdc_mclk_enable_locked(tavil, enable);
+ if (enable)
+ wcd_resmgr_set_sido_input_src(tavil->resmgr,
+ SIDO_SOURCE_RCO_BG);
WCD9XXX_V2_BG_CLK_UNLOCK(tavil->resmgr);
return ret;
@@ -8415,6 +8420,8 @@ static int __tavil_codec_internal_rco_ctrl(struct snd_soc_codec *codec,
__func__, ret);
goto done;
}
+ wcd_resmgr_set_sido_input_src(tavil->resmgr,
+ SIDO_SOURCE_RCO_BG);
ret = wcd_resmgr_enable_clk_block(tavil->resmgr,
WCD_CLK_RCO);
ret |= tavil_cdc_req_mclk_enable(tavil, false);
@@ -9816,18 +9823,23 @@ static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil)
{
int val, rc;
- __tavil_cdc_mclk_enable(tavil, true);
+ WCD9XXX_V2_BG_CLK_LOCK(tavil->resmgr);
+ __tavil_cdc_mclk_enable_locked(tavil, true);
regmap_update_bits(tavil->wcd9xxx->regmap,
WCD934X_CHIP_TIER_CTRL_EFUSE_CTL, 0x1E, 0x10);
regmap_update_bits(tavil->wcd9xxx->regmap,
WCD934X_CHIP_TIER_CTRL_EFUSE_CTL, 0x01, 0x01);
-
/*
* 5ms sleep required after enabling efuse control
* before checking the status.
*/
usleep_range(5000, 5500);
+ wcd_resmgr_set_sido_input_src(tavil->resmgr,
+ SIDO_SOURCE_RCO_BG);
+
+ WCD9XXX_V2_BG_CLK_UNLOCK(tavil->resmgr);
+
rc = regmap_read(tavil->wcd9xxx->regmap,
WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS, &val);
if (rc || (!(val & 0x01)))
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
index bd92ccc9e009..f16fc05a5eaa 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
@@ -25,8 +25,7 @@
#define WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL 0x0d41
#define WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL 0x0d42
-static void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
- int sido_src);
+
static const char *wcd_resmgr_clk_type_to_str(enum wcd_clock_type clk_type)
{
if (clk_type == WCD_CLK_OFF)
@@ -267,8 +266,6 @@ static int wcd_resmgr_enable_clk_mclk(struct wcd9xxx_resmgr_v2 *resmgr)
0x01, 0x01);
wcd_resmgr_codec_reg_update_bits(resmgr,
WCD934X_CODEC_RPM_CLK_GATE, 0x03, 0x00);
- wcd_resmgr_set_sido_input_src(resmgr,
- SIDO_SOURCE_RCO_BG);
} else {
wcd_resmgr_codec_reg_update_bits(resmgr,
WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,
@@ -515,7 +512,7 @@ int wcd_resmgr_enable_clk_block(struct wcd9xxx_resmgr_v2 *resmgr,
return ret;
}
-static void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
+void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
int sido_src)
{
if (!resmgr)
@@ -553,6 +550,7 @@ static void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
pr_debug("%s: sido input src to external\n", __func__);
}
}
+EXPORT_SYMBOL(wcd_resmgr_set_sido_input_src);
/*
* wcd_resmgr_set_sido_input_src_locked:
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.h b/sound/soc/codecs/wcd9xxx-resmgr-v2.h
index f605a249a620..e831ba61e9c2 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.h
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -87,4 +87,7 @@ int wcd_resmgr_get_clk_type(struct wcd9xxx_resmgr_v2 *resmgr);
void wcd_resmgr_post_ssr_v2(struct wcd9xxx_resmgr_v2 *resmgr);
void wcd_resmgr_set_sido_input_src_locked(struct wcd9xxx_resmgr_v2 *resmgr,
int sido_src);
+void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
+ int sido_src);
+
#endif
diff --git a/sound/soc/msm/apq8096-auto.c b/sound/soc/msm/apq8096-auto.c
index 138f4a02452c..7b363bdca291 100644
--- a/sound/soc/msm/apq8096-auto.c
+++ b/sound/soc/msm/apq8096-auto.c
@@ -115,6 +115,9 @@ static int msm_ec_ref_ch = 4;
static int msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_ec_ref_sampling_rate = SAMPLING_RATE_48KHZ;
+static int msm_tdm_slot_width = 32;
+static int msm_tdm_num_slots = 8;
+
static void *adsp_state_notifier;
static bool dummy_device_registered;
@@ -295,6 +298,62 @@ static unsigned int tdm_slot_offset_adp_mmxf[TDM_MAX][TDM_SLOT_OFFSET_MAX] = {
{0xFFFF}, /* not used */
};
+static unsigned int tdm_slot_offset_custom[TDM_MAX][TDM_SLOT_OFFSET_MAX] = {
+ /* QUAT_TDM_RX */
+ {0, 2, 0xFFFF},
+ {4, 6, 8, 10, 12, 14, 16, 18},
+ {20, 22, 24, 26, 28, 30, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* QUAT_TDM_TX */
+ {0, 2, 0xFFFF},
+ {4, 6, 8, 10, 12, 14, 16, 18},
+ {20, 22, 24, 26, 28, 30, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* TERT_TDM_RX */
+ {0, 2, 0xFFFF},
+ {4, 0xFFFF},
+ {6, 0xFFFF},
+ {8, 0xFFFF},
+ {10, 0xFFFF},
+ {12, 14, 16, 18, 20, 22, 24, 26},
+ {28, 30, 0xFFFF},
+ {0xFFFF}, /* not used */
+ /* TERT_TDM_TX */
+ {0, 2, 4, 6, 8, 10, 12, 0xFFFF},
+ {14, 16, 0xFFFF},
+ {18, 20, 22, 24, 26, 28, 30, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* SEC_TDM_RX */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* SEC_TDM_TX */
+ {0xFFFF},
+ {0xFFFF},
+ {0xFFFF},
+ {0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+};
static char const *hdmi_rx_ch_text[] = {"Two", "Three", "Four", "Five",
"Six", "Seven", "Eight"};
@@ -2256,44 +2315,38 @@ static int apq8096_tdm_snd_hw_params(struct snd_pcm_substream *substream,
pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id);
channels = params_channels(params);
- switch (channels) {
- case 1:
- case 2:
- case 3:
- case 4:
- case 6:
- case 8:
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S32_LE:
- case SNDRV_PCM_FORMAT_S24_LE:
- case SNDRV_PCM_FORMAT_S16_LE:
- /*
- * up to 8 channel HW configuration should
- * use 32 bit slot width for max support of
- * stream bit width. (slot_width > bit_width)
- */
- slot_width = 32;
- break;
- default:
- pr_err("%s: invalid param format 0x%x\n",
- __func__, params_format(params));
- return -EINVAL;
- }
- slots = 8;
- slot_mask = tdm_param_set_slot_mask(cpu_dai->id,
- slot_width, slots);
- if (!slot_mask) {
- pr_err("%s: invalid slot_mask 0x%x\n",
- __func__, slot_mask);
- return -EINVAL;
- }
- break;
- default:
+ if (channels < 1 || channels > 8) {
pr_err("%s: invalid param channels %d\n",
__func__, channels);
return -EINVAL;
}
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ case SNDRV_PCM_FORMAT_S24_LE:
+ case SNDRV_PCM_FORMAT_S16_LE:
+ /*
+ * up to 8 channel HW configurations should use a
+ * slot width wide enough for the maximum stream
+ * bit width (slot_width > bit_width)
+ */
+ slot_width = msm_tdm_slot_width;
+ break;
+ default:
+ pr_err("%s: invalid param format 0x%x\n",
+ __func__, params_format(params));
+ return -EINVAL;
+ }
+
+ slots = msm_tdm_num_slots;
+ slot_mask = tdm_param_set_slot_mask(cpu_dai->id,
+ slot_width, slots);
+ if (!slot_mask) {
+ pr_err("%s: invalid slot_mask 0x%x\n",
+ __func__, slot_mask);
+ return -EINVAL;
+ }
+
switch (cpu_dai->id) {
case AFE_PORT_ID_SECONDARY_TDM_RX:
slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_0];
@@ -2660,7 +2713,7 @@ static int apq8096_get_ll_qos_val(struct snd_pcm_runtime *runtime)
return usecs;
}
-static int apq8096_mm5_prepare(struct snd_pcm_substream *substream)
+static int apq8096_ll_prepare(struct snd_pcm_substream *substream)
{
if (pm_qos_request_active(&substream->latency_pm_qos_req))
pm_qos_remove_request(&substream->latency_pm_qos_req);
@@ -2670,8 +2723,8 @@ static int apq8096_mm5_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_soc_ops apq8096_mm5_ops = {
- .prepare = apq8096_mm5_prepare,
+static struct snd_soc_ops apq8096_ll_ops = {
+ .prepare = apq8096_ll_prepare,
};
/* Digital audio interface glue - connects codec <---> CPU */
@@ -2938,7 +2991,7 @@ static struct snd_soc_dai_link apq8096_common_dai_links[] = {
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA5,
- .ops = &apq8096_mm5_ops,
+ .ops = &apq8096_ll_ops,
},
{
.name = "Listen 1 Audio Service",
@@ -3647,6 +3700,143 @@ static struct snd_soc_dai_link apq8096_auto_fe_dai_links[] = {
},
};
+static struct snd_soc_dai_link apq8096_custom_fe_dai_links[] = {
+ /* FrontEnd DAI Links */
+ {
+ .name = "MSM8996 Media1",
+ .stream_name = "MultiMedia1",
+ .cpu_dai_name = "MultiMedia1",
+ .platform_name = "msm-pcm-dsp.1",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dainlink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA1,
+ .ops = &apq8096_ll_ops,
+ },
+ {
+ .name = "MSM8996 Media2",
+ .stream_name = "MultiMedia2",
+ .cpu_dai_name = "MultiMedia2",
+ .platform_name = "msm-pcm-dsp.1",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dainlink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA2,
+ .ops = &apq8096_ll_ops,
+ },
+ {
+ .name = "MSM8996 Media3",
+ .stream_name = "MultiMedia3",
+ .cpu_dai_name = "MultiMedia3",
+ .platform_name = "msm-pcm-dsp.1",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dainlink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA3,
+ .ops = &apq8096_ll_ops,
+ },
+ {
+ .name = "MSM8996 Media5",
+ .stream_name = "MultiMedia5",
+ .cpu_dai_name = "MultiMedia5",
+ .platform_name = "msm-pcm-dsp.1",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dainlink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA5,
+ .ops = &apq8096_ll_ops,
+ },
+ {
+ .name = "MSM8996 Media6",
+ .stream_name = "MultiMedia6",
+ .cpu_dai_name = "MultiMedia6",
+ .platform_name = "msm-pcm-dsp.1",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dainlink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+ .ops = &apq8096_ll_ops,
+ },
+ {
+ .name = "MSM8996 Media8",
+ .stream_name = "MultiMedia8",
+ .cpu_dai_name = "MultiMedia8",
+ .platform_name = "msm-pcm-dsp.1",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dainlink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA8,
+ .ops = &apq8096_ll_ops,
+ },
+ {
+ .name = "MSM8996 Media9",
+ .stream_name = "MultiMedia9",
+ .cpu_dai_name = "MultiMedia9",
+ .platform_name = "msm-pcm-dsp.1",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dainlink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA9,
+ .ops = &apq8096_ll_ops,
+ },
+};
+
static struct snd_soc_dai_link apq8096_common_be_dai_links[] = {
/* Backend AFE DAI Links */
{
@@ -4115,6 +4305,13 @@ static struct snd_soc_dai_link apq8096_auto_dai_links[
ARRAY_SIZE(apq8096_auto_be_dai_links) +
ARRAY_SIZE(apq8096_hdmi_dai_link)];
+static struct snd_soc_dai_link apq8096_auto_custom_dai_links[
+ ARRAY_SIZE(apq8096_custom_fe_dai_links) +
+ ARRAY_SIZE(apq8096_auto_fe_dai_links) +
+ ARRAY_SIZE(apq8096_common_be_dai_links) +
+ ARRAY_SIZE(apq8096_auto_be_dai_links) +
+ ARRAY_SIZE(apq8096_hdmi_dai_link)];
+
struct snd_soc_card snd_soc_card_auto_apq8096 = {
.name = "apq8096-auto-snd-card",
};
@@ -4127,6 +4324,10 @@ struct snd_soc_card snd_soc_card_adp_mmxf_apq8096 = {
.name = "apq8096-adp-mmxf-snd-card",
};
+struct snd_soc_card snd_soc_card_auto_custom_apq8096 = {
+ .name = "apq8096-auto-custom-snd-card",
+};
+
static int apq8096_populate_dai_link_component_of_node(
struct snd_soc_card *card)
{
@@ -4220,6 +4421,8 @@ static const struct of_device_id apq8096_asoc_machine_of_match[] = {
.data = "adp_agave_codec"},
{ .compatible = "qcom,apq8096-asoc-snd-adp-mmxf",
.data = "adp_mmxf_codec"},
+ { .compatible = "qcom,apq8096-asoc-snd-auto-custom",
+ .data = "auto_custom_codec"},
{},
};
@@ -4243,31 +4446,55 @@ static struct snd_soc_card *populate_snd_card_dailinks(struct device *dev)
card = &snd_soc_card_adp_agave_apq8096;
else if (!strcmp(match->data, "adp_mmxf_codec"))
card = &snd_soc_card_adp_mmxf_apq8096;
- else {
+ else if (!strcmp(match->data, "auto_custom_codec")) {
+ card = &snd_soc_card_auto_custom_apq8096;
+ } else {
dev_err(dev, "%s: Codec not supported\n",
__func__);
return NULL;
}
- /* same FE and BE used for all codec */
- len_1 = ARRAY_SIZE(apq8096_common_dai_links);
- len_2 = len_1 + ARRAY_SIZE(apq8096_auto_fe_dai_links);
- len_3 = len_2 + ARRAY_SIZE(apq8096_common_be_dai_links);
-
- memcpy(apq8096_auto_dai_links,
- apq8096_common_dai_links,
- sizeof(apq8096_common_dai_links));
- memcpy(apq8096_auto_dai_links + len_1,
- apq8096_auto_fe_dai_links,
- sizeof(apq8096_auto_fe_dai_links));
- memcpy(apq8096_auto_dai_links + len_2,
- apq8096_common_be_dai_links,
- sizeof(apq8096_common_be_dai_links));
- memcpy(apq8096_auto_dai_links + len_3,
- apq8096_auto_be_dai_links,
- sizeof(apq8096_auto_be_dai_links));
-
- dailink = apq8096_auto_dai_links;
+ if (!strcmp(match->data, "auto_custom_codec")) {
+ len_1 = ARRAY_SIZE(apq8096_custom_fe_dai_links);
+ len_2 = len_1 + ARRAY_SIZE(apq8096_auto_fe_dai_links);
+ len_3 = len_2 + ARRAY_SIZE(apq8096_common_be_dai_links);
+
+ memcpy(apq8096_auto_custom_dai_links,
+ apq8096_custom_fe_dai_links,
+ sizeof(apq8096_custom_fe_dai_links));
+ memcpy(apq8096_auto_custom_dai_links + len_1,
+ apq8096_auto_fe_dai_links,
+ sizeof(apq8096_auto_fe_dai_links));
+ memcpy(apq8096_auto_custom_dai_links + len_2,
+ apq8096_common_be_dai_links,
+ sizeof(apq8096_common_be_dai_links));
+ memcpy(apq8096_auto_custom_dai_links + len_3,
+ apq8096_auto_be_dai_links,
+ sizeof(apq8096_auto_be_dai_links));
+
+ dailink = apq8096_auto_custom_dai_links;
+ } else {
+ /* same FE and BE used for all non-custom codec */
+ len_1 = ARRAY_SIZE(apq8096_common_dai_links);
+ len_2 = len_1 + ARRAY_SIZE(apq8096_auto_fe_dai_links);
+ len_3 = len_2 + ARRAY_SIZE(apq8096_common_be_dai_links);
+
+ memcpy(apq8096_auto_dai_links,
+ apq8096_common_dai_links,
+ sizeof(apq8096_common_dai_links));
+ memcpy(apq8096_auto_dai_links + len_1,
+ apq8096_auto_fe_dai_links,
+ sizeof(apq8096_auto_fe_dai_links));
+ memcpy(apq8096_auto_dai_links + len_2,
+ apq8096_common_be_dai_links,
+ sizeof(apq8096_common_be_dai_links));
+ memcpy(apq8096_auto_dai_links + len_3,
+ apq8096_auto_be_dai_links,
+ sizeof(apq8096_auto_be_dai_links));
+
+ dailink = apq8096_auto_dai_links;
+ }
+
len_4 = len_3 + ARRAY_SIZE(apq8096_auto_be_dai_links);
if (of_property_read_bool(dev->of_node, "qcom,hdmi-audio-rx")) {
@@ -4308,10 +4535,20 @@ static int apq8096_init_tdm_dev(struct device *dev)
memcpy(tdm_slot_offset,
tdm_slot_offset_adp_mmxf,
sizeof(tdm_slot_offset_adp_mmxf));
+ } else if (!strcmp(match->data, "auto_custom_codec")) {
+ dev_dbg(dev, "%s: custom tdm slot offset\n", __func__);
+ msm_tdm_slot_width = 16;
+ msm_tdm_num_slots = 16;
+ memcpy(tdm_slot_offset,
+ tdm_slot_offset_custom,
+ sizeof(tdm_slot_offset_custom));
} else {
dev_dbg(dev, "%s: DEFAULT tdm slot offset\n", __func__);
}
+ dev_dbg(dev, "%s: tdm slot_width %d, num_slots %d\n",
+ __func__, msm_tdm_slot_width, msm_tdm_num_slots);
+
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index ec4380036047..109e1a202ff2 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -1165,28 +1165,27 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
break;
case SNDRV_LSM_SET_FWK_MODE_CONFIG: {
- u32 *mode = NULL;
+ u32 mode;
- if (!arg) {
- dev_err(rtd->dev,
- "%s: Invalid param arg for ioctl %s session %d\n",
- __func__, "SNDRV_LSM_SET_FWK_MODE_CONFIG",
- prtd->lsm_client->session);
- rc = -EINVAL;
- break;
+ if (copy_from_user(&mode, arg, sizeof(mode))) {
+ dev_err(rtd->dev, "%s: %s: copy_from_user failed\n",
+ __func__, "LSM_SET_FWK_MODE_CONFIG");
+ return -EFAULT;
}
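
The block above copies the scalar mode argument from user space into a local with copy_from_user() instead of dereferencing the user pointer directly. The bare pattern, as a sketch with a hypothetical wrapper:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Fetch a u32 ioctl argument safely from user space. */
static int example_get_mode(void __user *arg, u32 *mode)
{
	if (copy_from_user(mode, arg, sizeof(*mode)))
		return -EFAULT;

	return 0;
}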
- mode = (u32 *)arg;
- if (prtd->lsm_client->event_mode == *mode) {
+
+ dev_dbg(rtd->dev, "%s: ioctl %s, enable = %d\n",
+ __func__, "SNDRV_LSM_SET_FWK_MODE_CONFIG", mode);
+ if (prtd->lsm_client->event_mode == mode) {
dev_dbg(rtd->dev,
"%s: mode for %d already set to %d\n",
- __func__, prtd->lsm_client->session, *mode);
+ __func__, prtd->lsm_client->session, mode);
rc = 0;
} else {
dev_dbg(rtd->dev, "%s: Event mode = %d\n",
- __func__, *mode);
- rc = q6lsm_set_fwk_mode_cfg(prtd->lsm_client, *mode);
+ __func__, mode);
+ rc = q6lsm_set_fwk_mode_cfg(prtd->lsm_client, mode);
if (!rc)
- prtd->lsm_client->event_mode = *mode;
+ prtd->lsm_client->event_mode = mode;
else
dev_err(rtd->dev,
"%s: set event mode failed %d\n",
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 837a08488991..7ce73484a681 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -5362,6 +5362,57 @@ static const struct snd_kcontrol_new tert_tdm_rx_3_mixer_controls[] = {
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_TDM_RX_0 ,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -11158,6 +11209,9 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_MIXER("TERT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
tert_tdm_rx_3_mixer_controls,
ARRAY_SIZE(tert_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_4_mixer_controls)),
SND_SOC_DAPM_MIXER("QUAT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
quat_tdm_rx_0_mixer_controls,
ARRAY_SIZE(quat_tdm_rx_0_mixer_controls)),
@@ -12182,6 +12236,24 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_3 Audio Mixer"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia9", "MM_DL9"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia10", "MM_DL10"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia11", "MM_DL11"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia12", "MM_DL12"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia13", "MM_DL13"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia14", "MM_DL14"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia15", "MM_DL15"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"TERT_TDM_RX_4", NULL, "TERT_TDM_RX_4 Audio Mixer"},
+
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -12937,6 +13009,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"VOIP_UL", NULL, "VOC_EXT_EC MUX"},
{"VoLTE_UL", NULL, "VOC_EXT_EC MUX"},
{"VOICE2_UL", NULL, "VOC_EXT_EC MUX"},
+ {"VoWLAN_UL", NULL, "VOC_EXT_EC MUX"},
{"VOICEMMODE1_UL", NULL, "VOC_EXT_EC MUX"},
{"VOICEMMODE2_UL", NULL, "VOC_EXT_EC MUX"},
@@ -13890,6 +13963,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"BE_OUT", NULL, "TERT_TDM_RX_1"},
{"BE_OUT", NULL, "TERT_TDM_RX_2"},
{"BE_OUT", NULL, "TERT_TDM_RX_3"},
+ {"BE_OUT", NULL, "TERT_TDM_RX_4"},
{"BE_OUT", NULL, "QUAT_TDM_RX_0"},
{"BE_OUT", NULL, "QUAT_TDM_RX_1"},
{"BE_OUT", NULL, "QUAT_TDM_RX_2"},
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 1ca99c3f9115..731f439f5286 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -4300,6 +4300,20 @@ static int q6asm_map_channels(u8 *channel_mapping, uint32_t channels,
PCM_CHANNEL_LB : PCM_CHANNEL_LS;
lchannel_mapping[5] = use_back_flavor ?
PCM_CHANNEL_RB : PCM_CHANNEL_RS;
+ } else if (channels == 7) {
+ /*
+ * Configured for 5.1 channel mapping + 1 channel for debug.
+ * Can be customized based on the DSP.
+ */
+ lchannel_mapping[0] = PCM_CHANNEL_FL;
+ lchannel_mapping[1] = PCM_CHANNEL_FR;
+ lchannel_mapping[2] = PCM_CHANNEL_FC;
+ lchannel_mapping[3] = PCM_CHANNEL_LFE;
+ lchannel_mapping[4] = use_back_flavor ?
+ PCM_CHANNEL_LB : PCM_CHANNEL_LS;
+ lchannel_mapping[5] = use_back_flavor ?
+ PCM_CHANNEL_RB : PCM_CHANNEL_RS;
+ lchannel_mapping[6] = PCM_CHANNEL_CS;
} else if (channels == 8) {
lchannel_mapping[0] = PCM_CHANNEL_FL;
lchannel_mapping[1] = PCM_CHANNEL_FR;